/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

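/*
 * Trivial UID hash: fold the bits above UIDHASH_BITS back onto the low
 * bits and mask to the table size, so a dense range of UIDs still spreads
 * across the buckets.
 */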
#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

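/*
 * Look up a user_struct by UID in the given hash chain.  A reference is
 * taken on the entry before it is returned, so the caller must drop it
 * again with free_uid().  Returns NULL if no matching entry exists.
 */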
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

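/*
 * Drop a reference on a user_struct.  atomic_dec_and_lock() only takes
 * uidhash_lock when the count actually drops to zero, in which case
 * free_user() unhashes and frees the structure.  Interrupts are disabled
 * around the whole sequence for the reasons described above uidhash_lock.
 */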
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

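/*
 * Find the user_struct for a UID, allocating and hashing a new one if none
 * exists yet.  The allocation happens without uidhash_lock held, so the
 * hash is rechecked afterwards in case another task inserted the same UID
 * in the meantime; the loser of that race frees its copy.  Returns NULL
 * only if the allocation fails.
 */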
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;

out_unlock:
	return NULL;
}

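/*
 * Boot-time setup: create the user_struct slab cache, initialise the UID
 * hash table and hash root_user so that the init task's credentials resolve
 * immediately.
 */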
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);