// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files, etc. the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>
#include <linux/proc_ns.h>

#if IS_ENABLED(CONFIG_BINFMT_MISC)
struct binfmt_misc init_binfmt_misc = {
	.entries = LIST_HEAD_INIT(init_binfmt_misc.entries),
	.enabled = true,
	.entries_lock = __RW_LOCK_UNLOCKED(init_binfmt_misc.entries_lock),
};
EXPORT_SYMBOL_GPL(init_binfmt_misc);
#endif

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.ns.count = REFCOUNT_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
#if IS_ENABLED(CONFIG_BINFMT_MISC)
	.binfmt_misc = &init_binfmt_misc,
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
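
/*
 * Each map above is a single identity extent spanning the whole 32-bit
 * ID space: an id is translated through an extent when
 * first <= id < first + count, yielding lower_first + (id - first).
 * Here first == lower_first == 0, so e.g. uid 1000 inside init_user_ns
 * maps to kuid 1000 - the initial namespace is a 1:1 pass-through.
 */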

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))
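
/*
 * Worked example (assuming CONFIG_BASE_SMALL is off, so UIDHASH_BITS = 7
 * and UIDHASH_MASK = 127): for uid 1000,
 *
 *	((1000 >> 7) + 1000) & 127 = (7 + 1000) & 127 = 111
 *
 * so that user lands in bucket 111. Folding the high bits in keeps UIDs
 * that differ only above the low 7 bits (e.g. 0, 128, 256) from all
 * hashing to the same bucket.
 */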

static struct kmem_cache *uid_cachep;
static struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count = REFCOUNT_INIT(1),
	.uid = GLOBAL_ROOT_UID,
	.ratelimit = RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

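/*
 * epoll watches are accounted per user with a per-CPU counter; these
 * wrappers keep the CONFIG_EPOLL conditional out of the alloc/free
 * paths below.
 */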
static int user_epoll_alloc(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	return percpu_counter_init(&up->epoll_watches, 0, GFP_KERNEL);
#else
	return 0;
#endif
}

static void user_epoll_free(struct user_struct *up)
{
#ifdef CONFIG_EPOLL
	percpu_counter_destroy(&up->epoll_watches);
#endif
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	user_epoll_free(up);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
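
/*
 * A minimal usage sketch (hypothetical caller, not from this file):
 *
 *	struct user_struct *up = find_user(uid);
 *	if (up) {
 *		... inspect the per-user state ...
 *		free_uid(up);		// drop the ref find_user() took
 *	}
 */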

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}
EXPORT_SYMBOL_GPL(free_uid);
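
/*
 * refcount_dec_and_lock_irqsave() only takes uidhash_lock (with IRQs
 * disabled) when the count would drop to zero, so releasing a non-final
 * reference never touches the lock. The slow path is equivalent to the
 * older open-coded sequence:
 *
 *	local_irq_save(flags);
 *	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
 *		free_user(up, flags);	// releases uidhash_lock
 *	else
 *		local_irq_restore(flags);
 */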

struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		if (user_epoll_alloc(new)) {
			kmem_cache_free(uid_cachep, new);
			return NULL;
		}
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			user_epoll_free(new);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}
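
/*
 * A minimal usage sketch (hypothetical setuid-style caller, not from
 * this file):
 *
 *	struct user_struct *new_user = alloc_uid(make_kuid(ns, 1000));
 *	if (!new_user)
 *		return -ENOMEM;		// GFP_KERNEL allocation failed
 *	...
 *	free_uid(new_user);		// drop the reference when done
 */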

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	if (user_epoll_alloc(&root_user))
		panic("root_user epoll percpu counter alloc failed");

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
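
/*
 * Registered at subsys_initcall time so the slab cache, hash table and
 * root_user entry are in place before later initcalls (and eventually
 * userspace) can reach alloc_uid().
 */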
subsys_initcall(uid_cache_init);