// SPDX-License-Identifier: GPL-2.0-only
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
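/*
 * The three maps below each contain a single extent that identity-maps
 * IDs 0 through 4294967294, i.e. the whole 32-bit range except
 * (uid_t)-1, which is reserved as the invalid ID.
 */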
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.gid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.projid_map = {
		.nr_extents = 1,
		{
			.extent[0] = {
				.first = 0,
				.lower_first = 0,
				.count = 4294967295U,
			},
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_KEYS
	.keyring_name_list = LIST_HEAD_INIT(init_user_ns.keyring_name_list),
	.keyring_sem = __RWSEM_INITIALIZER(init_user_ns.keyring_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
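/*
 * Hash a uid into the table: fold the bits above UIDHASH_BITS back into
 * the low bits so that uids differing only in their high bits still
 * spread across buckets, then mask down to the table size.
 */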
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= REFCOUNT_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			refcount_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
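/*
 * A typical calling pattern (an illustrative sketch, not code taken from
 * elsewhere in the kernel):
 *
 *	struct user_struct *u = find_user(uid);
 *	if (u) {
 *		... inspect or charge against u ...
 *		free_uid(u);
 *	}
 */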
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

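/*
 * Drop one reference on @up. refcount_dec_and_lock_irqsave() only takes
 * uidhash_lock (and disables interrupts) when the count actually drops
 * to zero, so the common non-final put stays lock-free.
 */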
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
		free_user(up, flags);
}

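/*
 * Find-or-create: look the uid up optimistically and, only if it is
 * missing, allocate a new user_struct outside the lock, then re-check
 * under uidhash_lock in case another task inserted the same uid in the
 * meantime. A non-NULL result carries a reference that the caller must
 * drop with free_uid().
 */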
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		refcount_set(&new->__count, 1);
		ratelimit_state_init(&new->ratelimit, HZ, 100);
		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	return up;
}

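/*
 * Boot-time setup: create the slab cache, initialise every hash bucket,
 * and pre-insert root_user, which the init task's credentials already
 * reference.
 */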
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}
subsys_initcall(uid_cache_init);