/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2 of the
 * License.
 */

#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>

#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);

#define ucounts_hashfn(ns, uid) \
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid) \
	(ucounts_hashtable + ucounts_hashfn(ns, uid))


#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &current_user_ns()->set == set;
}

static int set_permissions(struct ctl_table_header *head,
			   struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};

static int zero = 0;
static int int_max = INT_MAX;
#define UCOUNT_ENTRY(name) \
	{ \
		.procname = name, \
		.maxlen = sizeof(int), \
		.mode = 0644, \
		.proc_handler = proc_dointvec_minmax, \
		.extra1 = &zero, \
		.extra2 = &int_max, \
	}
static struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
	{ }
};
#endif /* CONFIG_SYSCTL */

bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;
	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl);
	}
	if (!ns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}

void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}

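/* Look up the ucounts for (ns, uid) on @hashent; caller holds ucounts_lock. */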
static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
{
	struct ucounts *ucounts;

	hlist_for_each_entry(ucounts, hashent, node) {
		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
			return ucounts;
	}
	return NULL;
}

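/*
 * Find or create the ucounts for (ns, uid) and take a reference on it.
 * The lock is dropped around the GFP_KERNEL allocation, so the lookup is
 * repeated afterwards in case another task installed the entry first.
 * Returns NULL on allocation failure or if the count would overflow.
 */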
static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		spin_unlock_irq(&ucounts_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		new->count = 0;

		spin_lock_irq(&ucounts_lock);
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);
		} else {
			hlist_add_head(&new->node, hashent);
			ucounts = new;
		}
	}
	if (ucounts->count == INT_MAX)
		ucounts = NULL;
	else
		ucounts->count += 1;
	spin_unlock_irq(&ucounts_lock);
	return ucounts;
}

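/* Drop a reference; the entry is unhashed and freed when the count hits zero. */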
static void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

	spin_lock_irqsave(&ucounts_lock, flags);
	ucounts->count -= 1;
	if (!ucounts->count)
		hlist_del_init(&ucounts->node);
	else
		ucounts = NULL;
	spin_unlock_irqrestore(&ucounts_lock, flags);

	kfree(ucounts);
}

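/* Atomically increment @v, but only while its value stays below @u. */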
static inline bool atomic_inc_below(atomic_t *v, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c >= u))
			return false;
		old = atomic_cmpxchg(v, c, c+1);
		if (likely(old == c))
			return true;
		c = old;
	}
}

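/*
 * Charge one @type object to (ns, uid) and to every ancestor namespace,
 * honouring each level's ucount_max limit.  On failure the partial charges
 * are rolled back and NULL is returned.
 */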
struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;
	ucounts = get_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		int max;
		tns = iter->ns;
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}

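/* Undo inc_ucount(): uncharge each level and drop the ucounts reference. */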
void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
	struct ucounts *iter;
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		int dec = atomic_dec_if_positive(&iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
	}
	put_ucounts(ucounts);
}

static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *user_header;
	static struct ctl_table empty[1];
	/*
	 * It is necessary to register the user directory in the
	 * default set so that registrations in the child sets work
	 * properly.
	 */
	user_header = register_sysctl("user", empty);
	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
	return 0;
}
subsys_initcall(user_namespace_sysctl_init);
// SPDX-License-Identifier: GPL-2.0-only

#include <linux/stat.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/user_namespace.h>

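/*
 * Statically allocated ucounts for GLOBAL_ROOT_UID in the initial user
 * namespace; hashed at boot by user_namespace_sysctl_init().
 */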
struct ucounts init_ucounts = {
	.ns = &init_user_ns,
	.uid = GLOBAL_ROOT_UID,
	.count = ATOMIC_INIT(1),
};

#define UCOUNTS_HASHTABLE_BITS 10
static struct hlist_head ucounts_hashtable[(1 << UCOUNTS_HASHTABLE_BITS)];
static DEFINE_SPINLOCK(ucounts_lock);

#define ucounts_hashfn(ns, uid) \
	hash_long((unsigned long)__kuid_val(uid) + (unsigned long)(ns), \
		  UCOUNTS_HASHTABLE_BITS)
#define ucounts_hashentry(ns, uid) \
	(ucounts_hashtable + ucounts_hashfn(ns, uid))


#ifdef CONFIG_SYSCTL
static struct ctl_table_set *
set_lookup(struct ctl_table_root *root)
{
	return &current_user_ns()->set;
}

static int set_is_seen(struct ctl_table_set *set)
{
	return &current_user_ns()->set == set;
}

static int set_permissions(struct ctl_table_header *head,
			   const struct ctl_table *table)
{
	struct user_namespace *user_ns =
		container_of(head->set, struct user_namespace, set);
	int mode;

	/* Allow users with CAP_SYS_RESOURCE unrestrained access */
	if (ns_capable(user_ns, CAP_SYS_RESOURCE))
		mode = (table->mode & S_IRWXU) >> 6;
	else
		/* Allow all others at most read-only access */
		mode = table->mode & S_IROTH;
	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = set_permissions,
};

static long ue_zero = 0;
static long ue_int_max = INT_MAX;

#define UCOUNT_ENTRY(name) \
	{ \
		.procname = name, \
		.maxlen = sizeof(long), \
		.mode = 0644, \
		.proc_handler = proc_doulongvec_minmax, \
		.extra1 = &ue_zero, \
		.extra2 = &ue_int_max, \
	}
static const struct ctl_table user_table[] = {
	UCOUNT_ENTRY("max_user_namespaces"),
	UCOUNT_ENTRY("max_pid_namespaces"),
	UCOUNT_ENTRY("max_uts_namespaces"),
	UCOUNT_ENTRY("max_ipc_namespaces"),
	UCOUNT_ENTRY("max_net_namespaces"),
	UCOUNT_ENTRY("max_mnt_namespaces"),
	UCOUNT_ENTRY("max_cgroup_namespaces"),
	UCOUNT_ENTRY("max_time_namespaces"),
#ifdef CONFIG_INOTIFY_USER
	UCOUNT_ENTRY("max_inotify_instances"),
	UCOUNT_ENTRY("max_inotify_watches"),
#endif
#ifdef CONFIG_FANOTIFY
	UCOUNT_ENTRY("max_fanotify_groups"),
	UCOUNT_ENTRY("max_fanotify_marks"),
#endif
};
#endif /* CONFIG_SYSCTL */

bool setup_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	struct ctl_table *tbl;

	BUILD_BUG_ON(ARRAY_SIZE(user_table) != UCOUNT_COUNTS);
	setup_sysctl_set(&ns->set, &set_root, set_is_seen);
	tbl = kmemdup(user_table, sizeof(user_table), GFP_KERNEL);
	if (tbl) {
		int i;
		for (i = 0; i < UCOUNT_COUNTS; i++) {
			tbl[i].data = &ns->ucount_max[i];
		}
		ns->sysctls = __register_sysctl_table(&ns->set, "user", tbl,
						      ARRAY_SIZE(user_table));
	}
	if (!ns->sysctls) {
		kfree(tbl);
		retire_sysctl_set(&ns->set);
		return false;
	}
#endif
	return true;
}

void retire_userns_sysctls(struct user_namespace *ns)
{
#ifdef CONFIG_SYSCTL
	const struct ctl_table *tbl;

	tbl = ns->sysctls->ctl_table_arg;
	unregister_sysctl_table(ns->sysctls);
	retire_sysctl_set(&ns->set);
	kfree(tbl);
#endif
}

static struct ucounts *find_ucounts(struct user_namespace *ns, kuid_t uid, struct hlist_head *hashent)
{
	struct ucounts *ucounts;

	hlist_for_each_entry(ucounts, hashent, node) {
		if (uid_eq(ucounts->uid, uid) && (ucounts->ns == ns))
			return ucounts;
	}
	return NULL;
}

static void hlist_add_ucounts(struct ucounts *ucounts)
{
	struct hlist_head *hashent = ucounts_hashentry(ucounts->ns, ucounts->uid);
	spin_lock_irq(&ucounts_lock);
	hlist_add_head(&ucounts->node, hashent);
	spin_unlock_irq(&ucounts_lock);
}

static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
{
	/* Returns true on a successful get, false if the count wraps. */
	return !atomic_add_negative(1, &ucounts->count);
}

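/* Take a reference on @ucounts; returns NULL if the reference count wrapped. */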
struct ucounts *get_ucounts(struct ucounts *ucounts)
{
	if (!get_ucounts_or_wrap(ucounts)) {
		put_ucounts(ucounts);
		ucounts = NULL;
	}
	return ucounts;
}

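/*
 * Find or create the ucounts for (ns, uid) and return it with a reference
 * held.  A newly created entry also pins its user namespace via get_user_ns().
 */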
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
	struct ucounts *ucounts, *new;
	bool wrapped;

	spin_lock_irq(&ucounts_lock);
	ucounts = find_ucounts(ns, uid, hashent);
	if (!ucounts) {
		spin_unlock_irq(&ucounts_lock);

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		new->ns = ns;
		new->uid = uid;
		atomic_set(&new->count, 1);

		spin_lock_irq(&ucounts_lock);
		ucounts = find_ucounts(ns, uid, hashent);
		if (ucounts) {
			kfree(new);
		} else {
			hlist_add_head(&new->node, hashent);
			get_user_ns(new->ns);
			spin_unlock_irq(&ucounts_lock);
			return new;
		}
	}
	wrapped = !get_ucounts_or_wrap(ucounts);
	spin_unlock_irq(&ucounts_lock);
	if (wrapped) {
		put_ucounts(ucounts);
		return NULL;
	}
	return ucounts;
}

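/*
 * Drop a reference; the final put unhashes the entry, drops the user
 * namespace reference, and frees it.
 */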
void put_ucounts(struct ucounts *ucounts)
{
	unsigned long flags;

	if (atomic_dec_and_lock_irqsave(&ucounts->count, &ucounts_lock, flags)) {
		hlist_del_init(&ucounts->node);
		spin_unlock_irqrestore(&ucounts_lock, flags);
		put_user_ns(ucounts->ns);
		kfree(ucounts);
	}
}

static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
{
	long c, old;
	c = atomic_long_read(v);
	for (;;) {
		if (unlikely(c >= u))
			return false;
		old = atomic_long_cmpxchg(v, c, c+1);
		if (likely(old == c))
			return true;
		c = old;
	}
}

struct ucounts *inc_ucount(struct user_namespace *ns, kuid_t uid,
			   enum ucount_type type)
{
	struct ucounts *ucounts, *iter, *bad;
	struct user_namespace *tns;
	ucounts = alloc_ucounts(ns, uid);
	for (iter = ucounts; iter; iter = tns->ucounts) {
		long max;
		tns = iter->ns;
		max = READ_ONCE(tns->ucount_max[type]);
		if (!atomic_long_inc_below(&iter->ucount[type], max))
			goto fail;
	}
	return ucounts;
fail:
	bad = iter;
	for (iter = ucounts; iter != bad; iter = iter->ns->ucounts)
		atomic_long_dec(&iter->ucount[type]);

	put_ucounts(ucounts);
	return NULL;
}

void dec_ucount(struct ucounts *ucounts, enum ucount_type type)
{
	struct ucounts *iter;
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_dec_if_positive(&iter->ucount[type]);
		WARN_ON_ONCE(dec < 0);
	}
	put_ucounts(ucounts);
}

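/*
 * Add @v to the @type rlimit counter at every level of the hierarchy.
 * Returns the new value at the bottom level, or LONG_MAX if any level
 * overflowed or exceeded the limit in effect for it.
 */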
long inc_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)
{
	struct ucounts *iter;
	long max = LONG_MAX;
	long ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long new = atomic_long_add_return(v, &iter->rlimit[type]);
		if (new < 0 || new > max)
			ret = LONG_MAX;
		else if (iter == ucounts)
			ret = new;
		max = get_userns_rlimit_max(iter->ns, type);
	}
	return ret;
}

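/*
 * Subtract @v from the @type rlimit counter at every level.  Returns true
 * if the counter at the bottom level dropped to zero.
 */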
bool dec_rlimit_ucounts(struct ucounts *ucounts, enum rlimit_type type, long v)
{
	struct ucounts *iter;
	long new = -1; /* Silence compiler warning */
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long dec = atomic_long_sub_return(v, &iter->rlimit[type]);
		WARN_ON_ONCE(dec < 0);
		if (iter == ucounts)
			new = dec;
	}
	return (new == 0);
}

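/*
 * Walk from @ucounts up to (but not including) @last, dropping one @type
 * rlimit charge per level and releasing that level's ucounts reference
 * once its counter returns to zero.
 */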
static void do_dec_rlimit_put_ucounts(struct ucounts *ucounts,
				      struct ucounts *last, enum rlimit_type type)
{
	struct ucounts *iter, *next;
	for (iter = ucounts; iter != last; iter = next) {
		long dec = atomic_long_sub_return(1, &iter->rlimit[type]);
		WARN_ON_ONCE(dec < 0);
		next = iter->ns->ucounts;
		if (dec == 0)
			put_ucounts(iter);
	}
}

void dec_rlimit_put_ucounts(struct ucounts *ucounts, enum rlimit_type type)
{
	do_dec_rlimit_put_ucounts(ucounts, NULL, type);
}

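/*
 * Charge one @type rlimit unit at every level, enforcing the per-namespace
 * limits unless @override_rlimit is set.  A ucounts reference is taken at
 * each level whose counter goes from zero to one.  Returns the new count at
 * the bottom level, or 0 on failure after unwinding the partial charges.
 */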
long inc_rlimit_get_ucounts(struct ucounts *ucounts, enum rlimit_type type,
			    bool override_rlimit)
{
	/* Caller must hold a reference to ucounts */
	struct ucounts *iter;
	long max = LONG_MAX;
	long dec, ret = 0;

	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long new = atomic_long_add_return(1, &iter->rlimit[type]);
		if (new < 0 || new > max)
			goto dec_unwind;
		if (iter == ucounts)
			ret = new;
		if (!override_rlimit)
			max = get_userns_rlimit_max(iter->ns, type);
		/*
		 * Grab an extra ucount reference for the caller when
		 * the rlimit count was previously 0.
		 */
		if (new != 1)
			continue;
		if (!get_ucounts(iter))
			goto dec_unwind;
	}
	return ret;
dec_unwind:
	dec = atomic_long_sub_return(1, &iter->rlimit[type]);
	WARN_ON_ONCE(dec < 0);
	do_dec_rlimit_put_ucounts(ucounts, iter, type);
	return 0;
}

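/*
 * Returns true if, at any level of the hierarchy, the current @type value
 * exceeds the limit in effect at that level, starting from @rlimit
 * (clamped to LONG_MAX) at the bottom level.
 */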
bool is_rlimit_overlimit(struct ucounts *ucounts, enum rlimit_type type, unsigned long rlimit)
{
	struct ucounts *iter;
	long max = rlimit;
	if (rlimit > LONG_MAX)
		max = LONG_MAX;
	for (iter = ucounts; iter; iter = iter->ns->ucounts) {
		long val = get_rlimit_value(iter, type);
		if (val < 0 || val > max)
			return true;
		max = get_userns_rlimit_max(iter->ns, type);
	}
	return false;
}

static __init int user_namespace_sysctl_init(void)
{
#ifdef CONFIG_SYSCTL
	static struct ctl_table_header *user_header;
	static struct ctl_table empty[1];
	/*
	 * It is necessary to register the user directory in the
	 * default set so that registrations in the child sets work
	 * properly.
	 */
	user_header = register_sysctl_sz("user", empty, 0);
	kmemleak_ignore(user_header);
	BUG_ON(!user_header);
	BUG_ON(!setup_userns_sysctls(&init_user_ns));
#endif
	hlist_add_ucounts(&init_ucounts);
	inc_rlimit_ucounts(&init_ucounts, UCOUNT_RLIMIT_NPROC, 1);
	return 0;
}
subsys_initcall(user_namespace_sysctl_init);