// SPDX-License-Identifier: GPL-2.0-only

#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/security.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

static struct kmem_cache *user_ns_cachep __read_mostly;
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);

static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
{
        return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
}

static void dec_user_namespaces(struct ucounts *ucounts)
{
        return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
}

static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
        /* Start with the same capabilities as init but useless for doing
         * anything as the capabilities are bound to the new user namespace.
         */
        cred->securebits = SECUREBITS_DEFAULT;
        cred->cap_inheritable = CAP_EMPTY_SET;
        cred->cap_permitted = CAP_FULL_SET;
        cred->cap_effective = CAP_FULL_SET;
        cred->cap_ambient = CAP_EMPTY_SET;
        cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
        key_put(cred->request_key_auth);
        cred->request_key_auth = NULL;
#endif
        /* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
        cred->user_ns = user_ns;
}

static unsigned long enforced_nproc_rlimit(void)
{
        unsigned long limit = RLIM_INFINITY;

        /* Is RLIMIT_NPROC currently enforced? */
        if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) ||
            (current_user_ns() != &init_user_ns))
                limit = rlimit(RLIMIT_NPROC);

        return limit;
}

/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 */
int create_user_ns(struct cred *new)
{
        struct user_namespace *ns, *parent_ns = new->user_ns;
        kuid_t owner = new->euid;
        kgid_t group = new->egid;
        struct ucounts *ucounts;
        int ret, i;

        ret = -ENOSPC;
        if (parent_ns->level > 32)
                goto fail;

        ucounts = inc_user_namespaces(parent_ns, owner);
        if (!ucounts)
                goto fail;

        /*
         * Verify that we can not violate the policy of which files
         * may be accessed that is specified by the root directory,
         * by verifying that the root directory is at the root of the
         * mount namespace which allows all files to be accessed.
         */
        ret = -EPERM;
        if (current_chrooted())
                goto fail_dec;

        /* The creator needs a mapping in the parent user namespace
         * or else we won't be able to reasonably tell userspace who
         * created a user_namespace.
         */
        ret = -EPERM;
        if (!kuid_has_mapping(parent_ns, owner) ||
            !kgid_has_mapping(parent_ns, group))
                goto fail_dec;

        ret = security_create_user_ns(new);
        if (ret < 0)
                goto fail_dec;

        ret = -ENOMEM;
        ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
        if (!ns)
                goto fail_dec;

        ns->parent_could_setfcap = cap_raised(new->cap_effective, CAP_SETFCAP);
        ret = ns_alloc_inum(&ns->ns);
        if (ret)
                goto fail_free;
        ns->ns.ops = &userns_operations;

        refcount_set(&ns->ns.count, 1);
        /* Leave the new->user_ns reference with the new user namespace. */
        ns->parent = parent_ns;
        ns->level = parent_ns->level + 1;
        ns->owner = owner;
        ns->group = group;
        INIT_WORK(&ns->work, free_user_ns);
        for (i = 0; i < UCOUNT_COUNTS; i++) {
                ns->ucount_max[i] = INT_MAX;
        }
        set_userns_rlimit_max(ns, UCOUNT_RLIMIT_NPROC, enforced_nproc_rlimit());
        set_userns_rlimit_max(ns, UCOUNT_RLIMIT_MSGQUEUE, rlimit(RLIMIT_MSGQUEUE));
        set_userns_rlimit_max(ns, UCOUNT_RLIMIT_SIGPENDING, rlimit(RLIMIT_SIGPENDING));
        set_userns_rlimit_max(ns, UCOUNT_RLIMIT_MEMLOCK, rlimit(RLIMIT_MEMLOCK));
        ns->ucounts = ucounts;

        /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
        mutex_lock(&userns_state_mutex);
        ns->flags = parent_ns->flags;
        mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_KEYS
        INIT_LIST_HEAD(&ns->keyring_name_list);
        init_rwsem(&ns->keyring_sem);
#endif
        ret = -ENOMEM;
        if (!setup_userns_sysctls(ns))
                goto fail_keyring;

        set_cred_user_ns(new, ns);
        return 0;
fail_keyring:
#ifdef CONFIG_PERSISTENT_KEYRINGS
        key_put(ns->persistent_keyring_register);
#endif
        ns_free_inum(&ns->ns);
fail_free:
        kmem_cache_free(user_ns_cachep, ns);
fail_dec:
        dec_user_namespaces(ucounts);
fail:
        return ret;
}

int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
        struct cred *cred;
        int err = -ENOMEM;

        if (!(unshare_flags & CLONE_NEWUSER))
                return 0;

        cred = prepare_creds();
        if (cred) {
                err = create_user_ns(cred);
                if (err)
                        put_cred(cred);
                else
                        *new_cred = cred;
        }

        return err;
}

static void free_user_ns(struct work_struct *work)
{
        struct user_namespace *parent, *ns =
                container_of(work, struct user_namespace, work);

        do {
                struct ucounts *ucounts = ns->ucounts;
                parent = ns->parent;
                if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
                        kfree(ns->gid_map.forward);
                        kfree(ns->gid_map.reverse);
                }
                if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
                        kfree(ns->uid_map.forward);
                        kfree(ns->uid_map.reverse);
                }
                if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
                        kfree(ns->projid_map.forward);
                        kfree(ns->projid_map.reverse);
                }
                retire_userns_sysctls(ns);
                key_free_user_ns(ns);
                ns_free_inum(&ns->ns);
                kmem_cache_free(user_ns_cachep, ns);
                dec_user_namespaces(ucounts);
                ns = parent;
        } while (refcount_dec_and_test(&parent->ns.count));
}

void __put_user_ns(struct user_namespace *ns)
{
        schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);

/**
 * idmap_key struct holds the information necessary to find an idmapping in a
 * sorted idmap array. It is passed to cmp_map_id() as first argument.
 */
struct idmap_key {
        bool map_up; /* true -> id from kid; false -> kid from id */
        u32 id; /* id to find */
        u32 count; /* == 0 unless used with map_id_range_down() */
};

/**
 * cmp_map_id - Function to be passed to bsearch() to find the requested
 * idmapping. Expects struct idmap_key to be passed via @k.
 */
static int cmp_map_id(const void *k, const void *e)
{
        u32 first, last, id2;
        const struct idmap_key *key = k;
        const struct uid_gid_extent *el = e;

        id2 = key->id + key->count - 1;

        /* handle map_id_{down,up}() */
        if (key->map_up)
                first = el->lower_first;
        else
                first = el->first;

        last = first + el->count - 1;

        if (key->id >= first && key->id <= last &&
            (id2 >= first && id2 <= last))
                return 0;

        if (key->id < first || id2 < first)
                return -1;

        return 1;
}

/**
 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
        struct idmap_key key;

        key.map_up = false;
        key.count = count;
        key.id = id;

        return bsearch(&key, map->forward, extents,
                       sizeof(struct uid_gid_extent), cmp_map_id);
}

/**
 * map_id_range_down_base - Find idmap via binary search in static extent array.
 * Can only be called if number of mappings is equal or less than
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
{
        unsigned idx;
        u32 first, last, id2;

        id2 = id + count - 1;

        /* Find the matching extent */
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last &&
                    (id2 >= first && id2 <= last))
                        return &map->extent[idx];
        }
        return NULL;
}

static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
        struct uid_gid_extent *extent;
        unsigned extents = map->nr_extents;
        smp_rmb();

        if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                extent = map_id_range_down_base(extents, map, id, count);
        else
                extent = map_id_range_down_max(extents, map, id, count);

        /* Map the id or note failure */
        if (extent)
                id = (id - extent->first) + extent->lower_first;
        else
                id = (u32) -1;

        return id;
}

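/*
 * Worked example (illustrative, not part of the original source): with a
 * single extent { .first = 0, .lower_first = 100000, .count = 65536 },
 * map_id_down(map, 1000) returns 101000, while any id >= 65536 misses
 * every extent and comes back as (u32) -1.
 */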
static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
        return map_id_range_down(map, id, 1);
}

/**
 * map_id_up_base - Find idmap via binary search in static extent array.
 * Can only be called if number of mappings is equal or less than
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
{
        unsigned idx;
        u32 first, last;

        /* Find the matching extent */
        for (idx = 0; idx < extents; idx++) {
                first = map->extent[idx].lower_first;
                last = first + map->extent[idx].count - 1;
                if (id >= first && id <= last)
                        return &map->extent[idx];
        }
        return NULL;
}

/**
 * map_id_up_max - Find idmap via binary search in ordered idmap array.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static struct uid_gid_extent *
map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
{
        struct idmap_key key;

        key.map_up = true;
        key.count = 1;
        key.id = id;

        return bsearch(&key, map->reverse, extents,
                       sizeof(struct uid_gid_extent), cmp_map_id);
}

static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
        struct uid_gid_extent *extent;
        unsigned extents = map->nr_extents;
        smp_rmb();

        if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                extent = map_id_up_base(extents, map, id);
        else
                extent = map_id_up_max(extents, map, id);

        /* Map the id or note failure */
        if (extent)
                id = (id - extent->lower_first) + extent->first;
        else
                id = (u32) -1;

        return id;
}

/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns: User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned. Callers are expected to test
 * for and handle INVALID_UID being returned. INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
        /* Map the uid to a global kernel uid */
        return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);

/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
        /* Map the uid from a global kernel uid */
        return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);

/**
 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kuid, from_kuid_munged never fails and always
 * returns a valid uid. This makes from_kuid_munged appropriate
 * for use in syscalls like stat and getuid where failing the
 * system call and failing to provide a valid uid are not
 * options.
 *
 * If @kuid has no mapping in @targ overflowuid is returned.
 */
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
        uid_t uid;
        uid = from_kuid(targ, kuid);

        if (uid == (uid_t) -1)
                uid = overflowuid;
        return uid;
}
EXPORT_SYMBOL(from_kuid_munged);

/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns: User namespace that the gid is in
 * @gid: group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned. Callers are expected to test
 * for and handle INVALID_GID being returned. INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
        /* Map the gid to a global kernel gid */
        return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);

/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
        /* Map the gid from a global kernel gid */
        return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);

/**
 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kgid, from_kgid_munged never fails and always
 * returns a valid gid. This makes from_kgid_munged appropriate
 * for use in syscalls like stat and getgid where failing the
 * system call and failing to provide a valid gid are not options.
 *
 * If @kgid has no mapping in @targ overflowgid is returned.
 */
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
        gid_t gid;
        gid = from_kgid(targ, kgid);

        if (gid == (gid_t) -1)
                gid = overflowgid;
        return gid;
}
EXPORT_SYMBOL(from_kgid_munged);

/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns: User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned. Callers are expected to test
 * for and handle INVALID_PROJID being returned. INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
        /* Map the projid to a global kernel projid */
        return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);

/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
        /* Map the projid from a global kernel projid */
        return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid. This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not
 * options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
        projid_t projid;
        projid = from_kprojid(targ, kprojid);

        if (projid == (projid_t) -1)
                projid = OVERFLOW_PROJID;
        return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);


static int uid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        uid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        gid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        struct uid_gid_extent *extent = v;
        struct user_namespace *lower_ns;
        projid_t lower;

        lower_ns = seq_user_ns(seq);
        if ((lower_ns == ns) && lower_ns->parent)
                lower_ns = lower_ns->parent;

        lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

        seq_printf(seq, "%10u %10u %10u\n",
                   extent->first,
                   lower,
                   extent->count);

        return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
                     struct uid_gid_map *map)
{
        loff_t pos = *ppos;
        unsigned extents = map->nr_extents;
        smp_rmb();

        if (pos >= extents)
                return NULL;

        if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                return &map->extent[pos];

        return &map->forward[pos];
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
        struct user_namespace *ns = seq->private;

        return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
        return;
}

const struct seq_operations proc_uid_seq_operations = {
        .start = uid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
        .start = gid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
        .start = projid_m_start,
        .stop = m_stop,
        .next = m_next,
        .show = projid_m_show,
};

static bool mappings_overlap(struct uid_gid_map *new_map,
                             struct uid_gid_extent *extent)
{
        u32 upper_first, lower_first, upper_last, lower_last;
        unsigned idx;

        upper_first = extent->first;
        lower_first = extent->lower_first;
        upper_last = upper_first + extent->count - 1;
        lower_last = lower_first + extent->count - 1;

        for (idx = 0; idx < new_map->nr_extents; idx++) {
                u32 prev_upper_first, prev_lower_first;
                u32 prev_upper_last, prev_lower_last;
                struct uid_gid_extent *prev;

                if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                        prev = &new_map->extent[idx];
                else
                        prev = &new_map->forward[idx];

                prev_upper_first = prev->first;
                prev_lower_first = prev->lower_first;
                prev_upper_last = prev_upper_first + prev->count - 1;
                prev_lower_last = prev_lower_first + prev->count - 1;

                /* Does the upper range intersect a previous extent? */
                if ((prev_upper_first <= upper_last) &&
                    (prev_upper_last >= upper_first))
                        return true;

                /* Does the lower range intersect a previous extent? */
                if ((prev_lower_first <= lower_last) &&
                    (prev_lower_last >= lower_first))
                        return true;
        }
        return false;
}

/**
 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
 * Takes care to allocate a 4K block of memory if the number of mappings exceeds
 * UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
{
        struct uid_gid_extent *dest;

        if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
                struct uid_gid_extent *forward;

                /* Allocate memory for 340 mappings. */
                forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
                                        sizeof(struct uid_gid_extent),
                                        GFP_KERNEL);
                if (!forward)
                        return -ENOMEM;

                /* Copy over memory. Only set up memory for the forward pointer.
                 * Defer the memory setup for the reverse pointer.
                 */
                memcpy(forward, map->extent,
                       map->nr_extents * sizeof(map->extent[0]));

                map->forward = forward;
                map->reverse = NULL;
        }

        if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
                dest = &map->extent[map->nr_extents];
        else
                dest = &map->forward[map->nr_extents];

        *dest = *extent;
        map->nr_extents++;
        return 0;
}
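
/*
 * Note: up to UID_GID_MAP_MAX_BASE_EXTENTS extents live inline in
 * map->extent[]; past that point insert_extent() switches to the
 * kmalloc'ed map->forward array, and sort_idmaps() later builds the
 * matching map->reverse array so that the *_max() lookup helpers can
 * bsearch() in both directions.
 */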

/* cmp function to sort() forward mappings */
static int cmp_extents_forward(const void *a, const void *b)
{
        const struct uid_gid_extent *e1 = a;
        const struct uid_gid_extent *e2 = b;

        if (e1->first < e2->first)
                return -1;

        if (e1->first > e2->first)
                return 1;

        return 0;
}

/* cmp function to sort() reverse mappings */
static int cmp_extents_reverse(const void *a, const void *b)
{
        const struct uid_gid_extent *e1 = a;
        const struct uid_gid_extent *e2 = b;

        if (e1->lower_first < e2->lower_first)
                return -1;

        if (e1->lower_first > e2->lower_first)
                return 1;

        return 0;
}

/**
 * sort_idmaps - Sorts an array of idmap entries.
 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int sort_idmaps(struct uid_gid_map *map)
{
        if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                return 0;

        /* Sort forward array. */
        sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
             cmp_extents_forward, NULL);

        /* Only copy the memory from forward we actually need. */
        map->reverse = kmemdup(map->forward,
                               map->nr_extents * sizeof(struct uid_gid_extent),
                               GFP_KERNEL);
        if (!map->reverse)
                return -ENOMEM;

        /* Sort reverse array. */
        sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
             cmp_extents_reverse, NULL);

        return 0;
}

/**
 * verify_root_map() - check the uid 0 mapping
 * @file: idmapping file
 * @map_ns: user namespace of the target process
 * @new_map: requested idmap
 *
 * If a process requests mapping parent uid 0 into the new ns, verify that the
 * process writing the map had the CAP_SETFCAP capability as the target process
 * will be able to write fscaps that are valid in ancestor user namespaces.
 *
 * Return: true if the mapping is allowed, false if not.
 */
static bool verify_root_map(const struct file *file,
                            struct user_namespace *map_ns,
                            struct uid_gid_map *new_map)
{
        int idx;
        const struct user_namespace *file_ns = file->f_cred->user_ns;
        struct uid_gid_extent *extent0 = NULL;

        for (idx = 0; idx < new_map->nr_extents; idx++) {
                if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                        extent0 = &new_map->extent[idx];
                else
                        extent0 = &new_map->forward[idx];
                if (extent0->lower_first == 0)
                        break;

                extent0 = NULL;
        }

        if (!extent0)
                return true;

        if (map_ns == file_ns) {
                /* The process unshared its ns and is writing to its own
                 * /proc/self/uid_map. User already has full capabilities in
                 * the new namespace. Verify that the parent had CAP_SETFCAP
                 * when it unshared.
                 */
                if (!file_ns->parent_could_setfcap)
                        return false;
        } else {
                /* Process p1 is writing to uid_map of p2, who is in a child
                 * user namespace to p1's. Verify that the opener of the map
                 * file has CAP_SETFCAP against the parent of the new map
                 * namespace. */
                if (!file_ns_capable(file, map_ns->parent, CAP_SETFCAP))
                        return false;
        }

        return true;
}

static ssize_t map_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos,
                         int cap_setid,
                         struct uid_gid_map *map,
                         struct uid_gid_map *parent_map)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *map_ns = seq->private;
        struct uid_gid_map new_map;
        unsigned idx;
        struct uid_gid_extent extent;
        char *kbuf = NULL, *pos, *next_line;
        ssize_t ret;

        /* Only allow < page size writes at the beginning of the file */
        if ((*ppos != 0) || (count >= PAGE_SIZE))
                return -EINVAL;

        /* Slurp in the user data */
        kbuf = memdup_user_nul(buf, count);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);

        /*
         * The userns_state_mutex serializes all writes to any given map.
         *
         * Any map is only ever written once.
         *
         * An id map fits within 1 cache line on most architectures.
         *
         * On read nothing needs to be done unless you are on an
         * architecture with a crazy cache coherency model like alpha.
         *
         * There is a one time data dependency between reading the
         * count of the extents and the values of the extents. The
         * desired behavior is to see the values of the extents that
         * were written before the count of the extents.
         *
         * To achieve this smp_wmb() is used to guarantee the write
         * order, and smp_rmb() ensures that we don't have crazy
         * architectures returning stale data.
         */
        mutex_lock(&userns_state_mutex);

        memset(&new_map, 0, sizeof(struct uid_gid_map));

        ret = -EPERM;
        /* Only allow one successful write to the map */
        if (map->nr_extents != 0)
                goto out;

        /*
         * Adjusting namespace settings requires capabilities on the target.
         */
        if (cap_valid(cap_setid) && !file_ns_capable(file, map_ns, CAP_SYS_ADMIN))
                goto out;

        /* Parse the user data */
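        /*
         * Each line has the form "<first> <lower_first> <count>"; for
         * example, writing "0 100000 65536" maps ids 0..65535 in this
         * namespace onto ids 100000..165535 in the parent namespace.
         */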
        ret = -EINVAL;
        pos = kbuf;
        for (; pos; pos = next_line) {

                /* Find the end of line and ensure I don't look past it */
                next_line = strchr(pos, '\n');
                if (next_line) {
                        *next_line = '\0';
                        next_line++;
                        if (*next_line == '\0')
                                next_line = NULL;
                }

                pos = skip_spaces(pos);
                extent.first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent.lower_first = simple_strtoul(pos, &pos, 10);
                if (!isspace(*pos))
                        goto out;

                pos = skip_spaces(pos);
                extent.count = simple_strtoul(pos, &pos, 10);
                if (*pos && !isspace(*pos))
                        goto out;

                /* Verify there is no trailing junk on the line */
                pos = skip_spaces(pos);
                if (*pos != '\0')
                        goto out;

                /* Verify we have been given valid starting values */
                if ((extent.first == (u32) -1) ||
                    (extent.lower_first == (u32) -1))
                        goto out;

                /* Verify count is not zero and does not cause the
                 * extent to wrap
                 */
                if ((extent.first + extent.count) <= extent.first)
                        goto out;
                if ((extent.lower_first + extent.count) <=
                    extent.lower_first)
                        goto out;

                /* Do the ranges in extent overlap any previous extents? */
                if (mappings_overlap(&new_map, &extent))
                        goto out;

                if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
                    (next_line != NULL))
                        goto out;

                ret = insert_extent(&new_map, &extent);
                if (ret < 0)
                        goto out;
                ret = -EINVAL;
        }
        /* Be very certain the new map actually exists */
        if (new_map.nr_extents == 0)
                goto out;

        ret = -EPERM;
        /* Validate that the user is allowed to use the user ids mapped to. */
        if (!new_idmap_permitted(file, map_ns, cap_setid, &new_map))
                goto out;

        ret = -EPERM;
        /* Map the lower ids from the parent user namespace to the
         * kernel global id space.
         */
        for (idx = 0; idx < new_map.nr_extents; idx++) {
                struct uid_gid_extent *e;
                u32 lower_first;

                if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
                        e = &new_map.extent[idx];
                else
                        e = &new_map.forward[idx];

                lower_first = map_id_range_down(parent_map,
                                                e->lower_first,
                                                e->count);

                /* Fail if we can not map the specified extent to
                 * the kernel global id space.
                 */
                if (lower_first == (u32) -1)
                        goto out;

                e->lower_first = lower_first;
        }

        /*
         * If we want to use binary search for lookup, this clones the extent
         * array and sorts both copies.
         */
        ret = sort_idmaps(&new_map);
        if (ret < 0)
                goto out;

        /* Install the map */
        if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
                memcpy(map->extent, new_map.extent,
                       new_map.nr_extents * sizeof(new_map.extent[0]));
        } else {
                map->forward = new_map.forward;
                map->reverse = new_map.reverse;
        }
        smp_wmb();
        map->nr_extents = new_map.nr_extents;

        *ppos = count;
        ret = count;
out:
        if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
                kfree(new_map.forward);
                kfree(new_map.reverse);
                map->forward = NULL;
                map->reverse = NULL;
                map->nr_extents = 0;
        }

        mutex_unlock(&userns_state_mutex);
        kfree(kbuf);
        return ret;
}

ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
                           size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        return map_write(file, buf, size, ppos, CAP_SETUID,
                         &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
                           size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        return map_write(file, buf, size, ppos, CAP_SETGID,
                         &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
                              size_t size, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        struct user_namespace *seq_ns = seq_user_ns(seq);

        if (!ns->parent)
                return -EPERM;

        if ((seq_ns != ns) && (seq_ns != ns->parent))
                return -EPERM;

        /* Anyone can set any valid project id, no capability needed */
        return map_write(file, buf, size, ppos, -1,
                         &ns->projid_map, &ns->parent->projid_map);
}

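/*
 * Example (illustrative): an unprivileged process that has just unshared a
 * user namespace may write the single line "0 1000 1" to its own uid_map,
 * mapping uid 0 inside the namespace to its own euid 1000 outside; any
 * wider mapping requires CAP_SETUID/CAP_SETGID in the parent namespace.
 */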
static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
{
        const struct cred *cred = file->f_cred;

        if (cap_setid == CAP_SETUID && !verify_root_map(file, ns, new_map))
                return false;

        /* Don't allow mappings that would allow anything that wouldn't
         * be allowed without the establishment of unprivileged mappings.
         */
        if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
            uid_eq(ns->owner, cred->euid)) {
                u32 id = new_map->extent[0].lower_first;
                if (cap_setid == CAP_SETUID) {
                        kuid_t uid = make_kuid(ns->parent, id);
                        if (uid_eq(uid, cred->euid))
                                return true;
                } else if (cap_setid == CAP_SETGID) {
                        kgid_t gid = make_kgid(ns->parent, id);
                        if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
                            gid_eq(gid, cred->egid))
                                return true;
                }
        }

        /* Allow anyone to set a mapping that doesn't require privilege */
        if (!cap_valid(cap_setid))
                return true;

        /* Allow the specified ids if we have the appropriate capability
         * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
         * And the opener of the id file also has the appropriate capability.
         */
        if (ns_capable(ns->parent, cap_setid) &&
            file_ns_capable(file, ns->parent, cap_setid))
                return true;

        return false;
}

int proc_setgroups_show(struct seq_file *seq, void *v)
{
        struct user_namespace *ns = seq->private;
        unsigned long userns_flags = READ_ONCE(ns->flags);

        seq_printf(seq, "%s\n",
                   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
                   "allow" : "deny");
        return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        struct seq_file *seq = file->private_data;
        struct user_namespace *ns = seq->private;
        char kbuf[8], *pos;
        bool setgroups_allowed;
        ssize_t ret;

        /* Only allow a very narrow range of strings to be written */
        ret = -EINVAL;
        if ((*ppos != 0) || (count >= sizeof(kbuf)))
                goto out;

        /* What was written? */
        ret = -EFAULT;
        if (copy_from_user(kbuf, buf, count))
                goto out;
        kbuf[count] = '\0';
        pos = kbuf;

        /* What is being requested? */
        ret = -EINVAL;
        if (strncmp(pos, "allow", 5) == 0) {
                pos += 5;
                setgroups_allowed = true;
        }
        else if (strncmp(pos, "deny", 4) == 0) {
                pos += 4;
                setgroups_allowed = false;
        }
        else
                goto out;

        /* Verify there is no trailing junk on the line */
        pos = skip_spaces(pos);
        if (*pos != '\0')
                goto out;

        ret = -EPERM;
        mutex_lock(&userns_state_mutex);
        if (setgroups_allowed) {
                /* Enabling setgroups after setgroups has been disabled
                 * is not allowed.
                 */
                if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
                        goto out_unlock;
        } else {
                /* Permanently disabling setgroups after setgroups has
                 * been enabled by writing the gid_map is not allowed.
                 */
                if (ns->gid_map.nr_extents != 0)
                        goto out_unlock;
                ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
        }
        mutex_unlock(&userns_state_mutex);

        /* Report a successful write */
        *ppos = count;
        ret = count;
out:
        return ret;
out_unlock:
        mutex_unlock(&userns_state_mutex);
        goto out;
}

bool userns_may_setgroups(const struct user_namespace *ns)
{
        bool allowed;

        mutex_lock(&userns_state_mutex);
        /* It is not safe to use setgroups until a gid mapping in
         * the user namespace has been established.
         */
        allowed = ns->gid_map.nr_extents != 0;
        /* Is setgroups allowed? */
        allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
        mutex_unlock(&userns_state_mutex);

        return allowed;
}

/*
 * Returns true if @child is the same namespace or a descendant of
 * @ancestor.
 */
bool in_userns(const struct user_namespace *ancestor,
               const struct user_namespace *child)
{
        const struct user_namespace *ns;
        for (ns = child; ns->level > ancestor->level; ns = ns->parent)
                ;
        return (ns == ancestor);
}
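
/*
 * For example, because every namespace descends from init_user_ns (level 0),
 * in_userns(&init_user_ns, ns) is true for any @ns.
 */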

bool current_in_userns(const struct user_namespace *target_ns)
{
        return in_userns(target_ns, current_user_ns());
}
EXPORT_SYMBOL(current_in_userns);

static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
        return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
        struct user_namespace *user_ns;

        rcu_read_lock();
        user_ns = get_user_ns(__task_cred(task)->user_ns);
        rcu_read_unlock();

        return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
        put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsset *nsset, struct ns_common *ns)
{
        struct user_namespace *user_ns = to_user_ns(ns);
        struct cred *cred;

        /* Don't allow gaining capabilities by reentering
         * the same user namespace.
         */
        if (user_ns == current_user_ns())
                return -EINVAL;

        /* Tasks that share a thread group must share a user namespace */
        if (!thread_group_empty(current))
                return -EINVAL;

        if (current->fs->users != 1)
                return -EINVAL;

        if (!ns_capable(user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        cred = nsset_cred(nsset);
        if (!cred)
                return -EINVAL;

        put_user_ns(cred->user_ns);
        set_cred_user_ns(cred, get_user_ns(user_ns));

        if (set_cred_ucounts(cred) < 0)
                return -EINVAL;

        return 0;
}

struct ns_common *ns_get_owner(struct ns_common *ns)
{
        struct user_namespace *my_user_ns = current_user_ns();
        struct user_namespace *owner, *p;

        /* See if the owner is in the current user namespace */
        owner = p = ns->ops->owner(ns);
        for (;;) {
                if (!p)
                        return ERR_PTR(-EPERM);
                if (p == my_user_ns)
                        break;
                p = p->parent;
        }

        return &get_user_ns(owner)->ns;
}

static struct user_namespace *userns_owner(struct ns_common *ns)
{
        return to_user_ns(ns)->parent;
}

const struct proc_ns_operations userns_operations = {
        .name = "user",
        .type = CLONE_NEWUSER,
        .get = userns_get,
        .put = userns_put,
        .install = userns_install,
        .owner = userns_owner,
        .get_parent = ns_get_owner,
};

static __init int user_namespaces_init(void)
{
        user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC | SLAB_ACCOUNT);
        return 0;
}
subsys_initcall(user_namespaces_init);
1// SPDX-License-Identifier: GPL-2.0-only
2
3#include <linux/export.h>
4#include <linux/nsproxy.h>
5#include <linux/slab.h>
6#include <linux/sched/signal.h>
7#include <linux/user_namespace.h>
8#include <linux/proc_ns.h>
9#include <linux/highuid.h>
10#include <linux/cred.h>
11#include <linux/securebits.h>
12#include <linux/keyctl.h>
13#include <linux/key-type.h>
14#include <keys/user-type.h>
15#include <linux/seq_file.h>
16#include <linux/fs.h>
17#include <linux/uaccess.h>
18#include <linux/ctype.h>
19#include <linux/projid.h>
20#include <linux/fs_struct.h>
21#include <linux/bsearch.h>
22#include <linux/sort.h>
23
24static struct kmem_cache *user_ns_cachep __read_mostly;
25static DEFINE_MUTEX(userns_state_mutex);
26
27static bool new_idmap_permitted(const struct file *file,
28 struct user_namespace *ns, int cap_setid,
29 struct uid_gid_map *map);
30static void free_user_ns(struct work_struct *work);
31
32static struct ucounts *inc_user_namespaces(struct user_namespace *ns, kuid_t uid)
33{
34 return inc_ucount(ns, uid, UCOUNT_USER_NAMESPACES);
35}
36
37static void dec_user_namespaces(struct ucounts *ucounts)
38{
39 return dec_ucount(ucounts, UCOUNT_USER_NAMESPACES);
40}
41
42static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
43{
44 /* Start with the same capabilities as init but useless for doing
45 * anything as the capabilities are bound to the new user namespace.
46 */
47 cred->securebits = SECUREBITS_DEFAULT;
48 cred->cap_inheritable = CAP_EMPTY_SET;
49 cred->cap_permitted = CAP_FULL_SET;
50 cred->cap_effective = CAP_FULL_SET;
51 cred->cap_ambient = CAP_EMPTY_SET;
52 cred->cap_bset = CAP_FULL_SET;
53#ifdef CONFIG_KEYS
54 key_put(cred->request_key_auth);
55 cred->request_key_auth = NULL;
56#endif
57 /* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
58 cred->user_ns = user_ns;
59}
60
61/*
62 * Create a new user namespace, deriving the creator from the user in the
63 * passed credentials, and replacing that user with the new root user for the
64 * new namespace.
65 *
66 * This is called by copy_creds(), which will finish setting the target task's
67 * credentials.
68 */
69int create_user_ns(struct cred *new)
70{
71 struct user_namespace *ns, *parent_ns = new->user_ns;
72 kuid_t owner = new->euid;
73 kgid_t group = new->egid;
74 struct ucounts *ucounts;
75 int ret, i;
76
77 ret = -ENOSPC;
78 if (parent_ns->level > 32)
79 goto fail;
80
81 ucounts = inc_user_namespaces(parent_ns, owner);
82 if (!ucounts)
83 goto fail;
84
85 /*
86 * Verify that we can not violate the policy of which files
87 * may be accessed that is specified by the root directory,
88 * by verifing that the root directory is at the root of the
89 * mount namespace which allows all files to be accessed.
90 */
91 ret = -EPERM;
92 if (current_chrooted())
93 goto fail_dec;
94
95 /* The creator needs a mapping in the parent user namespace
96 * or else we won't be able to reasonably tell userspace who
97 * created a user_namespace.
98 */
99 ret = -EPERM;
100 if (!kuid_has_mapping(parent_ns, owner) ||
101 !kgid_has_mapping(parent_ns, group))
102 goto fail_dec;
103
104 ret = -ENOMEM;
105 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
106 if (!ns)
107 goto fail_dec;
108
109 ret = ns_alloc_inum(&ns->ns);
110 if (ret)
111 goto fail_free;
112 ns->ns.ops = &userns_operations;
113
114 atomic_set(&ns->count, 1);
115 /* Leave the new->user_ns reference with the new user namespace. */
116 ns->parent = parent_ns;
117 ns->level = parent_ns->level + 1;
118 ns->owner = owner;
119 ns->group = group;
120 INIT_WORK(&ns->work, free_user_ns);
121 for (i = 0; i < UCOUNT_COUNTS; i++) {
122 ns->ucount_max[i] = INT_MAX;
123 }
124 ns->ucounts = ucounts;
125
126 /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
127 mutex_lock(&userns_state_mutex);
128 ns->flags = parent_ns->flags;
129 mutex_unlock(&userns_state_mutex);
130
131#ifdef CONFIG_KEYS
132 INIT_LIST_HEAD(&ns->keyring_name_list);
133 init_rwsem(&ns->keyring_sem);
134#endif
135 ret = -ENOMEM;
136 if (!setup_userns_sysctls(ns))
137 goto fail_keyring;
138
139 set_cred_user_ns(new, ns);
140 return 0;
141fail_keyring:
142#ifdef CONFIG_PERSISTENT_KEYRINGS
143 key_put(ns->persistent_keyring_register);
144#endif
145 ns_free_inum(&ns->ns);
146fail_free:
147 kmem_cache_free(user_ns_cachep, ns);
148fail_dec:
149 dec_user_namespaces(ucounts);
150fail:
151 return ret;
152}
153
154int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
155{
156 struct cred *cred;
157 int err = -ENOMEM;
158
159 if (!(unshare_flags & CLONE_NEWUSER))
160 return 0;
161
162 cred = prepare_creds();
163 if (cred) {
164 err = create_user_ns(cred);
165 if (err)
166 put_cred(cred);
167 else
168 *new_cred = cred;
169 }
170
171 return err;
172}
173
174static void free_user_ns(struct work_struct *work)
175{
176 struct user_namespace *parent, *ns =
177 container_of(work, struct user_namespace, work);
178
179 do {
180 struct ucounts *ucounts = ns->ucounts;
181 parent = ns->parent;
182 if (ns->gid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
183 kfree(ns->gid_map.forward);
184 kfree(ns->gid_map.reverse);
185 }
186 if (ns->uid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
187 kfree(ns->uid_map.forward);
188 kfree(ns->uid_map.reverse);
189 }
190 if (ns->projid_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
191 kfree(ns->projid_map.forward);
192 kfree(ns->projid_map.reverse);
193 }
194 retire_userns_sysctls(ns);
195 key_free_user_ns(ns);
196 ns_free_inum(&ns->ns);
197 kmem_cache_free(user_ns_cachep, ns);
198 dec_user_namespaces(ucounts);
199 ns = parent;
200 } while (atomic_dec_and_test(&parent->count));
201}
202
203void __put_user_ns(struct user_namespace *ns)
204{
205 schedule_work(&ns->work);
206}
207EXPORT_SYMBOL(__put_user_ns);
208
209/**
210 * idmap_key struct holds the information necessary to find an idmapping in a
211 * sorted idmap array. It is passed to cmp_map_id() as first argument.
212 */
213struct idmap_key {
214 bool map_up; /* true -> id from kid; false -> kid from id */
215 u32 id; /* id to find */
216 u32 count; /* == 0 unless used with map_id_range_down() */
217};
218
219/**
220 * cmp_map_id - Function to be passed to bsearch() to find the requested
221 * idmapping. Expects struct idmap_key to be passed via @k.
222 */
223static int cmp_map_id(const void *k, const void *e)
224{
225 u32 first, last, id2;
226 const struct idmap_key *key = k;
227 const struct uid_gid_extent *el = e;
228
229 id2 = key->id + key->count - 1;
230
231 /* handle map_id_{down,up}() */
232 if (key->map_up)
233 first = el->lower_first;
234 else
235 first = el->first;
236
237 last = first + el->count - 1;
238
239 if (key->id >= first && key->id <= last &&
240 (id2 >= first && id2 <= last))
241 return 0;
242
243 if (key->id < first || id2 < first)
244 return -1;
245
246 return 1;
247}
248
249/**
250 * map_id_range_down_max - Find idmap via binary search in ordered idmap array.
251 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
252 */
253static struct uid_gid_extent *
254map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
255{
256 struct idmap_key key;
257
258 key.map_up = false;
259 key.count = count;
260 key.id = id;
261
262 return bsearch(&key, map->forward, extents,
263 sizeof(struct uid_gid_extent), cmp_map_id);
264}
265
266/**
267 * map_id_range_down_base - Find idmap via binary search in static extent array.
268 * Can only be called if number of mappings is equal or less than
269 * UID_GID_MAP_MAX_BASE_EXTENTS.
270 */
271static struct uid_gid_extent *
272map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count)
273{
274 unsigned idx;
275 u32 first, last, id2;
276
277 id2 = id + count - 1;
278
279 /* Find the matching extent */
280 for (idx = 0; idx < extents; idx++) {
281 first = map->extent[idx].first;
282 last = first + map->extent[idx].count - 1;
283 if (id >= first && id <= last &&
284 (id2 >= first && id2 <= last))
285 return &map->extent[idx];
286 }
287 return NULL;
288}
289
290static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
291{
292 struct uid_gid_extent *extent;
293 unsigned extents = map->nr_extents;
294 smp_rmb();
295
296 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
297 extent = map_id_range_down_base(extents, map, id, count);
298 else
299 extent = map_id_range_down_max(extents, map, id, count);
300
301 /* Map the id or note failure */
302 if (extent)
303 id = (id - extent->first) + extent->lower_first;
304 else
305 id = (u32) -1;
306
307 return id;
308}
309
310static u32 map_id_down(struct uid_gid_map *map, u32 id)
311{
312 return map_id_range_down(map, id, 1);
313}
314
315/**
316 * map_id_up_base - Find idmap via binary search in static extent array.
317 * Can only be called if number of mappings is equal or less than
318 * UID_GID_MAP_MAX_BASE_EXTENTS.
319 */
320static struct uid_gid_extent *
321map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id)
322{
323 unsigned idx;
324 u32 first, last;
325
326 /* Find the matching extent */
327 for (idx = 0; idx < extents; idx++) {
328 first = map->extent[idx].lower_first;
329 last = first + map->extent[idx].count - 1;
330 if (id >= first && id <= last)
331 return &map->extent[idx];
332 }
333 return NULL;
334}
335
336/**
337 * map_id_up_max - Find idmap via binary search in ordered idmap array.
338 * Can only be called if number of mappings exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
339 */
340static struct uid_gid_extent *
341map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id)
342{
343 struct idmap_key key;
344
345 key.map_up = true;
346 key.count = 1;
347 key.id = id;
348
349 return bsearch(&key, map->reverse, extents,
350 sizeof(struct uid_gid_extent), cmp_map_id);
351}
352
353static u32 map_id_up(struct uid_gid_map *map, u32 id)
354{
355 struct uid_gid_extent *extent;
356 unsigned extents = map->nr_extents;
357 smp_rmb();
358
359 if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
360 extent = map_id_up_base(extents, map, id);
361 else
362 extent = map_id_up_max(extents, map, id);
363
364 /* Map the id or note failure */
365 if (extent)
366 id = (id - extent->lower_first) + extent->first;
367 else
368 id = (u32) -1;
369
370 return id;
371}
372
373/**
374 * make_kuid - Map a user-namespace uid pair into a kuid.
375 * @ns: User namespace that the uid is in
376 * @uid: User identifier
377 *
378 * Maps a user-namespace uid pair into a kernel internal kuid,
379 * and returns that kuid.
380 *
381 * When there is no mapping defined for the user-namespace uid
382 * pair INVALID_UID is returned. Callers are expected to test
383 * for and handle INVALID_UID being returned. INVALID_UID
384 * may be tested for using uid_valid().
385 */
386kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
387{
388 /* Map the uid to a global kernel uid */
389 return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
390}
391EXPORT_SYMBOL(make_kuid);
392
393/**
394 * from_kuid - Create a uid from a kuid user-namespace pair.
395 * @targ: The user namespace we want a uid in.
396 * @kuid: The kernel internal uid to start with.
397 *
398 * Map @kuid into the user-namespace specified by @targ and
399 * return the resulting uid.
400 *
401 * There is always a mapping into the initial user_namespace.
402 *
403 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
404 */
405uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
406{
407 /* Map the uid from a global kernel uid */
408 return map_id_up(&targ->uid_map, __kuid_val(kuid));
409}
410EXPORT_SYMBOL(from_kuid);
411
412/**
413 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
414 * @targ: The user namespace we want a uid in.
415 * @kuid: The kernel internal uid to start with.
416 *
417 * Map @kuid into the user-namespace specified by @targ and
418 * return the resulting uid.
419 *
420 * There is always a mapping into the initial user_namespace.
421 *
422 * Unlike from_kuid from_kuid_munged never fails and always
423 * returns a valid uid. This makes from_kuid_munged appropriate
424 * for use in syscalls like stat and getuid where failing the
425 * system call and failing to provide a valid uid are not an
426 * options.
427 *
428 * If @kuid has no mapping in @targ overflowuid is returned.
429 */
430uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
431{
432 uid_t uid;
433 uid = from_kuid(targ, kuid);
434
435 if (uid == (uid_t) -1)
436 uid = overflowuid;
437 return uid;
438}
439EXPORT_SYMBOL(from_kuid_munged);
440
441/**
442 * make_kgid - Map a user-namespace gid pair into a kgid.
443 * @ns: User namespace that the gid is in
444 * @gid: group identifier
445 *
446 * Maps a user-namespace gid pair into a kernel internal kgid,
447 * and returns that kgid.
448 *
449 * When there is no mapping defined for the user-namespace gid
450 * pair INVALID_GID is returned. Callers are expected to test
451 * for and handle INVALID_GID being returned. INVALID_GID may be
452 * tested for using gid_valid().
453 */
454kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
455{
456 /* Map the gid to a global kernel gid */
457 return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
458}
459EXPORT_SYMBOL(make_kgid);
460
461/**
462 * from_kgid - Create a gid from a kgid user-namespace pair.
463 * @targ: The user namespace we want a gid in.
464 * @kgid: The kernel internal gid to start with.
465 *
466 * Map @kgid into the user-namespace specified by @targ and
467 * return the resulting gid.
468 *
469 * There is always a mapping into the initial user_namespace.
470 *
471 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
472 */
473gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
474{
475 /* Map the gid from a global kernel gid */
476 return map_id_up(&targ->gid_map, __kgid_val(kgid));
477}
478EXPORT_SYMBOL(from_kgid);
479
480/**
481 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
482 * @targ: The user namespace we want a gid in.
483 * @kgid: The kernel internal gid to start with.
484 *
485 * Map @kgid into the user-namespace specified by @targ and
486 * return the resulting gid.
487 *
488 * There is always a mapping into the initial user_namespace.
489 *
490 * Unlike from_kgid from_kgid_munged never fails and always
491 * returns a valid gid. This makes from_kgid_munged appropriate
492 * for use in syscalls like stat and getgid where failing the
493 * system call and failing to provide a valid gid are not options.
494 *
495 * If @kgid has no mapping in @targ overflowgid is returned.
496 */
497gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
498{
499 gid_t gid;
500 gid = from_kgid(targ, kgid);
501
502 if (gid == (gid_t) -1)
503 gid = overflowgid;
504 return gid;
505}
506EXPORT_SYMBOL(from_kgid_munged);
507
508/**
509 * make_kprojid - Map a user-namespace projid pair into a kprojid.
510 * @ns: User namespace that the projid is in
511 * @projid: Project identifier
512 *
513 * Maps a user-namespace uid pair into a kernel internal kuid,
514 * and returns that kuid.
515 *
516 * When there is no mapping defined for the user-namespace projid
517 * pair INVALID_PROJID is returned. Callers are expected to test
518 * for and handle handle INVALID_PROJID being returned. INVALID_PROJID
519 * may be tested for using projid_valid().
520 */
521kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
522{
523 /* Map the uid to a global kernel uid */
524 return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
525}
526EXPORT_SYMBOL(make_kprojid);
527
528/**
529 * from_kprojid - Create a projid from a kprojid user-namespace pair.
530 * @targ: The user namespace we want a projid in.
531 * @kprojid: The kernel internal project identifier to start with.
532 *
533 * Map @kprojid into the user-namespace specified by @targ and
534 * return the resulting projid.
535 *
536 * There is always a mapping into the initial user_namespace.
537 *
538 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
539 */
540projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
541{
542 /* Map the uid from a global kernel uid */
543 return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
544}
545EXPORT_SYMBOL(from_kprojid);

/**
 * from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal projid to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * Unlike from_kprojid, from_kprojid_munged never fails and always
 * returns a valid projid. This makes from_kprojid_munged
 * appropriate for use in syscalls like stat where failing the
 * system call and failing to provide a valid projid are not
 * options.
 *
 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
 */
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
	projid_t projid;
	projid = from_kprojid(targ, kprojid);

	if (projid == (projid_t) -1)
		projid = OVERFLOW_PROJID;
	return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);

static int uid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	uid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int gid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	gid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static int projid_m_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	struct uid_gid_extent *extent = v;
	struct user_namespace *lower_ns;
	projid_t lower;

	lower_ns = seq_user_ns(seq);
	if ((lower_ns == ns) && lower_ns->parent)
		lower_ns = lower_ns->parent;

	lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));

	seq_printf(seq, "%10u %10u %10u\n",
		   extent->first,
		   lower,
		   extent->count);

	return 0;
}

static void *m_start(struct seq_file *seq, loff_t *ppos,
		     struct uid_gid_map *map)
{
	loff_t pos = *ppos;
	unsigned extents = map->nr_extents;

	/* Pairs with the smp_wmb() in map_write() */
	smp_rmb();

	if (pos >= extents)
		return NULL;

	if (extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return &map->extent[pos];

	return &map->forward[pos];
}

static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}

static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}

static void m_stop(struct seq_file *seq, void *v)
{
	return;
}

const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};

static bool mappings_overlap(struct uid_gid_map *new_map,
			     struct uid_gid_extent *extent)
{
	u32 upper_first, lower_first, upper_last, lower_last;
	unsigned idx;

	upper_first = extent->first;
	lower_first = extent->lower_first;
	upper_last = upper_first + extent->count - 1;
	lower_last = lower_first + extent->count - 1;

	for (idx = 0; idx < new_map->nr_extents; idx++) {
		u32 prev_upper_first, prev_lower_first;
		u32 prev_upper_last, prev_lower_last;
		struct uid_gid_extent *prev;

		if (new_map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			prev = &new_map->extent[idx];
		else
			prev = &new_map->forward[idx];

		prev_upper_first = prev->first;
		prev_lower_first = prev->lower_first;
		prev_upper_last = prev_upper_first + prev->count - 1;
		prev_lower_last = prev_lower_first + prev->count - 1;

		/* Does the upper range intersect a previous extent? */
		if ((prev_upper_first <= upper_last) &&
		    (prev_upper_last >= upper_first))
			return true;

		/* Does the lower range intersect a previous extent? */
		if ((prev_lower_first <= lower_last) &&
		    (prev_lower_last >= lower_first))
			return true;
	}
	return false;
}
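
/*
 * The intersection test above is the usual closed-interval check: two
 * ranges [a_first, a_last] and [b_first, b_last] overlap exactly when
 * a_first <= b_last && a_last >= b_first. For example, the map lines
 * "0 1000 10" and "5 2000 10" collide on their upper ranges, since
 * [0,9] and [5,14] share ids 5-9, so the second line would be rejected.
 */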

/**
 * insert_extent - Safely insert a new idmap extent into struct uid_gid_map.
 * Takes care to allocate a 4K block of memory if the number of mappings
 * exceeds UID_GID_MAP_MAX_BASE_EXTENTS.
 */
static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent)
{
	struct uid_gid_extent *dest;

	if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) {
		struct uid_gid_extent *forward;

		/* Allocate memory for 340 mappings. */
		forward = kmalloc_array(UID_GID_MAP_MAX_EXTENTS,
					sizeof(struct uid_gid_extent),
					GFP_KERNEL);
		if (!forward)
			return -ENOMEM;

		/* Copy over memory. Only set up memory for the forward pointer.
		 * Defer the memory setup for the reverse pointer.
		 */
		memcpy(forward, map->extent,
		       map->nr_extents * sizeof(map->extent[0]));

		map->forward = forward;
		map->reverse = NULL;
	}

	if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS)
		dest = &map->extent[map->nr_extents];
	else
		dest = &map->forward[map->nr_extents];

	*dest = *extent;
	map->nr_extents++;
	return 0;
}
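
/*
 * Why 340 extents fit the "4K block" claim above: a struct uid_gid_extent
 * is three u32s, i.e. 12 bytes, so UID_GID_MAP_MAX_EXTENTS (340) extents
 * take 340 * 12 = 4080 bytes, which still fits in a single 4K page.
 */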

/* cmp function to sort() forward mappings */
static int cmp_extents_forward(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->first < e2->first)
		return -1;

	if (e1->first > e2->first)
		return 1;

	return 0;
}

/* cmp function to sort() reverse mappings */
static int cmp_extents_reverse(const void *a, const void *b)
{
	const struct uid_gid_extent *e1 = a;
	const struct uid_gid_extent *e2 = b;

	if (e1->lower_first < e2->lower_first)
		return -1;

	if (e1->lower_first > e2->lower_first)
		return 1;

	return 0;
}

/**
 * sort_idmaps - Sorts an array of idmap entries.
 * Only has an effect if the number of mappings exceeds
 * UID_GID_MAP_MAX_BASE_EXTENTS; otherwise it returns early.
 */
static int sort_idmaps(struct uid_gid_map *map)
{
	if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
		return 0;

	/* Sort forward array. */
	sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_forward, NULL);

	/* Only copy the memory from forward we actually need. */
	map->reverse = kmemdup(map->forward,
			       map->nr_extents * sizeof(struct uid_gid_extent),
			       GFP_KERNEL);
	if (!map->reverse)
		return -ENOMEM;

	/* Sort reverse array. */
	sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent),
	     cmp_extents_reverse, NULL);

	return 0;
}
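
/*
 * Keeping two sorted copies of the same extents lets lookups in either
 * direction use a binary search instead of a linear scan: the forward
 * array is ordered by ->first for mapping ids down into the kernel, and
 * the reverse array is ordered by ->lower_first for mapping kernel ids
 * back up. A simplified sketch of such a lookup (the real lookup helpers
 * use a comparator that also checks ->count, so an id anywhere inside an
 * extent matches):
 *
 *	struct uid_gid_extent key = { .first = id };
 *	struct uid_gid_extent *e = bsearch(&key, map->forward,
 *					   map->nr_extents, sizeof(key),
 *					   cmp_extents_forward);
 */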

static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent extent;
	char *kbuf = NULL, *pos, *next_line;
	ssize_t ret;

	/* Only allow < page size writes at the beginning of the file */
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		return -EINVAL;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents. The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() is used to guarantee that we don't have
	 * crazy architectures returning stale data.
	 */
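	/*
	 * The pairing, in outline: this writer stores the extents, issues
	 * smp_wmb(), then stores nr_extents; readers such as m_start()
	 * load nr_extents, issue smp_rmb(), then load the extents, and so
	 * never see a count without the extents it describes.
	 */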
	mutex_lock(&userns_state_mutex);

	memset(&new_map, 0, sizeof(struct uid_gid_map));

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	for (; pos; pos = next_line) {

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent.first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent.count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is no trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent.first == (u32) -1) ||
		    (extent.lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent.first + extent.count) <= extent.first)
			goto out;
		if ((extent.lower_first + extent.count) <=
		    extent.lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, &extent))
			goto out;

		if ((new_map.nr_extents + 1) == UID_GID_MAP_MAX_EXTENTS &&
		    (next_line != NULL))
			goto out;

		ret = insert_extent(&new_map, &extent);
		if (ret < 0)
			goto out;
		ret = -EINVAL;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate that the user is allowed to use the ids being mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	ret = -EPERM;
	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		struct uid_gid_extent *e;
		u32 lower_first;

		if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS)
			e = &new_map.extent[idx];
		else
			e = &new_map.forward[idx];

		lower_first = map_id_range_down(parent_map,
						e->lower_first,
						e->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		e->lower_first = lower_first;
	}

	/*
	 * If we want to use binary search for lookup, this clones the extent
	 * array and sorts both copies.
	 */
	ret = sort_idmaps(&new_map);
	if (ret < 0)
		goto out;

	/* Install the map */
	if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
		memcpy(map->extent, new_map.extent,
		       new_map.nr_extents * sizeof(new_map.extent[0]));
	} else {
		map->forward = new_map.forward;
		map->reverse = new_map.reverse;
	}
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	if (ret < 0 && new_map.nr_extents > UID_GID_MAP_MAX_BASE_EXTENTS) {
		kfree(new_map.forward);
		kfree(new_map.reverse);
		map->forward = NULL;
		map->reverse = NULL;
		map->nr_extents = 0;
	}

	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}
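
/*
 * The format parsed above is one extent per line, three decimal fields:
 * "first lower_first count". For example, a privileged parent can give
 * a child namespace root mapped to uid 1000 plus a further range with:
 *
 *	0 1000 1
 *	1 100000 65535
 *
 * supplied in a single write(2) to /proc/<pid>/uid_map, since the map
 * may only be written once and only from offset zero.
 */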

ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETUID,
			 &ns->uid_map, &ns->parent->uid_map);
}

ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
			   size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	return map_write(file, buf, size, ppos, CAP_SETGID,
			 &ns->gid_map, &ns->parent->gid_map);
}

ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
			      size_t size, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct user_namespace *seq_ns = seq_user_ns(seq);

	if (!ns->parent)
		return -EPERM;

	if ((seq_ns != ns) && (seq_ns != ns->parent))
		return -EPERM;

	/* Anyone can set any valid project id; no capability needed. */
	return map_write(file, buf, size, ppos, -1,
			 &ns->projid_map, &ns->parent->projid_map);
}

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;

	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;

		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);

			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);

			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}
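
/*
 * Concretely, the unprivileged case above is what lets a process with
 * euid 1000 that created a user namespace write "0 1000 1" to its own
 * uid_map without holding CAP_SETUID in the parent: a single one-id
 * extent mapping the creator's own euid. The matching gid case
 * additionally requires that setgroups has first been denied.
 */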

int proc_setgroups_show(struct seq_file *seq, void *v)
{
	struct user_namespace *ns = seq->private;
	unsigned long userns_flags = READ_ONCE(ns->flags);

	seq_printf(seq, "%s\n",
		   (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
		   "allow" : "deny");
	return 0;
}

ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is no trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}

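/*
 * The intended ordering, from an unprivileged process that has just
 * created a user namespace: deny setgroups first, then write the
 * gid_map; once gid_map has been written the "deny" path above is
 * closed off. Roughly, from userspace:
 *
 *	echo deny > /proc/self/setgroups
 *	echo "0 1000 1" > /proc/self/gid_map
 */
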
bool userns_may_setgroups(const struct user_namespace *ns)
{
	bool allowed;

	mutex_lock(&userns_state_mutex);
	/* It is not safe to use setgroups until a gid mapping in
	 * the user namespace has been established.
	 */
	allowed = ns->gid_map.nr_extents != 0;
	/* Is setgroups allowed? */
	allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
	mutex_unlock(&userns_state_mutex);

	return allowed;
}

/*
 * Returns true if @child is the same namespace or a descendant of
 * @ancestor.
 */
bool in_userns(const struct user_namespace *ancestor,
	       const struct user_namespace *child)
{
	const struct user_namespace *ns;

	for (ns = child; ns->level > ancestor->level; ns = ns->parent)
		;
	return (ns == ancestor);
}
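
/*
 * The walk above relies on ->level increasing by one per nesting level,
 * so only the ancestor at exactly ancestor->level is ever compared. For
 * example, with init_user_ns at level 0 and a grandchild at level 2,
 * checking the grandchild against a level-1 namespace steps up once and
 * then compares pointers.
 */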

bool current_in_userns(const struct user_namespace *target_ns)
{
	return in_userns(target_ns, current_user_ns());
}
EXPORT_SYMBOL(current_in_userns);

static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}

static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}

static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}

static int userns_install(struct nsset *nsset, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

	/* The caller's fs_struct must not be shared with other tasks */
	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = nsset_cred(nsset);
	if (!cred)
		return -EINVAL;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return 0;
}

struct ns_common *ns_get_owner(struct ns_common *ns)
{
	struct user_namespace *my_user_ns = current_user_ns();
	struct user_namespace *owner, *p;

	/* See if the owner is in the current user namespace */
	owner = p = ns->ops->owner(ns);
	for (;;) {
		if (!p)
			return ERR_PTR(-EPERM);
		if (p == my_user_ns)
			break;
		p = p->parent;
	}

	return &get_user_ns(owner)->ns;
}

static struct user_namespace *userns_owner(struct ns_common *ns)
{
	return to_user_ns(ns)->parent;
}

const struct proc_ns_operations userns_operations = {
	.name = "user",
	.type = CLONE_NEWUSER,
	.get = userns_get,
	.put = userns_put,
	.install = userns_install,
	.owner = userns_owner,
	.get_parent = ns_get_owner,
};

static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);