// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/namespace.c
 * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc.
 */

#include <linux/ipc.h>
#include <linux/msg.h>
#include <linux/ipc_namespace.h>
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/sched/task.h>

#include "util.h"

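/*
 * inc_ipc_namespaces()/dec_ipc_namespaces() charge and uncharge the new
 * namespace against the UCOUNT_IPC_NAMESPACES limit of the owning user
 * namespace (the "user.max_ipc_namespaces" sysctl); inc_ucount() returns
 * NULL once that limit would be exceeded.
 */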
static struct ucounts *inc_ipc_namespaces(struct user_namespace *ns)
{
	return inc_ucount(ns, current_euid(), UCOUNT_IPC_NAMESPACES);
}

static void dec_ipc_namespaces(struct ucounts *ucounts)
{
	dec_ucount(ucounts, UCOUNT_IPC_NAMESPACES);
}

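/*
 * create_ipc_ns - allocate and initialise a new ipc namespace.
 *
 * The new namespace starts out empty: sem, msg, shm and mqueue state
 * are set up from scratch rather than copied from @old_ns.  On failure
 * the already-initialised subsystems are torn down in reverse order and
 * an ERR_PTR() is returned.
 */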
static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns,
					   struct ipc_namespace *old_ns)
{
	struct ipc_namespace *ns;
	struct ucounts *ucounts;
	int err;

	err = -ENOSPC;
	ucounts = inc_ipc_namespaces(user_ns);
	if (!ucounts)
		goto fail;

	err = -ENOMEM;
	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
	if (ns == NULL)
		goto fail_dec;

	err = ns_alloc_inum(&ns->ns);
	if (err)
		goto fail_free;
	ns->ns.ops = &ipcns_operations;

	refcount_set(&ns->count, 1);
	ns->user_ns = get_user_ns(user_ns);
	ns->ucounts = ucounts;

	err = sem_init_ns(ns);
	if (err)
		goto fail_put;
	err = msg_init_ns(ns);
	if (err)
		goto fail_destroy_sem;
	err = shm_init_ns(ns);
	if (err)
		goto fail_destroy_msg;

	err = mq_init_ns(ns);
	if (err)
		goto fail_destroy_shm;

	return ns;

fail_destroy_shm:
	shm_exit_ns(ns);
fail_destroy_msg:
	msg_exit_ns(ns);
fail_destroy_sem:
	sem_exit_ns(ns);
fail_put:
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
fail_free:
	kfree(ns);
fail_dec:
	dec_ipc_namespaces(ucounts);
fail:
	return ERR_PTR(err);
}

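/*
 * copy_ipcs - get the ipc namespace for a new task or a new nsproxy.
 *
 * Without CLONE_NEWIPC this just takes a reference on the existing @ns;
 * with CLONE_NEWIPC a fresh, empty namespace owned by @user_ns is
 * created.  Called from create_new_namespaces() on clone(2)/unshare(2).
 */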
struct ipc_namespace *copy_ipcs(unsigned long flags,
	struct user_namespace *user_ns, struct ipc_namespace *ns)
{
	if (!(flags & CLONE_NEWIPC))
		return get_ipc_ns(ns);
	return create_ipc_ns(user_ns, ns);
}

/*
 * free_ipcs - free all ipcs of one type
 * @ns: the namespace to remove the ipcs from
 * @ids: the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rwsem);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		rcu_read_lock();
		ipc_lock_object(perm);
		free(ns, perm);
		total++;
	}
	up_write(&ids->rwsem);
}

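/*
 * free_ipc_ns - release everything still held by a dead ipc namespace:
 * the remaining sem/msg/shm state, the ucount charge, the reference on
 * the owning user namespace and the namespace inode number.
 */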
static void free_ipc_ns(struct ipc_namespace *ns)
{
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);

	dec_ipc_namespaces(ns->ucounts);
	put_user_ns(ns->user_ns);
	ns_free_inum(&ns->ns);
	kfree(ns);
}

/*
 * put_ipc_ns - drop a reference to an ipc namespace.
 * @ns: the namespace to put
 *
 * If this is the last task in the namespace exiting, and
 * it is dropping the refcount to 0, then it can race with
 * a task in another ipc namespace but in a mounts namespace
 * which has this ipcns's mqueuefs mounted, doing some action
 * with one of the mqueuefs files.  That can raise the refcount.
 * So dropping the refcount, and raising the refcount when
 * accessing it through the VFS, are protected with mq_lock.
 *
 * (Clearly, a task raising the refcount on its own ipc_ns
 * needn't take mq_lock since it can't race with the last task
 * in the ipcns exiting).
 */
void put_ipc_ns(struct ipc_namespace *ns)
{
	if (refcount_dec_and_lock(&ns->count, &mq_lock)) {
		mq_clear_sbinfo(ns);
		spin_unlock(&mq_lock);
		mq_put_mnt(ns);
		free_ipc_ns(ns);
	}
}

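/* Map a generic ns_common back to its containing ipc_namespace. */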
static inline struct ipc_namespace *to_ipc_ns(struct ns_common *ns)
{
	return container_of(ns, struct ipc_namespace, ns);
}

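/*
 * ipcns_get - grab a reference on @task's ipc namespace, e.g. when the
 * /proc/<pid>/ns/ipc link is followed.  task_lock() keeps task->nsproxy
 * stable while the reference is taken; NULL is returned if the task has
 * already exited and dropped its nsproxy.
 */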
static struct ns_common *ipcns_get(struct task_struct *task)
{
	struct ipc_namespace *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy)
		ns = get_ipc_ns(nsproxy->ipc_ns);
	task_unlock(task);

	return ns ? &ns->ns : NULL;
}

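/* Drop the reference taken by ipcns_get(). */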
static void ipcns_put(struct ns_common *ns)
{
	return put_ipc_ns(to_ipc_ns(ns));
}

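/*
 * ipcns_install - switch @nsproxy over to @new; the setns(2) back end.
 * The caller must hold CAP_SYS_ADMIN both in its own user namespace and
 * in the user namespace owning the target ipc namespace.
 */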
static int ipcns_install(struct nsproxy *nsproxy, struct ns_common *new)
{
	struct ipc_namespace *ns = to_ipc_ns(new);
	if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	/* Ditch state from the old ipc namespace */
	exit_sem(current);
	put_ipc_ns(nsproxy->ipc_ns);
	nsproxy->ipc_ns = get_ipc_ns(ns);
	return 0;
}

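/*
 * Report the user namespace that owns this ipc namespace (used, for
 * instance, by the NS_GET_USERNS ioctl on the namespace file).
 */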
static struct user_namespace *ipcns_owner(struct ns_common *ns)
{
	return to_ipc_ns(ns)->user_ns;
}

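/*
 * Operations backing the /proc/<pid>/ns/ipc file: they let userspace
 * pin this namespace through bind mounts or open fds and enter it with
 * setns(2).
 */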
const struct proc_ns_operations ipcns_operations = {
	.name		= "ipc",
	.type		= CLONE_NEWIPC,
	.get		= ipcns_get,
	.put		= ipcns_put,
	.install	= ipcns_install,
	.owner		= ipcns_owner,
};