// SPDX-License-Identifier: GPL-2.0
/*
 * devtmpfs - kernel-maintained tmpfs-based /dev
 *
 * Copyright (C) 2009, Kay Sievers <kay.sievers@vrfy.org>
 *
 * During bootup, before any driver core device is registered,
 * devtmpfs, a tmpfs-based filesystem, is created. Every driver-core
 * device which requests a device node will get a node in this
 * filesystem.
 * By default, nodes are named after the device, owned by root, and
 * have a default mode of 0600. Subsystems can overwrite the default
 * settings if needed.
 */

#define pr_fmt(fmt) "devtmpfs: " fmt

#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/shmem_fs.h>
#include <linux/ramfs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/init_syscalls.h>
#include <uapi/linux/mount.h>
#include "base.h"

#ifdef CONFIG_DEVTMPFS_SAFE
#define DEVTMPFS_MFLAGS	(MS_SILENT | MS_NOEXEC | MS_NOSUID)
#else
#define DEVTMPFS_MFLAGS	(MS_SILENT)
#endif

static struct task_struct *thread;

static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);

static DEFINE_SPINLOCK(req_lock);

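/*
 * Node create/remove requests are queued on a simple singly-linked list
 * and handled one by one by the kdevtmpfs kernel thread, which runs in
 * its own mount namespace with devtmpfs mounted as its root.
 */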
static struct req {
	struct req *next;
	struct completion done;
	int err;
	const char *name;
	umode_t mode;	/* 0 => delete */
	kuid_t uid;
	kgid_t gid;
	struct device *dev;
} *requests;

static int __init mount_param(char *str)
{
	mount_dev = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("devtmpfs.mount=", mount_param);

static struct vfsmount *mnt;

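/*
 * "mount" entry point for the userspace-visible devtmpfs: instead of
 * creating a new superblock, reconfigure and return the kernel-internal
 * instance, so every mount of devtmpfs shows the same tree of nodes.
 */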
static struct dentry *public_dev_mount(struct file_system_type *fs_type, int flags,
		      const char *dev_name, void *data)
{
	struct super_block *s = mnt->mnt_sb;
	int err;

	atomic_inc(&s->s_active);
	down_write(&s->s_umount);
	err = reconfigure_single(s, flags, data);
	if (err < 0) {
		deactivate_locked_super(s);
		return ERR_PTR(err);
	}
	return dget(s->s_root);
}

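/*
 * Two filesystem types share the "devtmpfs" name: internal_fs_type backs
 * the instance mounted by the kernel itself (tmpfs if available, ramfs
 * otherwise), while dev_fs_type is what userspace mounts and simply hands
 * back that same instance.
 */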
static struct file_system_type internal_fs_type = {
	.name = "devtmpfs",
#ifdef CONFIG_TMPFS
	.init_fs_context = shmem_init_fs_context,
#else
	.init_fs_context = ramfs_init_fs_context,
#endif
	.kill_sb = kill_litter_super,
};

static struct file_system_type dev_fs_type = {
	.name = "devtmpfs",
	.mount = public_dev_mount,
};

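/*
 * Queue a request for the kdevtmpfs thread, wait for it to be handled,
 * and free the temporary name buffer returned by device_get_devnode().
 */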
static int devtmpfs_submit_req(struct req *req, const char *tmp)
{
	init_completion(&req->done);

	spin_lock(&req_lock);
	req->next = requests;
	requests = req;
	spin_unlock(&req_lock);

	wake_up_process(thread);
	wait_for_completion(&req->done);

	kfree(tmp);

	return req->err;
}

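/*
 * Request creation of a device node for @dev. The name, ownership and
 * mode come from device_get_devnode(); a missing mode defaults to 0600,
 * and the node type (block/char) is derived from the device class.
 */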
int devtmpfs_create_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.mode = 0;
	req.uid = GLOBAL_ROOT_UID;
	req.gid = GLOBAL_ROOT_GID;
	req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp);
	if (!req.name)
		return -ENOMEM;

	if (req.mode == 0)
		req.mode = 0600;
	if (is_blockdev(dev))
		req.mode |= S_IFBLK;
	else
		req.mode |= S_IFCHR;

	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}

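/*
 * Request removal of the device node for @dev; a zero mode in the
 * request tells the worker thread to delete rather than create.
 */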
int devtmpfs_delete_node(struct device *dev)
{
	const char *tmp = NULL;
	struct req req;

	if (!thread)
		return 0;

	req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp);
	if (!req.name)
		return -ENOMEM;

	req.mode = 0;
	req.dev = dev;

	return devtmpfs_submit_req(&req, tmp);
}

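/*
 * Create a single directory and tag its inode as kernel-created, so it
 * can later be removed again by delete_path()/dev_rmdir().
 */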
static int dev_mkdir(const char *name, umode_t mode)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
	if (!err)
		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	done_path_create(&path, dentry);
	return err;
}

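/*
 * Create all missing parent directories of @nodepath, one path component
 * at a time; an already existing directory (-EEXIST) is not an error.
 */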
static int create_path(const char *nodepath)
{
	char *path;
	char *s;
	int err = 0;

	/* parent directories do not exist, create them */
	path = kstrdup(nodepath, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	s = path;
	for (;;) {
		s = strchr(s, '/');
		if (!s)
			break;
		s[0] = '\0';
		err = dev_mkdir(path, 0755);
		if (err && err != -EEXIST)
			break;
		s[0] = '/';
		s++;
	}
	kfree(path);
	return err;
}

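/*
 * Create the device node: mknod it (creating missing parent directories
 * on demand) and then set the requested mode, uid and gid on the new
 * inode via notify_change(). The inode is tagged as kernel-created so
 * handle_remove() only touches nodes devtmpfs itself made.
 */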
static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
			 kgid_t gid, struct device *dev)
{
	struct dentry *dentry;
	struct path path;
	int err;

	dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
	if (dentry == ERR_PTR(-ENOENT)) {
		create_path(nodename);
		dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
	}
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
			dev->devt);
	if (!err) {
		struct iattr newattrs;

		newattrs.ia_mode = mode;
		newattrs.ia_uid = uid;
		newattrs.ia_gid = gid;
		newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
		inode_lock(d_inode(dentry));
		notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
		inode_unlock(d_inode(dentry));

		/* mark as kernel-created inode */
		d_inode(dentry)->i_private = &thread;
	}
	done_path_create(&path, dentry);
	return err;
}

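/*
 * Remove a directory, but only if devtmpfs created it itself (checked
 * via the i_private marker); anything made by userspace is left alone.
 */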
static int dev_rmdir(const char *name)
{
	struct path parent;
	struct dentry *dentry;
	int err;

	dentry = kern_path_locked(name, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	if (d_really_is_positive(dentry)) {
		if (d_inode(dentry)->i_private == &thread)
			err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
					dentry);
		else
			err = -EPERM;
	} else {
		err = -ENOENT;
	}
	dput(dentry);
	inode_unlock(d_inode(parent.dentry));
	path_put(&parent);
	return err;
}

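/*
 * Walk @nodepath from the deepest component upwards and remove the now
 * empty parent directories; stop at the first one that cannot be
 * removed (not empty, not ours, or still in use).
 */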
static int delete_path(const char *nodepath)
{
	char *path;
	int err = 0;

	path = kstrdup(nodepath, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	for (;;) {
		char *base;

		base = strrchr(path, '/');
		if (!base)
			break;
		base[0] = '\0';
		err = dev_rmdir(path);
		if (err)
			break;
	}

	kfree(path);
	return err;
}

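/*
 * Check whether an existing node really is the one devtmpfs created for
 * @dev: it must carry the kernel-created marker and its type and dev_t
 * must match, so renamed or reused names are never removed by mistake.
 */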
static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
{
	/* did we create it */
	if (inode->i_private != &thread)
		return 0;

	/* does the dev_t match */
	if (is_blockdev(dev)) {
		if (!S_ISBLK(stat->mode))
			return 0;
	} else {
		if (!S_ISCHR(stat->mode))
			return 0;
	}
	if (stat->rdev != dev->devt)
		return 0;

	/* ours */
	return 1;
}

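/*
 * Remove the device node for @dev if it is still the one devtmpfs
 * created, resetting ownership and permissions first so that any
 * remaining hardlinks become harmless, then clean up empty parent
 * directories.
 */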
static int handle_remove(const char *nodename, struct device *dev)
{
	struct path parent;
	struct dentry *dentry;
	int deleted = 0;
	int err;

	dentry = kern_path_locked(nodename, &parent);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (d_really_is_positive(dentry)) {
		struct kstat stat;
		struct path p = {.mnt = parent.mnt, .dentry = dentry};
		err = vfs_getattr(&p, &stat, STATX_TYPE | STATX_MODE,
				  AT_STATX_SYNC_AS_STAT);
		if (!err && dev_mynode(dev, d_inode(dentry), &stat)) {
			struct iattr newattrs;
			/*
			 * before unlinking this node, reset permissions
			 * of possible references like hardlinks
			 */
			newattrs.ia_uid = GLOBAL_ROOT_UID;
			newattrs.ia_gid = GLOBAL_ROOT_GID;
			newattrs.ia_mode = stat.mode & ~0777;
			newattrs.ia_valid =
				ATTR_UID|ATTR_GID|ATTR_MODE;
			inode_lock(d_inode(dentry));
			notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
			inode_unlock(d_inode(dentry));
			err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
					 dentry, NULL);
			if (!err || err == -ENOENT)
				deleted = 1;
		}
	} else {
		err = -ENOENT;
	}
	dput(dentry);
	inode_unlock(d_inode(parent.dentry));

	path_put(&parent);
	if (deleted && strchr(nodename, '/'))
		delete_path(nodename);
	return err;
}

/*
 * If configured, or requested by the commandline, devtmpfs will be
 * auto-mounted after the kernel has mounted the root filesystem.
 */
int __init devtmpfs_mount(void)
{
	int err;

	if (!mount_dev)
		return 0;

	if (!thread)
		return 0;

	err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		pr_info("error mounting %d\n", err);
	else
		pr_info("mounted\n");
	return err;
}

static __initdata DECLARE_COMPLETION(setup_done);

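/*
 * Dispatch a queued request: a non-zero mode means "create this node",
 * a zero mode means "remove it".
 */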
static int handle(const char *name, umode_t mode, kuid_t uid, kgid_t gid,
		  struct device *dev)
{
	if (mode)
		return handle_create(name, mode, uid, gid, dev);
	else
		return handle_remove(name, dev);
}

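/*
 * Main loop of the kdevtmpfs thread: grab the whole pending request list
 * under the spinlock, handle each request outside the lock, complete the
 * waiters, and sleep until new work is queued.
 */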
static void __noreturn devtmpfs_work_loop(void)
{
	while (1) {
		spin_lock(&req_lock);
		while (requests) {
			struct req *req = requests;
			requests = NULL;
			spin_unlock(&req_lock);
			while (req) {
				struct req *next = req->next;
				req->err = handle(req->name, req->mode,
						  req->uid, req->gid, req->dev);
				complete(&req->done);
				req = next;
			}
			spin_lock(&req_lock);
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock(&req_lock);
		schedule();
	}
}

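/*
 * Set up the private mount namespace of the kdevtmpfs thread: unshare,
 * mount the internal devtmpfs instance on "/" and chroot into it, so all
 * node paths handled by the thread are relative to the devtmpfs root.
 */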
static noinline int __init devtmpfs_setup(void *p)
{
	int err;

	err = ksys_unshare(CLONE_NEWNS);
	if (err)
		goto out;
	err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
	if (err)
		goto out;
	init_chdir("/.."); /* will traverse into overmounted root */
	init_chroot(".");
out:
	*(int *)p = err;
	return err;
}

/*
 * The __ref annotation is needed because devtmpfsd() calls devtmpfs_setup(),
 * which must be __init for the init-only routines it uses. That call is safe
 * because devtmpfs_init(), itself __init, synchronously waits for the setup
 * to complete before init memory can be freed.
 */
static int __ref devtmpfsd(void *p)
{
	int err = devtmpfs_setup(p);

	complete(&setup_done);
	if (err)
		return err;
	devtmpfs_work_loop();
	return 0;
}

/*
 * Create the devtmpfs instance; driver-core devices will add their
 * device nodes here.
 */
int __init devtmpfs_init(void)
{
	char opts[] = "mode=0755";
	int err;

	mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
	if (IS_ERR(mnt)) {
		pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
		return PTR_ERR(mnt);
	}
	err = register_filesystem(&dev_fs_type);
	if (err) {
		pr_err("unable to register devtmpfs type %d\n", err);
		return err;
	}

	thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
	if (!IS_ERR(thread)) {
		wait_for_completion(&setup_done);
	} else {
		err = PTR_ERR(thread);
		thread = NULL;
	}

	if (err) {
		pr_err("unable to create devtmpfs %d\n", err);
		unregister_filesystem(&dev_fs_type);
		thread = NULL;
		return err;
	}

	pr_info("initialized\n");
	return 0;
}