1/*
2 * fs/kernfs/dir.c - kernfs directory implementation
3 *
4 * Copyright (c) 2001-3 Patrick Mochel
5 * Copyright (c) 2007 SUSE Linux Products GmbH
6 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
7 *
8 * This file is released under the GPLv2.
9 */
10
11#include <linux/sched.h>
12#include <linux/fs.h>
13#include <linux/namei.h>
14#include <linux/idr.h>
15#include <linux/slab.h>
16#include <linux/security.h>
17#include <linux/hash.h>
18
19#include "kernfs-internal.h"
20
21DEFINE_MUTEX(kernfs_mutex);
22static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
23static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */
24static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
25
26#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
27
28static bool kernfs_active(struct kernfs_node *kn)
29{
30 lockdep_assert_held(&kernfs_mutex);
31 return atomic_read(&kn->active) >= 0;
32}
33
34static bool kernfs_lockdep(struct kernfs_node *kn)
35{
36#ifdef CONFIG_DEBUG_LOCK_ALLOC
37 return kn->flags & KERNFS_LOCKDEP;
38#else
39 return false;
40#endif
41}
42
43static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
44{
45 if (!kn)
46 return strlcpy(buf, "(null)", buflen);
47
48 return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
49}
50
51/* kernfs_depth - compute depth from @from to @to */
52static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
53{
54 size_t depth = 0;
55
56 while (to->parent && to != from) {
57 depth++;
58 to = to->parent;
59 }
60 return depth;
61}
62
63static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
64 struct kernfs_node *b)
65{
66 size_t da, db;
67 struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
68
69 if (ra != rb)
70 return NULL;
71
72 da = kernfs_depth(ra->kn, a);
73 db = kernfs_depth(rb->kn, b);
74
75 while (da > db) {
76 a = a->parent;
77 da--;
78 }
79 while (db > da) {
80 b = b->parent;
81 db--;
82 }
83
84 /* worst case b and a will be the same at root */
85 while (b != a) {
86 b = b->parent;
87 a = a->parent;
88 }
89
90 return a;
91}
92
93/**
94 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
95 * where kn_from is treated as root of the path.
96 * @kn_from: kernfs node which should be treated as root for the path
97 * @kn_to: kernfs node to which path is needed
98 * @buf: buffer to copy the path into
99 * @buflen: size of @buf
100 *
101 * We need to handle a couple of scenarios here:
102 * [1] when @kn_from is an ancestor of @kn_to at some level
103 * kn_from: /n1/n2/n3
104 * kn_to: /n1/n2/n3/n4/n5
105 * result: /n4/n5
106 *
107 * [2] when @kn_from is on a different hierarchy and we need to find common
108 * ancestor between @kn_from and @kn_to.
109 * kn_from: /n1/n2/n3/n4
110 * kn_to: /n1/n2/n5
111 * result: /../../n5
112 * OR
113 * kn_from: /n1/n2/n3/n4/n5 [depth=5]
114 * kn_to: /n1/n2/n3 [depth=3]
115 * result: /../..
116 *
117 * [3] when @kn_to is NULL result will be "(null)"
118 *
119 * Returns the length of the full path. If the full length is equal to or
120 * greater than @buflen, @buf contains the truncated path with the trailing
121 * '\0'. On error, -errno is returned.
122 */
123static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
124 struct kernfs_node *kn_from,
125 char *buf, size_t buflen)
126{
127 struct kernfs_node *kn, *common;
128 const char parent_str[] = "/..";
129 size_t depth_from, depth_to, len = 0;
130 int i, j;
131
132 if (!kn_to)
133 return strlcpy(buf, "(null)", buflen);
134
135 if (!kn_from)
136 kn_from = kernfs_root(kn_to)->kn;
137
138 if (kn_from == kn_to)
139 return strlcpy(buf, "/", buflen);
140
141 common = kernfs_common_ancestor(kn_from, kn_to);
142 if (WARN_ON(!common))
143 return -EINVAL;
144
145 depth_to = kernfs_depth(common, kn_to);
146 depth_from = kernfs_depth(common, kn_from);
147
148 if (buf)
149 buf[0] = '\0';
150
151 for (i = 0; i < depth_from; i++)
152 len += strlcpy(buf + len, parent_str,
153 len < buflen ? buflen - len : 0);
154
155	/* Append the remaining components, walking from @common down to @kn_to */
156 for (i = depth_to - 1; i >= 0; i--) {
157 for (kn = kn_to, j = 0; j < i; j++)
158 kn = kn->parent;
159 len += strlcpy(buf + len, "/",
160 len < buflen ? buflen - len : 0);
161 len += strlcpy(buf + len, kn->name,
162 len < buflen ? buflen - len : 0);
163 }
164
165 return len;
166}
167
168/**
169 * kernfs_name - obtain the name of a given node
170 * @kn: kernfs_node of interest
171 * @buf: buffer to copy @kn's name into
172 * @buflen: size of @buf
173 *
174 * Copies the name of @kn into @buf of @buflen bytes. The behavior is
175 * similar to strlcpy(). It returns the length of @kn's name and if @buf
176 * isn't long enough, it's filled up to @buflen-1 and nul-terminated.
177 *
178 * Fills buffer with "(null)" if @kn is NULL.
179 *
180 * This function can be called from any context.
181 */
182int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
183{
184 unsigned long flags;
185 int ret;
186
187 spin_lock_irqsave(&kernfs_rename_lock, flags);
188 ret = kernfs_name_locked(kn, buf, buflen);
189 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
190 return ret;
191}
192
193/**
194 * kernfs_path_from_node - build path of node @to relative to @from.
195 * @from: parent kernfs_node relative to which we need to build the path
196 * @to: kernfs_node of interest
197 * @buf: buffer to copy @to's path into
198 * @buflen: size of @buf
199 *
200 * Builds @to's path relative to @from in @buf. @from and @to must
201 * be on the same kernfs-root. If @from is not parent of @to, then a relative
202 * path (which includes '..'s) as needed to reach from @from to @to is
203 * returned.
204 *
205 * Returns the length of the full path. If the full length is equal to or
206 * greater than @buflen, @buf contains the truncated path with the trailing
207 * '\0'. On error, -errno is returned.
208 */
209int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
210 char *buf, size_t buflen)
211{
212 unsigned long flags;
213 int ret;
214
215 spin_lock_irqsave(&kernfs_rename_lock, flags);
216 ret = kernfs_path_from_node_locked(to, from, buf, buflen);
217 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
218 return ret;
219}
220EXPORT_SYMBOL_GPL(kernfs_path_from_node);
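/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller printing @kn's path relative to an ancestor @anc would check
 * the return value against the buffer size to detect truncation, since
 * the returned length is that of the full, untruncated path:
 *
 *	char buf[PATH_MAX];
 *	int len = kernfs_path_from_node(kn, anc, buf, sizeof(buf));
 *
 *	if (len < 0)
 *		return len;
 *	if (len >= sizeof(buf))
 *		return -ENAMETOOLONG;
 *	pr_info("path: %s\n", buf);
 */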
221
222/**
223 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
224 * @kn: kernfs_node of interest
225 *
226 * This function can be called from any context.
227 */
228void pr_cont_kernfs_name(struct kernfs_node *kn)
229{
230 unsigned long flags;
231
232 spin_lock_irqsave(&kernfs_rename_lock, flags);
233
234 kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
235 pr_cont("%s", kernfs_pr_cont_buf);
236
237 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
238}
239
240/**
241 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
242 * @kn: kernfs_node of interest
243 *
244 * This function can be called from any context.
245 */
246void pr_cont_kernfs_path(struct kernfs_node *kn)
247{
248 unsigned long flags;
249 int sz;
250
251 spin_lock_irqsave(&kernfs_rename_lock, flags);
252
253 sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
254 sizeof(kernfs_pr_cont_buf));
255 if (sz < 0) {
256 pr_cont("(error)");
257 goto out;
258 }
259
260 if (sz >= sizeof(kernfs_pr_cont_buf)) {
261 pr_cont("(name too long)");
262 goto out;
263 }
264
265 pr_cont("%s", kernfs_pr_cont_buf);
266
267out:
268 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
269}
270
271/**
272 * kernfs_get_parent - determine the parent node and pin it
273 * @kn: kernfs_node of interest
274 *
275 * Determines @kn's parent, pins and returns it. This function can be
276 * called from any context.
277 */
278struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
279{
280 struct kernfs_node *parent;
281 unsigned long flags;
282
283 spin_lock_irqsave(&kernfs_rename_lock, flags);
284 parent = kn->parent;
285 kernfs_get(parent);
286 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
287
288 return parent;
289}
290
291/**
292 * kernfs_name_hash
293 * @name: Null terminated string to hash
294 * @ns: Namespace tag to hash
295 *
296 * Returns a 31-bit hash of ns + name (so it fits in an off_t)
297 */
298static unsigned int kernfs_name_hash(const char *name, const void *ns)
299{
300 unsigned long hash = init_name_hash(ns);
301 unsigned int len = strlen(name);
302 while (len--)
303 hash = partial_name_hash(*name++, hash);
304 hash = end_name_hash(hash);
305 hash &= 0x7fffffffU;
306 /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
307 if (hash < 2)
308 hash += 2;
309 if (hash >= INT_MAX)
310 hash = INT_MAX - 1;
311 return hash;
312}
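/*
 * Note that the reserved values above matter because the hash doubles
 * as the directory offset: kernfs_fop_readdir() stores it in ctx->pos,
 * kernfs_dir_pos() only resumes lookups for offsets strictly between 1
 * and INT_MAX, and 0/1 are left for the "." and ".." entries while
 * INT_MAX marks end-of-directory.
 */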
313
314static int kernfs_name_compare(unsigned int hash, const char *name,
315 const void *ns, const struct kernfs_node *kn)
316{
317 if (hash < kn->hash)
318 return -1;
319 if (hash > kn->hash)
320 return 1;
321 if (ns < kn->ns)
322 return -1;
323 if (ns > kn->ns)
324 return 1;
325 return strcmp(name, kn->name);
326}
327
328static int kernfs_sd_compare(const struct kernfs_node *left,
329 const struct kernfs_node *right)
330{
331 return kernfs_name_compare(left->hash, left->name, left->ns, right);
332}
333
334/**
335 * kernfs_link_sibling - link kernfs_node into sibling rbtree
336 * @kn: kernfs_node of interest
337 *
338 * Link @kn into its sibling rbtree which starts from
339 * @kn->parent->dir.children.
340 *
341 * Locking:
342 * mutex_lock(kernfs_mutex)
343 *
344 * RETURNS:
345 * 0 on success, -EEXIST on failure.
346 */
347static int kernfs_link_sibling(struct kernfs_node *kn)
348{
349 struct rb_node **node = &kn->parent->dir.children.rb_node;
350 struct rb_node *parent = NULL;
351
352 while (*node) {
353 struct kernfs_node *pos;
354 int result;
355
356 pos = rb_to_kn(*node);
357 parent = *node;
358 result = kernfs_sd_compare(kn, pos);
359 if (result < 0)
360 node = &pos->rb.rb_left;
361 else if (result > 0)
362 node = &pos->rb.rb_right;
363 else
364 return -EEXIST;
365 }
366
367 /* add new node and rebalance the tree */
368 rb_link_node(&kn->rb, parent, node);
369 rb_insert_color(&kn->rb, &kn->parent->dir.children);
370
371 /* successfully added, account subdir number */
372 if (kernfs_type(kn) == KERNFS_DIR)
373 kn->parent->dir.subdirs++;
374
375 return 0;
376}
377
378/**
379 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
380 * @kn: kernfs_node of interest
381 *
382 * Try to unlink @kn from its sibling rbtree which starts from
383 * kn->parent->dir.children. Returns %true if @kn was actually
384 * removed, %false if @kn wasn't on the rbtree.
385 *
386 * Locking:
387 * mutex_lock(kernfs_mutex)
388 */
389static bool kernfs_unlink_sibling(struct kernfs_node *kn)
390{
391 if (RB_EMPTY_NODE(&kn->rb))
392 return false;
393
394 if (kernfs_type(kn) == KERNFS_DIR)
395 kn->parent->dir.subdirs--;
396
397 rb_erase(&kn->rb, &kn->parent->dir.children);
398 RB_CLEAR_NODE(&kn->rb);
399 return true;
400}
401
402/**
403 * kernfs_get_active - get an active reference to kernfs_node
404 * @kn: kernfs_node to get an active reference to
405 *
406 * Get an active reference of @kn. This function is noop if @kn
407 * is NULL.
408 *
409 * RETURNS:
410 * Pointer to @kn on success, NULL on failure.
411 */
412struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
413{
414 if (unlikely(!kn))
415 return NULL;
416
417 if (!atomic_inc_unless_negative(&kn->active))
418 return NULL;
419
420 if (kernfs_lockdep(kn))
421 rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
422 return kn;
423}
424
425/**
426 * kernfs_put_active - put an active reference to kernfs_node
427 * @kn: kernfs_node to put an active reference to
428 *
429 * Put an active reference to @kn. This function is noop if @kn
430 * is NULL.
431 */
432void kernfs_put_active(struct kernfs_node *kn)
433{
434	int v;
436
437 if (unlikely(!kn))
438 return;
439
440 if (kernfs_lockdep(kn))
441 rwsem_release(&kn->dep_map, 1, _RET_IP_);
442 v = atomic_dec_return(&kn->active);
443 if (likely(v != KN_DEACTIVATED_BIAS))
444 return;
445
446	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
447}
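/*
 * Illustrative sketch (not from the original source): callers that may
 * race with removal typically bracket their work with the pair above,
 * mirroring what kernfs_iop_mkdir() below does for syscall callbacks;
 * my_do_something() is a hypothetical helper:
 *
 *	if (!kernfs_get_active(kn))
 *		return -ENODEV;
 *	ret = my_do_something(kn);
 *	kernfs_put_active(kn);
 */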
448
449/**
450 * kernfs_drain - drain kernfs_node
451 * @kn: kernfs_node to drain
452 *
453 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
454 * removers may invoke this function concurrently on @kn and all will
455 * return after draining is complete.
456 */
457static void kernfs_drain(struct kernfs_node *kn)
458 __releases(&kernfs_mutex) __acquires(&kernfs_mutex)
459{
460 struct kernfs_root *root = kernfs_root(kn);
461
462 lockdep_assert_held(&kernfs_mutex);
463 WARN_ON_ONCE(kernfs_active(kn));
464
465 mutex_unlock(&kernfs_mutex);
466
467 if (kernfs_lockdep(kn)) {
468 rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
469 if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
470 lock_contended(&kn->dep_map, _RET_IP_);
471 }
472
473 /* but everyone should wait for draining */
474 wait_event(root->deactivate_waitq,
475 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
476
477 if (kernfs_lockdep(kn)) {
478 lock_acquired(&kn->dep_map, _RET_IP_);
479 rwsem_release(&kn->dep_map, 1, _RET_IP_);
480 }
481
482 kernfs_drain_open_files(kn);
483
484 mutex_lock(&kernfs_mutex);
485}
486
487/**
488 * kernfs_get - get a reference count on a kernfs_node
489 * @kn: the target kernfs_node
490 */
491void kernfs_get(struct kernfs_node *kn)
492{
493 if (kn) {
494 WARN_ON(!atomic_read(&kn->count));
495 atomic_inc(&kn->count);
496 }
497}
498EXPORT_SYMBOL_GPL(kernfs_get);
499
500/**
501 * kernfs_put - put a reference count on a kernfs_node
502 * @kn: the target kernfs_node
503 *
504 * Put a reference count of @kn and destroy it if it reached zero.
505 */
506void kernfs_put(struct kernfs_node *kn)
507{
508 struct kernfs_node *parent;
509 struct kernfs_root *root;
510
511 /*
512 * kernfs_node is freed with ->count 0, kernfs_find_and_get_node_by_ino
513 * depends on this to filter out reused stale nodes
514 */
515 if (!kn || !atomic_dec_and_test(&kn->count))
516 return;
517 root = kernfs_root(kn);
518 repeat:
519 /*
520 * Moving/renaming is always done while holding reference.
521 * kn->parent won't change beneath us.
522 */
523 parent = kn->parent;
524
525 WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
526 "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
527 parent ? parent->name : "", kn->name, atomic_read(&kn->active));
528
529 if (kernfs_type(kn) == KERNFS_LINK)
530 kernfs_put(kn->symlink.target_kn);
531
532 kfree_const(kn->name);
533
534 if (kn->iattr) {
535 if (kn->iattr->ia_secdata)
536 security_release_secctx(kn->iattr->ia_secdata,
537 kn->iattr->ia_secdata_len);
538 simple_xattrs_free(&kn->iattr->xattrs);
539 }
540 kfree(kn->iattr);
541 spin_lock(&kernfs_idr_lock);
542 idr_remove(&root->ino_idr, kn->id.ino);
543 spin_unlock(&kernfs_idr_lock);
544 kmem_cache_free(kernfs_node_cache, kn);
545
546 kn = parent;
547 if (kn) {
548 if (atomic_dec_and_test(&kn->count))
549 goto repeat;
550 } else {
551 /* just released the root kn, free @root too */
552 idr_destroy(&root->ino_idr);
553 kfree(root);
554 }
555}
556EXPORT_SYMBOL_GPL(kernfs_put);
557
558static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
559{
560 struct kernfs_node *kn;
561
562 if (flags & LOOKUP_RCU)
563 return -ECHILD;
564
565 /* Always perform fresh lookup for negatives */
566 if (d_really_is_negative(dentry))
567 goto out_bad_unlocked;
568
569 kn = kernfs_dentry_node(dentry);
570 mutex_lock(&kernfs_mutex);
571
572 /* The kernfs node has been deactivated */
573 if (!kernfs_active(kn))
574 goto out_bad;
575
576 /* The kernfs node has been moved? */
577 if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
578 goto out_bad;
579
580 /* The kernfs node has been renamed */
581 if (strcmp(dentry->d_name.name, kn->name) != 0)
582 goto out_bad;
583
584 /* The kernfs node has been moved to a different namespace */
585 if (kn->parent && kernfs_ns_enabled(kn->parent) &&
586 kernfs_info(dentry->d_sb)->ns != kn->ns)
587 goto out_bad;
588
589 mutex_unlock(&kernfs_mutex);
590 return 1;
591out_bad:
592 mutex_unlock(&kernfs_mutex);
593out_bad_unlocked:
594 return 0;
595}
596
597const struct dentry_operations kernfs_dops = {
598 .d_revalidate = kernfs_dop_revalidate,
599};
600
601/**
602 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
603 * @dentry: the dentry in question
604 *
605 * Return the kernfs_node associated with @dentry. If @dentry is not a
606 * kernfs one, %NULL is returned.
607 *
608 * While the returned kernfs_node will stay accessible as long as @dentry
609 * is accessible, the returned node can be in any state and the caller is
610 * fully responsible for determining what's accessible.
611 */
612struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
613{
614 if (dentry->d_sb->s_op == &kernfs_sops &&
615 !d_really_is_negative(dentry))
616 return kernfs_dentry_node(dentry);
617 return NULL;
618}
619
620static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
621 const char *name, umode_t mode,
622 unsigned flags)
623{
624 struct kernfs_node *kn;
625 u32 gen;
626 int cursor;
627 int ret;
628
629 name = kstrdup_const(name, GFP_KERNEL);
630 if (!name)
631 return NULL;
632
633 kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
634 if (!kn)
635 goto err_out1;
636
637 idr_preload(GFP_KERNEL);
638 spin_lock(&kernfs_idr_lock);
639 cursor = idr_get_cursor(&root->ino_idr);
640 ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
641 if (ret >= 0 && ret < cursor)
642 root->next_generation++;
643 gen = root->next_generation;
644 spin_unlock(&kernfs_idr_lock);
645 idr_preload_end();
646 if (ret < 0)
647 goto err_out2;
648 kn->id.ino = ret;
649 kn->id.generation = gen;
650
651 /*
652 * set ino first. This barrier is paired with atomic_inc_not_zero in
653 * kernfs_find_and_get_node_by_ino
654 */
655 smp_mb__before_atomic();
656 atomic_set(&kn->count, 1);
657 atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
658 RB_CLEAR_NODE(&kn->rb);
659
660 kn->name = name;
661 kn->mode = mode;
662 kn->flags = flags;
663
664 return kn;
665
666 err_out2:
667 kmem_cache_free(kernfs_node_cache, kn);
668 err_out1:
669 kfree_const(name);
670 return NULL;
671}
672
673struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
674 const char *name, umode_t mode,
675 unsigned flags)
676{
677 struct kernfs_node *kn;
678
679 kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
680 if (kn) {
681 kernfs_get(parent);
682 kn->parent = parent;
683 }
684 return kn;
685}
686
687/*
688 * kernfs_find_and_get_node_by_ino - get kernfs_node from inode number
689 * @root: the kernfs root
690 * @ino: inode number
691 *
692 * RETURNS:
693 * NULL on failure, or a kernfs_node with its reference count incremented
694 */
695struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
696 unsigned int ino)
697{
698 struct kernfs_node *kn;
699
700 rcu_read_lock();
701 kn = idr_find(&root->ino_idr, ino);
702 if (!kn)
703 goto out;
704
705 /*
706 * Since kernfs_node is freed in RCU, it's possible an old node for ino
707 * is freed, but reused before RCU grace period. But a freed node (see
708 * kernfs_put) or an incompletely initialized node (see
709 * __kernfs_new_node) should have 'count' 0. We can use this fact to
710 * filter out such node.
711 */
712 if (!atomic_inc_not_zero(&kn->count)) {
713 kn = NULL;
714 goto out;
715 }
716
717 /*
718 * The node could be a new node or a reused node. If it's a new node,
719 * we are ok. If it's reused because of RCU (because of
720 * SLAB_TYPESAFE_BY_RCU), __kernfs_new_node() always sets its 'ino'
721 * before 'count'. So if 'count' is up to date, 'ino' should be too,
722 * hence we can use 'ino' to filter out stale nodes.
723 */
724 if (kn->id.ino != ino)
725 goto out;
726 rcu_read_unlock();
727
728 return kn;
729out:
730 rcu_read_unlock();
731 kernfs_put(kn);
732 return NULL;
733}
734
735/**
736 * kernfs_add_one - add kernfs_node to parent without warning
737 * @kn: kernfs_node to be added
738 *
739 * The caller must already have initialized @kn->parent. This
740 * function increments nlink of the parent's inode if @kn is a
741 * directory and links @kn into the parent's children rbtree.
742 *
743 * RETURNS:
744 * 0 on success, -EEXIST if entry with the given name already
745 * exists.
746 */
747int kernfs_add_one(struct kernfs_node *kn)
748{
749 struct kernfs_node *parent = kn->parent;
750 struct kernfs_iattrs *ps_iattr;
751 bool has_ns;
752 int ret;
753
754 mutex_lock(&kernfs_mutex);
755
756 ret = -EINVAL;
757 has_ns = kernfs_ns_enabled(parent);
758 if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
759 has_ns ? "required" : "invalid", parent->name, kn->name))
760 goto out_unlock;
761
762 if (kernfs_type(parent) != KERNFS_DIR)
763 goto out_unlock;
764
765 ret = -ENOENT;
766 if (parent->flags & KERNFS_EMPTY_DIR)
767 goto out_unlock;
768
769 if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
770 goto out_unlock;
771
772 kn->hash = kernfs_name_hash(kn->name, kn->ns);
773
774 ret = kernfs_link_sibling(kn);
775 if (ret)
776 goto out_unlock;
777
778 /* Update timestamps on the parent */
779 ps_iattr = parent->iattr;
780 if (ps_iattr) {
781 struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
782 ktime_get_real_ts(&ps_iattrs->ia_ctime);
783 ps_iattrs->ia_mtime = ps_iattrs->ia_ctime;
784 }
785
786 mutex_unlock(&kernfs_mutex);
787
788 /*
789 * Activate the new node unless CREATE_DEACTIVATED is requested.
790 * If not activated here, the kernfs user is responsible for
791 * activating the node with kernfs_activate(). A node which hasn't
792 * been activated is not visible to userland and its removal won't
793 * trigger deactivation.
794 */
795 if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
796 kernfs_activate(kn);
797 return 0;
798
799out_unlock:
800 mutex_unlock(&kernfs_mutex);
801 return ret;
802}
803
804/**
805 * kernfs_find_ns - find kernfs_node with the given name
806 * @parent: kernfs_node to search under
807 * @name: name to look for
808 * @ns: the namespace tag to use
809 *
810 * Look for kernfs_node with name @name under @parent. Returns pointer to
811 * the found kernfs_node on success, %NULL on failure.
812 */
813static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
814 const unsigned char *name,
815 const void *ns)
816{
817 struct rb_node *node = parent->dir.children.rb_node;
818 bool has_ns = kernfs_ns_enabled(parent);
819 unsigned int hash;
820
821 lockdep_assert_held(&kernfs_mutex);
822
823 if (has_ns != (bool)ns) {
824 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
825 has_ns ? "required" : "invalid", parent->name, name);
826 return NULL;
827 }
828
829 hash = kernfs_name_hash(name, ns);
830 while (node) {
831 struct kernfs_node *kn;
832 int result;
833
834 kn = rb_to_kn(node);
835 result = kernfs_name_compare(hash, name, ns, kn);
836 if (result < 0)
837 node = node->rb_left;
838 else if (result > 0)
839 node = node->rb_right;
840 else
841 return kn;
842 }
843 return NULL;
844}
845
846static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
847 const unsigned char *path,
848 const void *ns)
849{
850 size_t len;
851 char *p, *name;
852
853 lockdep_assert_held(&kernfs_mutex);
854
855	/* grab kernfs_rename_lock to piggyback on kernfs_pr_cont_buf */
856 spin_lock_irq(&kernfs_rename_lock);
857
858 len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
859
860 if (len >= sizeof(kernfs_pr_cont_buf)) {
861 spin_unlock_irq(&kernfs_rename_lock);
862 return NULL;
863 }
864
865 p = kernfs_pr_cont_buf;
866
867 while ((name = strsep(&p, "/")) && parent) {
868 if (*name == '\0')
869 continue;
870 parent = kernfs_find_ns(parent, name, ns);
871 }
872
873 spin_unlock_irq(&kernfs_rename_lock);
874
875 return parent;
876}
877
878/**
879 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
880 * @parent: kernfs_node to search under
881 * @name: name to look for
882 * @ns: the namespace tag to use
883 *
884 * Look for kernfs_node with name @name under @parent and get a reference
885 * if found. This function may sleep and returns pointer to the found
886 * kernfs_node on success, %NULL on failure.
887 */
888struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
889 const char *name, const void *ns)
890{
891 struct kernfs_node *kn;
892
893 mutex_lock(&kernfs_mutex);
894 kn = kernfs_find_ns(parent, name, ns);
895 kernfs_get(kn);
896 mutex_unlock(&kernfs_mutex);
897
898 return kn;
899}
900EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
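/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * lookup of a child named "foo" without a namespace tag pairs the
 * returned reference with kernfs_put():
 *
 *	struct kernfs_node *child;
 *
 *	child = kernfs_find_and_get_ns(parent, "foo", NULL);
 *	if (!child)
 *		return -ENOENT;
 *	...
 *	kernfs_put(child);
 */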
901
902/**
903 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
904 * @parent: kernfs_node to search under
905 * @path: path to look for
906 * @ns: the namespace tag to use
907 *
908 * Look for kernfs_node with path @path under @parent and get a reference
909 * if found. This function may sleep and returns pointer to the found
910 * kernfs_node on success, %NULL on failure.
911 */
912struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
913 const char *path, const void *ns)
914{
915 struct kernfs_node *kn;
916
917 mutex_lock(&kernfs_mutex);
918 kn = kernfs_walk_ns(parent, path, ns);
919 kernfs_get(kn);
920 mutex_unlock(&kernfs_mutex);
921
922 return kn;
923}
924
925/**
926 * kernfs_create_root - create a new kernfs hierarchy
927 * @scops: optional syscall operations for the hierarchy
928 * @flags: KERNFS_ROOT_* flags
929 * @priv: opaque data associated with the new directory
930 *
931 * Returns the root of the new hierarchy on success, ERR_PTR() value on
932 * failure.
933 */
934struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
935 unsigned int flags, void *priv)
936{
937 struct kernfs_root *root;
938 struct kernfs_node *kn;
939
940 root = kzalloc(sizeof(*root), GFP_KERNEL);
941 if (!root)
942 return ERR_PTR(-ENOMEM);
943
944 idr_init(&root->ino_idr);
945 INIT_LIST_HEAD(&root->supers);
946 root->next_generation = 1;
947
948 kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
949 KERNFS_DIR);
950 if (!kn) {
951 idr_destroy(&root->ino_idr);
952 kfree(root);
953 return ERR_PTR(-ENOMEM);
954 }
955
956 kn->priv = priv;
957 kn->dir.root = root;
958
959 root->syscall_ops = scops;
960 root->flags = flags;
961 root->kn = kn;
962 init_waitqueue_head(&root->deactivate_waitq);
963
964 if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
965 kernfs_activate(kn);
966
967 return root;
968}
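/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * user that wants several nodes to become visible atomically can pass
 * KERNFS_ROOT_CREATE_DEACTIVATED and activate the tree once it is
 * fully built; my_scops and my_priv are placeholders:
 *
 *	root = kernfs_create_root(&my_scops, KERNFS_ROOT_CREATE_DEACTIVATED,
 *				  my_priv);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	dir = kernfs_create_dir_ns(root->kn, "state", 0755, my_priv, NULL);
 *	...
 *	kernfs_activate(root->kn);
 */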
969
970/**
971 * kernfs_destroy_root - destroy a kernfs hierarchy
972 * @root: root of the hierarchy to destroy
973 *
974 * Destroy the hierarchy anchored at @root by removing all existing
975 * directories and destroying @root.
976 */
977void kernfs_destroy_root(struct kernfs_root *root)
978{
979 kernfs_remove(root->kn); /* will also free @root */
980}
981
982/**
983 * kernfs_create_dir_ns - create a directory
984 * @parent: parent in which to create a new directory
985 * @name: name of the new directory
986 * @mode: mode of the new directory
987 * @priv: opaque data associated with the new directory
988 * @ns: optional namespace tag of the directory
989 *
990 * Returns the created node on success, ERR_PTR() value on failure.
991 */
992struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
993 const char *name, umode_t mode,
994 void *priv, const void *ns)
995{
996 struct kernfs_node *kn;
997 int rc;
998
999 /* allocate */
1000 kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
1001 if (!kn)
1002 return ERR_PTR(-ENOMEM);
1003
1004 kn->dir.root = parent->dir.root;
1005 kn->ns = ns;
1006 kn->priv = priv;
1007
1008 /* link in */
1009 rc = kernfs_add_one(kn);
1010 if (!rc)
1011 return kn;
1012
1013 kernfs_put(kn);
1014 return ERR_PTR(rc);
1015}
1016
1017/**
1018 * kernfs_create_empty_dir - create an always empty directory
1019 * @parent: parent in which to create a new directory
1020 * @name: name of the new directory
1021 *
1022 * Returns the created node on success, ERR_PTR() value on failure.
1023 */
1024struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
1025 const char *name)
1026{
1027 struct kernfs_node *kn;
1028 int rc;
1029
1030 /* allocate */
1031 kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
1032 if (!kn)
1033 return ERR_PTR(-ENOMEM);
1034
1035 kn->flags |= KERNFS_EMPTY_DIR;
1036 kn->dir.root = parent->dir.root;
1037 kn->ns = NULL;
1038 kn->priv = NULL;
1039
1040 /* link in */
1041 rc = kernfs_add_one(kn);
1042 if (!rc)
1043 return kn;
1044
1045 kernfs_put(kn);
1046 return ERR_PTR(rc);
1047}
1048
1049static struct dentry *kernfs_iop_lookup(struct inode *dir,
1050 struct dentry *dentry,
1051 unsigned int flags)
1052{
1053 struct dentry *ret;
1054 struct kernfs_node *parent = dir->i_private;
1055 struct kernfs_node *kn;
1056 struct inode *inode;
1057 const void *ns = NULL;
1058
1059 mutex_lock(&kernfs_mutex);
1060
1061 if (kernfs_ns_enabled(parent))
1062 ns = kernfs_info(dir->i_sb)->ns;
1063
1064 kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
1065
1066 /* no such entry */
1067 if (!kn || !kernfs_active(kn)) {
1068 ret = NULL;
1069 goto out_unlock;
1070 }
1071
1072 /* attach dentry and inode */
1073 inode = kernfs_get_inode(dir->i_sb, kn);
1074 if (!inode) {
1075 ret = ERR_PTR(-ENOMEM);
1076 goto out_unlock;
1077 }
1078
1079 /* instantiate and hash dentry */
1080 ret = d_splice_alias(inode, dentry);
1081 out_unlock:
1082 mutex_unlock(&kernfs_mutex);
1083 return ret;
1084}
1085
1086static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
1087 umode_t mode)
1088{
1089 struct kernfs_node *parent = dir->i_private;
1090 struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
1091 int ret;
1092
1093 if (!scops || !scops->mkdir)
1094 return -EPERM;
1095
1096 if (!kernfs_get_active(parent))
1097 return -ENODEV;
1098
1099 ret = scops->mkdir(parent, dentry->d_name.name, mode);
1100
1101 kernfs_put_active(parent);
1102 return ret;
1103}
1104
1105static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
1106{
1107 struct kernfs_node *kn = kernfs_dentry_node(dentry);
1108 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1109 int ret;
1110
1111 if (!scops || !scops->rmdir)
1112 return -EPERM;
1113
1114 if (!kernfs_get_active(kn))
1115 return -ENODEV;
1116
1117 ret = scops->rmdir(kn);
1118
1119 kernfs_put_active(kn);
1120 return ret;
1121}
1122
1123static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
1124 struct inode *new_dir, struct dentry *new_dentry,
1125 unsigned int flags)
1126{
1127 struct kernfs_node *kn = kernfs_dentry_node(old_dentry);
1128 struct kernfs_node *new_parent = new_dir->i_private;
1129 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1130 int ret;
1131
1132 if (flags)
1133 return -EINVAL;
1134
1135 if (!scops || !scops->rename)
1136 return -EPERM;
1137
1138 if (!kernfs_get_active(kn))
1139 return -ENODEV;
1140
1141 if (!kernfs_get_active(new_parent)) {
1142 kernfs_put_active(kn);
1143 return -ENODEV;
1144 }
1145
1146 ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
1147
1148 kernfs_put_active(new_parent);
1149 kernfs_put_active(kn);
1150 return ret;
1151}
1152
1153const struct inode_operations kernfs_dir_iops = {
1154 .lookup = kernfs_iop_lookup,
1155 .permission = kernfs_iop_permission,
1156 .setattr = kernfs_iop_setattr,
1157 .getattr = kernfs_iop_getattr,
1158 .listxattr = kernfs_iop_listxattr,
1159
1160 .mkdir = kernfs_iop_mkdir,
1161 .rmdir = kernfs_iop_rmdir,
1162 .rename = kernfs_iop_rename,
1163};
1164
1165static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
1166{
1167 struct kernfs_node *last;
1168
1169 while (true) {
1170 struct rb_node *rbn;
1171
1172 last = pos;
1173
1174 if (kernfs_type(pos) != KERNFS_DIR)
1175 break;
1176
1177 rbn = rb_first(&pos->dir.children);
1178 if (!rbn)
1179 break;
1180
1181 pos = rb_to_kn(rbn);
1182 }
1183
1184 return last;
1185}
1186
1187/**
1188 * kernfs_next_descendant_post - find the next descendant for post-order walk
1189 * @pos: the current position (%NULL to initiate traversal)
1190 * @root: kernfs_node whose descendants to walk
1191 *
1192 * Find the next descendant to visit for post-order traversal of @root's
1193 * descendants. @root is included in the iteration and is the last node to be
1194 * visited.
1195 */
1196static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
1197 struct kernfs_node *root)
1198{
1199 struct rb_node *rbn;
1200
1201 lockdep_assert_held(&kernfs_mutex);
1202
1203 /* if first iteration, visit leftmost descendant which may be root */
1204 if (!pos)
1205 return kernfs_leftmost_descendant(root);
1206
1207 /* if we visited @root, we're done */
1208 if (pos == root)
1209 return NULL;
1210
1211 /* if there's an unvisited sibling, visit its leftmost descendant */
1212 rbn = rb_next(&pos->rb);
1213 if (rbn)
1214 return kernfs_leftmost_descendant(rb_to_kn(rbn));
1215
1216 /* no sibling left, visit parent */
1217 return pos->parent;
1218}
1219
1220/**
1221 * kernfs_activate - activate a node which started deactivated
1222 * @kn: kernfs_node whose subtree is to be activated
1223 *
1224 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
1225 * needs to be explicitly activated. A node which hasn't been activated
1226 * isn't visible to userland and deactivation is skipped during its
1227 * removal. This is useful to construct atomic init sequences where
1228 * creation of multiple nodes should either succeed or fail atomically.
1229 *
1230 * The caller is responsible for ensuring that this function is not called
1231 * after kernfs_remove*() is invoked on @kn.
1232 */
1233void kernfs_activate(struct kernfs_node *kn)
1234{
1235 struct kernfs_node *pos;
1236
1237 mutex_lock(&kernfs_mutex);
1238
1239 pos = NULL;
1240 while ((pos = kernfs_next_descendant_post(pos, kn))) {
1241 if (!pos || (pos->flags & KERNFS_ACTIVATED))
1242 continue;
1243
1244 WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
1245 WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
1246
1247 atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
1248 pos->flags |= KERNFS_ACTIVATED;
1249 }
1250
1251 mutex_unlock(&kernfs_mutex);
1252}
1253
1254static void __kernfs_remove(struct kernfs_node *kn)
1255{
1256 struct kernfs_node *pos;
1257
1258 lockdep_assert_held(&kernfs_mutex);
1259
1260 /*
1261 * Short-circuit if non-root @kn has already finished removal.
1262 * This is for kernfs_remove_self() which plays with active ref
1263 * after removal.
1264 */
1265 if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
1266 return;
1267
1268 pr_debug("kernfs %s: removing\n", kn->name);
1269
1270 /* prevent any new usage under @kn by deactivating all nodes */
1271 pos = NULL;
1272 while ((pos = kernfs_next_descendant_post(pos, kn)))
1273 if (kernfs_active(pos))
1274 atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
1275
1276 /* deactivate and unlink the subtree node-by-node */
1277 do {
1278 pos = kernfs_leftmost_descendant(kn);
1279
1280 /*
1281 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
1282 * base ref could have been put by someone else by the time
1283 * the function returns. Make sure it doesn't go away
1284 * underneath us.
1285 */
1286 kernfs_get(pos);
1287
1288 /*
1289 * Drain iff @kn was activated. This avoids draining and
1290 * its lockdep annotations for nodes which have never been
1291 * activated and allows embedding kernfs_remove() in create
1292 * error paths without worrying about draining.
1293 */
1294 if (kn->flags & KERNFS_ACTIVATED)
1295 kernfs_drain(pos);
1296 else
1297 WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
1298
1299 /*
1300 * kernfs_unlink_sibling() succeeds once per node. Use it
1301 * to decide who's responsible for cleanups.
1302 */
1303 if (!pos->parent || kernfs_unlink_sibling(pos)) {
1304 struct kernfs_iattrs *ps_iattr =
1305 pos->parent ? pos->parent->iattr : NULL;
1306
1307 /* update timestamps on the parent */
1308 if (ps_iattr) {
1309 ktime_get_real_ts(&ps_iattr->ia_iattr.ia_ctime);
1310 ps_iattr->ia_iattr.ia_mtime =
1311 ps_iattr->ia_iattr.ia_ctime;
1312 }
1313
1314 kernfs_put(pos);
1315 }
1316
1317 kernfs_put(pos);
1318 } while (pos != kn);
1319}
1320
1321/**
1322 * kernfs_remove - remove a kernfs_node recursively
1323 * @kn: the kernfs_node to remove
1324 *
1325 * Remove @kn along with all its subdirectories and files.
1326 */
1327void kernfs_remove(struct kernfs_node *kn)
1328{
1329 mutex_lock(&kernfs_mutex);
1330 __kernfs_remove(kn);
1331 mutex_unlock(&kernfs_mutex);
1332}
1333
1334/**
1335 * kernfs_break_active_protection - break out of active protection
1336 * @kn: the self kernfs_node
1337 *
1338 * The caller must be running off of a kernfs operation which is invoked
1339 * with an active reference - e.g. one of kernfs_ops. Each invocation of
1340 * this function must also be matched with an invocation of
1341 * kernfs_unbreak_active_protection().
1342 *
1343 * This function releases the active reference of @kn the caller is
1344 * holding. Once this function is called, @kn may be removed at any point
1345 * and the caller is solely responsible for ensuring that the objects it
1346 * dereferences are accessible.
1347 */
1348void kernfs_break_active_protection(struct kernfs_node *kn)
1349{
1350 /*
1351 * Take ourself out of the active ref dependency chain. If
1352 * we're called without an active ref, lockdep will complain.
1353 */
1354 kernfs_put_active(kn);
1355}
1356
1357/**
1358 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
1359 * @kn: the self kernfs_node
1360 *
1361 * If kernfs_break_active_protection() was called, this function must be
1362 * invoked before finishing the kernfs operation. Note that while this
1363 * function restores the active reference, it doesn't and can't actually
1364 * restore the active protection - @kn may already be, or be in the process
1365 * of being removed. Once kernfs_break_active_protection() is invoked, that
1366 * protection is irreversibly gone for the kernfs operation instance.
1367 *
1368 * While this function may be called at any point after
1369 * kernfs_break_active_protection() is invoked, its most useful location
1370 * would be right before the enclosing kernfs operation returns.
1371 */
1372void kernfs_unbreak_active_protection(struct kernfs_node *kn)
1373{
1374 /*
1375 * @kn->active could be in any state; however, the increment we do
1376 * here will be undone as soon as the enclosing kernfs operation
1377 * finishes and this temporary bump can't break anything. If @kn
1378 * is alive, nothing changes. If @kn is being deactivated, the
1379 * soon-to-follow put will either finish deactivation or restore
1380 * deactivated state. If @kn is already removed, the temporary
1381 * bump is guaranteed to be gone before @kn is released.
1382 */
1383 atomic_inc(&kn->active);
1384 if (kernfs_lockdep(kn))
1385 rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
1386}
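/*
 * Illustrative sketch (not from the original source): a kernfs_ops
 * method that must block on something which may itself wait for this
 * node's removal can drop and restore active protection around the
 * blocking section; my_wait_for_event() is a hypothetical helper:
 *
 *	kernfs_break_active_protection(kn);
 *	ret = my_wait_for_event();
 *	kernfs_unbreak_active_protection(kn);
 */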
1387
1388/**
1389 * kernfs_remove_self - remove a kernfs_node from its own method
1390 * @kn: the self kernfs_node to remove
1391 *
1392 * The caller must be running off of a kernfs operation which is invoked
1393 * with an active reference - e.g. one of kernfs_ops. This can be used to
1394 * implement a file operation which deletes itself.
1395 *
1396 * For example, the "delete" file for a sysfs device directory can be
1397 * implemented by invoking kernfs_remove_self() on the "delete" file
1398 * itself. This function breaks the circular dependency of trying to
1399 * deactivate self while holding an active ref itself. It isn't necessary
1400 * to modify the usual removal path to use kernfs_remove_self(). The
1401 * "delete" implementation can simply invoke kernfs_remove_self() on self
1402 * before proceeding with the usual removal path. kernfs will ignore later
1403 * kernfs_remove() on self.
1404 *
1405 * kernfs_remove_self() can be called multiple times concurrently on the
1406 * same kernfs_node. Only the first one actually performs removal and
1407 * returns %true. All others will wait until the kernfs operation which
1408 * won self-removal finishes and return %false. Note that the losers wait
1409 * for the completion of not only the winning kernfs_remove_self() but also
1410 * the whole kernfs_ops which won the arbitration. This can be used to
1411 * guarantee, for example, all concurrent writes to a "delete" file to
1412 * finish only after the whole operation is complete.
1413 */
1414bool kernfs_remove_self(struct kernfs_node *kn)
1415{
1416 bool ret;
1417
1418 mutex_lock(&kernfs_mutex);
1419 kernfs_break_active_protection(kn);
1420
1421 /*
1422 * SUICIDAL is used to arbitrate among competing invocations. Only
1423 * the first one will actually perform removal. When the removal
1424 * is complete, SUICIDED is set and the active ref is restored
1425 * while holding kernfs_mutex. The ones which lost arbitration
1426 * wait for SUICIDED && drained, which can happen only after the
1427 * enclosing kernfs operation which executed the winning instance
1428 * of kernfs_remove_self() finished.
1429 */
1430 if (!(kn->flags & KERNFS_SUICIDAL)) {
1431 kn->flags |= KERNFS_SUICIDAL;
1432 __kernfs_remove(kn);
1433 kn->flags |= KERNFS_SUICIDED;
1434 ret = true;
1435 } else {
1436 wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
1437 DEFINE_WAIT(wait);
1438
1439 while (true) {
1440 prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
1441
1442 if ((kn->flags & KERNFS_SUICIDED) &&
1443 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
1444 break;
1445
1446 mutex_unlock(&kernfs_mutex);
1447 schedule();
1448 mutex_lock(&kernfs_mutex);
1449 }
1450 finish_wait(waitq, &wait);
1451 WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
1452 ret = false;
1453 }
1454
1455 /*
1456 * This must be done while holding kernfs_mutex; otherwise, waiting
1457 * for SUICIDED && deactivated could finish prematurely.
1458 */
1459 kernfs_unbreak_active_protection(kn);
1460
1461 mutex_unlock(&kernfs_mutex);
1462 return ret;
1463}
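/*
 * Illustrative sketch (not part of the original file): the "delete"
 * file pattern described above, as a hypothetical kernfs_ops write
 * handler; my_teardown() stands in for the rest of the removal.  If
 * kernfs_remove_self() returns %false, another writer won the race
 * and the handler just returns:
 *
 *	static ssize_t delete_write(struct kernfs_open_file *of, char *buf,
 *				    size_t bytes, loff_t off)
 *	{
 *		if (!kernfs_remove_self(of->kn))
 *			return bytes;
 *
 *		my_teardown(of->kn->priv);
 *		return bytes;
 *	}
 */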
1464
1465/**
1466 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
1467 * @parent: parent of the target
1468 * @name: name of the kernfs_node to remove
1469 * @ns: namespace tag of the kernfs_node to remove
1470 *
1471 * Look for the kernfs_node with @name and @ns under @parent and remove it.
1472 * Returns 0 on success, -ENOENT if such entry doesn't exist.
1473 */
1474int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
1475 const void *ns)
1476{
1477 struct kernfs_node *kn;
1478
1479 if (!parent) {
1480 WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
1481 name);
1482 return -ENOENT;
1483 }
1484
1485 mutex_lock(&kernfs_mutex);
1486
1487 kn = kernfs_find_ns(parent, name, ns);
1488 if (kn)
1489 __kernfs_remove(kn);
1490
1491 mutex_unlock(&kernfs_mutex);
1492
1493 if (kn)
1494 return 0;
1495 else
1496 return -ENOENT;
1497}
1498
1499/**
1500 * kernfs_rename_ns - move and rename a kernfs_node
1501 * @kn: target node
1502 * @new_parent: new parent to put @kn under
1503 * @new_name: new name
1504 * @new_ns: new namespace tag
1505 */
1506int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
1507 const char *new_name, const void *new_ns)
1508{
1509 struct kernfs_node *old_parent;
1510 const char *old_name = NULL;
1511 int error;
1512
1513 /* can't move or rename root */
1514 if (!kn->parent)
1515 return -EINVAL;
1516
1517 mutex_lock(&kernfs_mutex);
1518
1519 error = -ENOENT;
1520 if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
1521 (new_parent->flags & KERNFS_EMPTY_DIR))
1522 goto out;
1523
1524 error = 0;
1525 if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
1526 (strcmp(kn->name, new_name) == 0))
1527 goto out; /* nothing to rename */
1528
1529 error = -EEXIST;
1530 if (kernfs_find_ns(new_parent, new_name, new_ns))
1531 goto out;
1532
1533 /* rename kernfs_node */
1534 if (strcmp(kn->name, new_name) != 0) {
1535 error = -ENOMEM;
1536 new_name = kstrdup_const(new_name, GFP_KERNEL);
1537 if (!new_name)
1538 goto out;
1539 } else {
1540 new_name = NULL;
1541 }
1542
1543 /*
1544 * Move to the appropriate place in the appropriate directory's rbtree.
1545 */
1546 kernfs_unlink_sibling(kn);
1547 kernfs_get(new_parent);
1548
1549 /* rename_lock protects ->parent and ->name accessors */
1550 spin_lock_irq(&kernfs_rename_lock);
1551
1552 old_parent = kn->parent;
1553 kn->parent = new_parent;
1554
1555 kn->ns = new_ns;
1556 if (new_name) {
1557 old_name = kn->name;
1558 kn->name = new_name;
1559 }
1560
1561 spin_unlock_irq(&kernfs_rename_lock);
1562
1563 kn->hash = kernfs_name_hash(kn->name, kn->ns);
1564 kernfs_link_sibling(kn);
1565
1566 kernfs_put(old_parent);
1567 kfree_const(old_name);
1568
1569 error = 0;
1570 out:
1571 mutex_unlock(&kernfs_mutex);
1572 return error;
1573}
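/*
 * Illustrative sketch (not part of the original file): moving @kn under
 * a different parent while keeping its name and namespace tag:
 *
 *	ret = kernfs_rename_ns(kn, new_parent, kn->name, kn->ns);
 */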
1574
1575/* Relationship between kn->mode and the DT_xxx types */
1576static inline unsigned char dt_type(struct kernfs_node *kn)
1577{
1578 return (kn->mode >> 12) & 15;
1579}
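/*
 * For example, dt_type() maps S_IFDIR (0040000) to 0040000 >> 12 == 4,
 * which is DT_DIR, and S_IFREG (0100000) to 8, which is DT_REG.
 */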
1580
1581static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
1582{
1583 kernfs_put(filp->private_data);
1584 return 0;
1585}
1586
1587static struct kernfs_node *kernfs_dir_pos(const void *ns,
1588 struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
1589{
1590 if (pos) {
1591 int valid = kernfs_active(pos) &&
1592 pos->parent == parent && hash == pos->hash;
1593 kernfs_put(pos);
1594 if (!valid)
1595 pos = NULL;
1596 }
1597 if (!pos && (hash > 1) && (hash < INT_MAX)) {
1598 struct rb_node *node = parent->dir.children.rb_node;
1599 while (node) {
1600 pos = rb_to_kn(node);
1601
1602 if (hash < pos->hash)
1603 node = node->rb_left;
1604 else if (hash > pos->hash)
1605 node = node->rb_right;
1606 else
1607 break;
1608 }
1609 }
1610 /* Skip over entries which are dying/dead or in the wrong namespace */
1611 while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
1612 struct rb_node *node = rb_next(&pos->rb);
1613 if (!node)
1614 pos = NULL;
1615 else
1616 pos = rb_to_kn(node);
1617 }
1618 return pos;
1619}
1620
1621static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1622 struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1623{
1624 pos = kernfs_dir_pos(ns, parent, ino, pos);
1625 if (pos) {
1626 do {
1627 struct rb_node *node = rb_next(&pos->rb);
1628 if (!node)
1629 pos = NULL;
1630 else
1631 pos = rb_to_kn(node);
1632 } while (pos && (!kernfs_active(pos) || pos->ns != ns));
1633 }
1634 return pos;
1635}
1636
1637static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
1638{
1639 struct dentry *dentry = file->f_path.dentry;
1640 struct kernfs_node *parent = kernfs_dentry_node(dentry);
1641 struct kernfs_node *pos = file->private_data;
1642 const void *ns = NULL;
1643
1644 if (!dir_emit_dots(file, ctx))
1645 return 0;
1646 mutex_lock(&kernfs_mutex);
1647
1648 if (kernfs_ns_enabled(parent))
1649 ns = kernfs_info(dentry->d_sb)->ns;
1650
1651 for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
1652 pos;
1653 pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
1654 const char *name = pos->name;
1655 unsigned int type = dt_type(pos);
1656 int len = strlen(name);
1657 ino_t ino = pos->id.ino;
1658
1659 ctx->pos = pos->hash;
1660 file->private_data = pos;
1661 kernfs_get(pos);
1662
1663 mutex_unlock(&kernfs_mutex);
1664 if (!dir_emit(ctx, name, len, ino, type))
1665 return 0;
1666 mutex_lock(&kernfs_mutex);
1667 }
1668 mutex_unlock(&kernfs_mutex);
1669 file->private_data = NULL;
1670 ctx->pos = INT_MAX;
1671 return 0;
1672}
1673
1674const struct file_operations kernfs_dir_fops = {
1675 .read = generic_read_dir,
1676 .iterate_shared = kernfs_fop_readdir,
1677 .release = kernfs_dir_fop_release,
1678 .llseek = generic_file_llseek,
1679};
1/*
2 * fs/kernfs/dir.c - kernfs directory implementation
3 *
4 * Copyright (c) 2001-3 Patrick Mochel
5 * Copyright (c) 2007 SUSE Linux Products GmbH
6 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
7 *
8 * This file is released under the GPLv2.
9 */
10
11#include <linux/sched.h>
12#include <linux/fs.h>
13#include <linux/namei.h>
14#include <linux/idr.h>
15#include <linux/slab.h>
16#include <linux/security.h>
17#include <linux/hash.h>
18
19#include "kernfs-internal.h"
20
21DEFINE_MUTEX(kernfs_mutex);
22static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
23static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */
24
25#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
26
27static bool kernfs_active(struct kernfs_node *kn)
28{
29 lockdep_assert_held(&kernfs_mutex);
30 return atomic_read(&kn->active) >= 0;
31}
32
33static bool kernfs_lockdep(struct kernfs_node *kn)
34{
35#ifdef CONFIG_DEBUG_LOCK_ALLOC
36 return kn->flags & KERNFS_LOCKDEP;
37#else
38 return false;
39#endif
40}
41
42static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
43{
44 return strlcpy(buf, kn->parent ? kn->name : "/", buflen);
45}
46
47/* kernfs_node_depth - compute depth from @from to @to */
48static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
49{
50 size_t depth = 0;
51
52 while (to->parent && to != from) {
53 depth++;
54 to = to->parent;
55 }
56 return depth;
57}
58
59static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
60 struct kernfs_node *b)
61{
62 size_t da, db;
63 struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b);
64
65 if (ra != rb)
66 return NULL;
67
68 da = kernfs_depth(ra->kn, a);
69 db = kernfs_depth(rb->kn, b);
70
71 while (da > db) {
72 a = a->parent;
73 da--;
74 }
75 while (db > da) {
76 b = b->parent;
77 db--;
78 }
79
80 /* worst case b and a will be the same at root */
81 while (b != a) {
82 b = b->parent;
83 a = a->parent;
84 }
85
86 return a;
87}
88
89/**
90 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
91 * where kn_from is treated as root of the path.
92 * @kn_from: kernfs node which should be treated as root for the path
93 * @kn_to: kernfs node to which path is needed
94 * @buf: buffer to copy the path into
95 * @buflen: size of @buf
96 *
97 * We need to handle couple of scenarios here:
98 * [1] when @kn_from is an ancestor of @kn_to at some level
99 * kn_from: /n1/n2/n3
100 * kn_to: /n1/n2/n3/n4/n5
101 * result: /n4/n5
102 *
103 * [2] when @kn_from is on a different hierarchy and we need to find common
104 * ancestor between @kn_from and @kn_to.
105 * kn_from: /n1/n2/n3/n4
106 * kn_to: /n1/n2/n5
107 * result: /../../n5
108 * OR
109 * kn_from: /n1/n2/n3/n4/n5 [depth=5]
110 * kn_to: /n1/n2/n3 [depth=3]
111 * result: /../..
112 *
113 * return value: length of the string. If greater than buflen,
114 * then contents of buf are undefined. On error, -1 is returned.
115 */
116static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
117 struct kernfs_node *kn_from,
118 char *buf, size_t buflen)
119{
120 struct kernfs_node *kn, *common;
121 const char parent_str[] = "/..";
122 size_t depth_from, depth_to, len = 0, nlen = 0;
123 char *p;
124 int i;
125
126 if (!kn_from)
127 kn_from = kernfs_root(kn_to)->kn;
128
129 if (kn_from == kn_to)
130 return strlcpy(buf, "/", buflen);
131
132 common = kernfs_common_ancestor(kn_from, kn_to);
133 if (WARN_ON(!common))
134 return -1;
135
136 depth_to = kernfs_depth(common, kn_to);
137 depth_from = kernfs_depth(common, kn_from);
138
139 if (buf)
140 buf[0] = '\0';
141
142 for (i = 0; i < depth_from; i++)
143 len += strlcpy(buf + len, parent_str,
144 len < buflen ? buflen - len : 0);
145
146 /* Calculate how many bytes we need for the rest */
147 for (kn = kn_to; kn != common; kn = kn->parent)
148 nlen += strlen(kn->name) + 1;
149
150 if (len + nlen >= buflen)
151 return len + nlen;
152
153 p = buf + len + nlen;
154 *p = '\0';
155 for (kn = kn_to; kn != common; kn = kn->parent) {
156 size_t tmp = strlen(kn->name);
157 p -= tmp;
158 memcpy(p, kn->name, tmp);
159 *(--p) = '/';
160 }
161
162 return len + nlen;
163}
164
165/**
166 * kernfs_name - obtain the name of a given node
167 * @kn: kernfs_node of interest
168 * @buf: buffer to copy @kn's name into
169 * @buflen: size of @buf
170 *
171 * Copies the name of @kn into @buf of @buflen bytes. The behavior is
172 * similar to strlcpy(). It returns the length of @kn's name and if @buf
173 * isn't long enough, it's filled upto @buflen-1 and nul terminated.
174 *
175 * This function can be called from any context.
176 */
177int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
178{
179 unsigned long flags;
180 int ret;
181
182 spin_lock_irqsave(&kernfs_rename_lock, flags);
183 ret = kernfs_name_locked(kn, buf, buflen);
184 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
185 return ret;
186}
187
188/**
189 * kernfs_path_len - determine the length of the full path of a given node
190 * @kn: kernfs_node of interest
191 *
192 * The returned length doesn't include the space for the terminating '\0'.
193 */
194size_t kernfs_path_len(struct kernfs_node *kn)
195{
196 size_t len = 0;
197 unsigned long flags;
198
199 spin_lock_irqsave(&kernfs_rename_lock, flags);
200
201 do {
202 len += strlen(kn->name) + 1;
203 kn = kn->parent;
204 } while (kn && kn->parent);
205
206 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
207
208 return len;
209}
210
211/**
212 * kernfs_path_from_node - build path of node @to relative to @from.
213 * @from: parent kernfs_node relative to which we need to build the path
214 * @to: kernfs_node of interest
215 * @buf: buffer to copy @to's path into
216 * @buflen: size of @buf
217 *
218 * Builds @to's path relative to @from in @buf. @from and @to must
219 * be on the same kernfs-root. If @from is not parent of @to, then a relative
220 * path (which includes '..'s) as needed to reach from @from to @to is
221 * returned.
222 *
223 * If @buf isn't long enough, the return value will be greater than @buflen
224 * and @buf contents are undefined.
225 */
226int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
227 char *buf, size_t buflen)
228{
229 unsigned long flags;
230 int ret;
231
232 spin_lock_irqsave(&kernfs_rename_lock, flags);
233 ret = kernfs_path_from_node_locked(to, from, buf, buflen);
234 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
235 return ret;
236}
237EXPORT_SYMBOL_GPL(kernfs_path_from_node);
238
239/**
240 * kernfs_path - build full path of a given node
241 * @kn: kernfs_node of interest
242 * @buf: buffer to copy @kn's name into
243 * @buflen: size of @buf
244 *
245 * Builds and returns the full path of @kn in @buf of @buflen bytes. The
246 * path is built from the end of @buf so the returned pointer usually
247 * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
248 * and %NULL is returned.
249 */
250char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
251{
252 int ret;
253
254 ret = kernfs_path_from_node(kn, NULL, buf, buflen);
255 if (ret < 0 || ret >= buflen)
256 return NULL;
257 return buf;
258}
259EXPORT_SYMBOL_GPL(kernfs_path);
260
261/**
262 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
263 * @kn: kernfs_node of interest
264 *
265 * This function can be called from any context.
266 */
267void pr_cont_kernfs_name(struct kernfs_node *kn)
268{
269 unsigned long flags;
270
271 spin_lock_irqsave(&kernfs_rename_lock, flags);
272
273 kernfs_name_locked(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf));
274 pr_cont("%s", kernfs_pr_cont_buf);
275
276 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
277}
278
279/**
280 * pr_cont_kernfs_path - pr_cont path of a kernfs_node
281 * @kn: kernfs_node of interest
282 *
283 * This function can be called from any context.
284 */
285void pr_cont_kernfs_path(struct kernfs_node *kn)
286{
287 unsigned long flags;
288 int sz;
289
290 spin_lock_irqsave(&kernfs_rename_lock, flags);
291
292 sz = kernfs_path_from_node_locked(kn, NULL, kernfs_pr_cont_buf,
293 sizeof(kernfs_pr_cont_buf));
294 if (sz < 0) {
295 pr_cont("(error)");
296 goto out;
297 }
298
299 if (sz >= sizeof(kernfs_pr_cont_buf)) {
300 pr_cont("(name too long)");
301 goto out;
302 }
303
304 pr_cont("%s", kernfs_pr_cont_buf);
305
306out:
307 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
308}
309
310/**
311 * kernfs_get_parent - determine the parent node and pin it
312 * @kn: kernfs_node of interest
313 *
314 * Determines @kn's parent, pins and returns it. This function can be
315 * called from any context.
316 */
317struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
318{
319 struct kernfs_node *parent;
320 unsigned long flags;
321
322 spin_lock_irqsave(&kernfs_rename_lock, flags);
323 parent = kn->parent;
324 kernfs_get(parent);
325 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
326
327 return parent;
328}
329
330/**
331 * kernfs_name_hash
332 * @name: Null terminated string to hash
333 * @ns: Namespace tag to hash
334 *
335 * Returns 31 bit hash of ns + name (so it fits in an off_t )
336 */
337static unsigned int kernfs_name_hash(const char *name, const void *ns)
338{
339 unsigned long hash = init_name_hash();
340 unsigned int len = strlen(name);
341 while (len--)
342 hash = partial_name_hash(*name++, hash);
343 hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
344 hash &= 0x7fffffffU;
345 /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
346 if (hash < 2)
347 hash += 2;
348 if (hash >= INT_MAX)
349 hash = INT_MAX - 1;
350 return hash;
351}
352
353static int kernfs_name_compare(unsigned int hash, const char *name,
354 const void *ns, const struct kernfs_node *kn)
355{
356 if (hash < kn->hash)
357 return -1;
358 if (hash > kn->hash)
359 return 1;
360 if (ns < kn->ns)
361 return -1;
362 if (ns > kn->ns)
363 return 1;
364 return strcmp(name, kn->name);
365}
366
367static int kernfs_sd_compare(const struct kernfs_node *left,
368 const struct kernfs_node *right)
369{
370 return kernfs_name_compare(left->hash, left->name, left->ns, right);
371}
372
373/**
374 * kernfs_link_sibling - link kernfs_node into sibling rbtree
375 * @kn: kernfs_node of interest
376 *
377 * Link @kn into its sibling rbtree which starts from
378 * @kn->parent->dir.children.
379 *
380 * Locking:
381 * mutex_lock(kernfs_mutex)
382 *
383 * RETURNS:
 * 0 on success, -EEXIST on failure.
385 */
386static int kernfs_link_sibling(struct kernfs_node *kn)
387{
388 struct rb_node **node = &kn->parent->dir.children.rb_node;
389 struct rb_node *parent = NULL;
390
391 while (*node) {
392 struct kernfs_node *pos;
393 int result;
394
395 pos = rb_to_kn(*node);
396 parent = *node;
397 result = kernfs_sd_compare(kn, pos);
398 if (result < 0)
399 node = &pos->rb.rb_left;
400 else if (result > 0)
401 node = &pos->rb.rb_right;
402 else
403 return -EEXIST;
404 }
405
406 /* add new node and rebalance the tree */
407 rb_link_node(&kn->rb, parent, node);
408 rb_insert_color(&kn->rb, &kn->parent->dir.children);
409
410 /* successfully added, account subdir number */
411 if (kernfs_type(kn) == KERNFS_DIR)
412 kn->parent->dir.subdirs++;
413
414 return 0;
415}
416
417/**
418 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
419 * @kn: kernfs_node of interest
420 *
421 * Try to unlink @kn from its sibling rbtree which starts from
422 * kn->parent->dir.children. Returns %true if @kn was actually
423 * removed, %false if @kn wasn't on the rbtree.
424 *
425 * Locking:
426 * mutex_lock(kernfs_mutex)
427 */
428static bool kernfs_unlink_sibling(struct kernfs_node *kn)
429{
430 if (RB_EMPTY_NODE(&kn->rb))
431 return false;
432
433 if (kernfs_type(kn) == KERNFS_DIR)
434 kn->parent->dir.subdirs--;
435
436 rb_erase(&kn->rb, &kn->parent->dir.children);
437 RB_CLEAR_NODE(&kn->rb);
438 return true;
439}
440
441/**
442 * kernfs_get_active - get an active reference to kernfs_node
443 * @kn: kernfs_node to get an active reference to
444 *
 * Get an active reference of @kn. This function is a noop if @kn
 * is NULL.
447 *
448 * RETURNS:
449 * Pointer to @kn on success, NULL on failure.
450 */
451struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
452{
453 if (unlikely(!kn))
454 return NULL;
455
456 if (!atomic_inc_unless_negative(&kn->active))
457 return NULL;
458
459 if (kernfs_lockdep(kn))
460 rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
461 return kn;
462}
463
464/**
465 * kernfs_put_active - put an active reference to kernfs_node
466 * @kn: kernfs_node to put an active reference to
467 *
 * Put an active reference to @kn. This function is a noop if @kn
 * is NULL.
470 */
471void kernfs_put_active(struct kernfs_node *kn)
472{
	struct kernfs_root *root;
	int v;

	if (unlikely(!kn))
		return;

	root = kernfs_root(kn);
478
479 if (kernfs_lockdep(kn))
480 rwsem_release(&kn->dep_map, 1, _RET_IP_);
481 v = atomic_dec_return(&kn->active);
482 if (likely(v != KN_DEACTIVATED_BIAS))
483 return;
484
485 wake_up_all(&root->deactivate_waitq);
486}
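
/*
 * Illustrative usage (not from the original source): active references are
 * taken and dropped in pairs around an operation, as the syscall handlers
 * later in this file (e.g. kernfs_iop_mkdir()) do. A minimal sketch,
 * assuming @kn is a node the caller already holds a plain reference on:
 *
 *	if (!kernfs_get_active(kn))
 *		return -ENODEV;	// node is being removed
 *	...operate on @kn; it cannot finish draining while the ref is held...
 *	kernfs_put_active(kn);
 */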
487
488/**
489 * kernfs_drain - drain kernfs_node
490 * @kn: kernfs_node to drain
491 *
 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
493 * removers may invoke this function concurrently on @kn and all will
494 * return after draining is complete.
495 */
496static void kernfs_drain(struct kernfs_node *kn)
497 __releases(&kernfs_mutex) __acquires(&kernfs_mutex)
498{
499 struct kernfs_root *root = kernfs_root(kn);
500
501 lockdep_assert_held(&kernfs_mutex);
502 WARN_ON_ONCE(kernfs_active(kn));
503
504 mutex_unlock(&kernfs_mutex);
505
506 if (kernfs_lockdep(kn)) {
507 rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
508 if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
509 lock_contended(&kn->dep_map, _RET_IP_);
510 }
511
512 /* but everyone should wait for draining */
513 wait_event(root->deactivate_waitq,
514 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
515
516 if (kernfs_lockdep(kn)) {
517 lock_acquired(&kn->dep_map, _RET_IP_);
518 rwsem_release(&kn->dep_map, 1, _RET_IP_);
519 }
520
521 kernfs_unmap_bin_file(kn);
522
523 mutex_lock(&kernfs_mutex);
524}
525
526/**
527 * kernfs_get - get a reference count on a kernfs_node
528 * @kn: the target kernfs_node
529 */
530void kernfs_get(struct kernfs_node *kn)
531{
532 if (kn) {
533 WARN_ON(!atomic_read(&kn->count));
534 atomic_inc(&kn->count);
535 }
536}
537EXPORT_SYMBOL_GPL(kernfs_get);
538
539/**
540 * kernfs_put - put a reference count on a kernfs_node
541 * @kn: the target kernfs_node
542 *
543 * Put a reference count of @kn and destroy it if it reached zero.
544 */
545void kernfs_put(struct kernfs_node *kn)
546{
547 struct kernfs_node *parent;
548 struct kernfs_root *root;
549
550 if (!kn || !atomic_dec_and_test(&kn->count))
551 return;
552 root = kernfs_root(kn);
553 repeat:
554 /*
555 * Moving/renaming is always done while holding reference.
556 * kn->parent won't change beneath us.
557 */
558 parent = kn->parent;
559
560 WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
561 "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
562 parent ? parent->name : "", kn->name, atomic_read(&kn->active));
563
564 if (kernfs_type(kn) == KERNFS_LINK)
565 kernfs_put(kn->symlink.target_kn);
566
567 kfree_const(kn->name);
568
569 if (kn->iattr) {
570 if (kn->iattr->ia_secdata)
571 security_release_secctx(kn->iattr->ia_secdata,
572 kn->iattr->ia_secdata_len);
573 simple_xattrs_free(&kn->iattr->xattrs);
574 }
575 kfree(kn->iattr);
576 ida_simple_remove(&root->ino_ida, kn->ino);
577 kmem_cache_free(kernfs_node_cache, kn);
578
579 kn = parent;
580 if (kn) {
581 if (atomic_dec_and_test(&kn->count))
582 goto repeat;
583 } else {
584 /* just released the root kn, free @root too */
585 ida_destroy(&root->ino_ida);
586 kfree(root);
587 }
588}
589EXPORT_SYMBOL_GPL(kernfs_put);
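
/*
 * Illustrative usage (not from the original source): a plain
 * kernfs_get()/kernfs_put() pair only keeps the node's memory alive; it
 * says nothing about whether the node is still active or linked. A minimal
 * sketch of stashing a node for later use; example_stashed, example_stash()
 * and example_unstash() are hypothetical.
 *
 *	static struct kernfs_node *example_stashed;
 *
 *	static void example_stash(struct kernfs_node *kn)
 *	{
 *		kernfs_get(kn);
 *		example_stashed = kn;
 *	}
 *
 *	static void example_unstash(void)
 *	{
 *		kernfs_put(example_stashed);
 *		example_stashed = NULL;
 *	}
 */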
590
591static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
592{
593 struct kernfs_node *kn;
594
595 if (flags & LOOKUP_RCU)
596 return -ECHILD;
597
598 /* Always perform fresh lookup for negatives */
599 if (d_really_is_negative(dentry))
600 goto out_bad_unlocked;
601
602 kn = dentry->d_fsdata;
603 mutex_lock(&kernfs_mutex);
604
605 /* The kernfs node has been deactivated */
606 if (!kernfs_active(kn))
607 goto out_bad;
608
609 /* The kernfs node has been moved? */
610 if (dentry->d_parent->d_fsdata != kn->parent)
611 goto out_bad;
612
613 /* The kernfs node has been renamed */
614 if (strcmp(dentry->d_name.name, kn->name) != 0)
615 goto out_bad;
616
617 /* The kernfs node has been moved to a different namespace */
618 if (kn->parent && kernfs_ns_enabled(kn->parent) &&
619 kernfs_info(dentry->d_sb)->ns != kn->ns)
620 goto out_bad;
621
622 mutex_unlock(&kernfs_mutex);
623 return 1;
624out_bad:
625 mutex_unlock(&kernfs_mutex);
626out_bad_unlocked:
627 return 0;
628}
629
630static void kernfs_dop_release(struct dentry *dentry)
631{
632 kernfs_put(dentry->d_fsdata);
633}
634
635const struct dentry_operations kernfs_dops = {
636 .d_revalidate = kernfs_dop_revalidate,
637 .d_release = kernfs_dop_release,
638};
639
640/**
641 * kernfs_node_from_dentry - determine kernfs_node associated with a dentry
642 * @dentry: the dentry in question
643 *
644 * Return the kernfs_node associated with @dentry. If @dentry is not a
645 * kernfs one, %NULL is returned.
646 *
647 * While the returned kernfs_node will stay accessible as long as @dentry
648 * is accessible, the returned node can be in any state and the caller is
649 * fully responsible for determining what's accessible.
650 */
651struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry)
652{
653 if (dentry->d_sb->s_op == &kernfs_sops)
654 return dentry->d_fsdata;
655 return NULL;
656}
657
658static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
659 const char *name, umode_t mode,
660 unsigned flags)
661{
662 struct kernfs_node *kn;
663 int ret;
664
665 name = kstrdup_const(name, GFP_KERNEL);
666 if (!name)
667 return NULL;
668
669 kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
670 if (!kn)
671 goto err_out1;
672
673 ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
674 if (ret < 0)
675 goto err_out2;
676 kn->ino = ret;
677
678 atomic_set(&kn->count, 1);
679 atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
680 RB_CLEAR_NODE(&kn->rb);
681
682 kn->name = name;
683 kn->mode = mode;
684 kn->flags = flags;
685
686 return kn;
687
688 err_out2:
689 kmem_cache_free(kernfs_node_cache, kn);
690 err_out1:
691 kfree_const(name);
692 return NULL;
693}
694
695struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
696 const char *name, umode_t mode,
697 unsigned flags)
698{
699 struct kernfs_node *kn;
700
701 kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
702 if (kn) {
703 kernfs_get(parent);
704 kn->parent = parent;
705 }
706 return kn;
707}
708
709/**
710 * kernfs_add_one - add kernfs_node to parent without warning
711 * @kn: kernfs_node to be added
712 *
 * The caller must already have initialized @kn->parent. This
 * function increments nlink of the parent's inode if @kn is a
 * directory and links @kn into the parent's children rbtree.
716 *
717 * RETURNS:
 * 0 on success; -EEXIST if an entry with the given name already
 * exists; -EINVAL or -ENOENT on other failures.
720 */
721int kernfs_add_one(struct kernfs_node *kn)
722{
723 struct kernfs_node *parent = kn->parent;
724 struct kernfs_iattrs *ps_iattr;
725 bool has_ns;
726 int ret;
727
728 mutex_lock(&kernfs_mutex);
729
730 ret = -EINVAL;
731 has_ns = kernfs_ns_enabled(parent);
732 if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
733 has_ns ? "required" : "invalid", parent->name, kn->name))
734 goto out_unlock;
735
736 if (kernfs_type(parent) != KERNFS_DIR)
737 goto out_unlock;
738
739 ret = -ENOENT;
740 if (parent->flags & KERNFS_EMPTY_DIR)
741 goto out_unlock;
742
743 if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
744 goto out_unlock;
745
746 kn->hash = kernfs_name_hash(kn->name, kn->ns);
747
748 ret = kernfs_link_sibling(kn);
749 if (ret)
750 goto out_unlock;
751
752 /* Update timestamps on the parent */
753 ps_iattr = parent->iattr;
754 if (ps_iattr) {
755 struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
756 ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
757 }
758
759 mutex_unlock(&kernfs_mutex);
760
761 /*
762 * Activate the new node unless CREATE_DEACTIVATED is requested.
763 * If not activated here, the kernfs user is responsible for
764 * activating the node with kernfs_activate(). A node which hasn't
765 * been activated is not visible to userland and its removal won't
766 * trigger deactivation.
767 */
768 if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
769 kernfs_activate(kn);
770 return 0;
771
772out_unlock:
773 mutex_unlock(&kernfs_mutex);
774 return ret;
775}
776
777/**
778 * kernfs_find_ns - find kernfs_node with the given name
779 * @parent: kernfs_node to search under
780 * @name: name to look for
781 * @ns: the namespace tag to use
782 *
783 * Look for kernfs_node with name @name under @parent. Returns pointer to
784 * the found kernfs_node on success, %NULL on failure.
785 */
786static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
787 const unsigned char *name,
788 const void *ns)
789{
790 struct rb_node *node = parent->dir.children.rb_node;
791 bool has_ns = kernfs_ns_enabled(parent);
792 unsigned int hash;
793
794 lockdep_assert_held(&kernfs_mutex);
795
796 if (has_ns != (bool)ns) {
797 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
798 has_ns ? "required" : "invalid", parent->name, name);
799 return NULL;
800 }
801
802 hash = kernfs_name_hash(name, ns);
803 while (node) {
804 struct kernfs_node *kn;
805 int result;
806
807 kn = rb_to_kn(node);
808 result = kernfs_name_compare(hash, name, ns, kn);
809 if (result < 0)
810 node = node->rb_left;
811 else if (result > 0)
812 node = node->rb_right;
813 else
814 return kn;
815 }
816 return NULL;
817}
818
819static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
820 const unsigned char *path,
821 const void *ns)
822{
823 size_t len;
824 char *p, *name;
825
826 lockdep_assert_held(&kernfs_mutex);
827
828 /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
829 spin_lock_irq(&kernfs_rename_lock);
830
831 len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
832
833 if (len >= sizeof(kernfs_pr_cont_buf)) {
834 spin_unlock_irq(&kernfs_rename_lock);
835 return NULL;
836 }
837
838 p = kernfs_pr_cont_buf;
839
840 while ((name = strsep(&p, "/")) && parent) {
841 if (*name == '\0')
842 continue;
843 parent = kernfs_find_ns(parent, name, ns);
844 }
845
846 spin_unlock_irq(&kernfs_rename_lock);
847
848 return parent;
849}
850
851/**
852 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
853 * @parent: kernfs_node to search under
854 * @name: name to look for
855 * @ns: the namespace tag to use
856 *
857 * Look for kernfs_node with name @name under @parent and get a reference
858 * if found. This function may sleep and returns pointer to the found
859 * kernfs_node on success, %NULL on failure.
860 */
861struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
862 const char *name, const void *ns)
863{
864 struct kernfs_node *kn;
865
866 mutex_lock(&kernfs_mutex);
867 kn = kernfs_find_ns(parent, name, ns);
868 kernfs_get(kn);
869 mutex_unlock(&kernfs_mutex);
870
871 return kn;
872}
873EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
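
/*
 * Illustrative usage (not from the original source): the returned node
 * carries a reference which the caller must drop with kernfs_put(). A
 * minimal sketch with namespaces disabled (@ns == NULL); the child name
 * "example" is hypothetical.
 *
 *	struct kernfs_node *kn;
 *
 *	kn = kernfs_find_and_get_ns(parent, "example", NULL);
 *	if (kn) {
 *		pr_info("found %s under %s\n", kn->name, parent->name);
 *		kernfs_put(kn);
 *	}
 */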
874
875/**
876 * kernfs_walk_and_get_ns - find and get kernfs_node with the given path
877 * @parent: kernfs_node to search under
878 * @path: path to look for
879 * @ns: the namespace tag to use
880 *
881 * Look for kernfs_node with path @path under @parent and get a reference
882 * if found. This function may sleep and returns pointer to the found
883 * kernfs_node on success, %NULL on failure.
884 */
885struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
886 const char *path, const void *ns)
887{
888 struct kernfs_node *kn;
889
890 mutex_lock(&kernfs_mutex);
891 kn = kernfs_walk_ns(parent, path, ns);
892 kernfs_get(kn);
893 mutex_unlock(&kernfs_mutex);
894
895 return kn;
896}
897
898/**
899 * kernfs_create_root - create a new kernfs hierarchy
900 * @scops: optional syscall operations for the hierarchy
901 * @flags: KERNFS_ROOT_* flags
902 * @priv: opaque data associated with the new directory
903 *
904 * Returns the root of the new hierarchy on success, ERR_PTR() value on
905 * failure.
906 */
907struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
908 unsigned int flags, void *priv)
909{
910 struct kernfs_root *root;
911 struct kernfs_node *kn;
912
913 root = kzalloc(sizeof(*root), GFP_KERNEL);
914 if (!root)
915 return ERR_PTR(-ENOMEM);
916
917 ida_init(&root->ino_ida);
918 INIT_LIST_HEAD(&root->supers);
919
920 kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
921 KERNFS_DIR);
922 if (!kn) {
923 ida_destroy(&root->ino_ida);
924 kfree(root);
925 return ERR_PTR(-ENOMEM);
926 }
927
928 kn->priv = priv;
929 kn->dir.root = root;
930
931 root->syscall_ops = scops;
932 root->flags = flags;
933 root->kn = kn;
934 init_waitqueue_head(&root->deactivate_waitq);
935
936 if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
937 kernfs_activate(kn);
938
939 return root;
940}
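
/*
 * Illustrative usage (not from the original source): creating a hierarchy
 * whose nodes start deactivated, so that a multi-node init sequence can be
 * made visible atomically with kernfs_activate() later. The syscall ops
 * pointer may be NULL when mkdir/rmdir/rename from userland aren't
 * supported. example_root and example_init() are hypothetical.
 *
 *	static struct kernfs_root *example_root;
 *
 *	static int example_init(void)
 *	{
 *		example_root = kernfs_create_root(NULL,
 *					KERNFS_ROOT_CREATE_DEACTIVATED, NULL);
 *		if (IS_ERR(example_root))
 *			return PTR_ERR(example_root);
 *		return 0;
 *	}
 */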
941
942/**
943 * kernfs_destroy_root - destroy a kernfs hierarchy
944 * @root: root of the hierarchy to destroy
945 *
946 * Destroy the hierarchy anchored at @root by removing all existing
947 * directories and destroying @root.
948 */
949void kernfs_destroy_root(struct kernfs_root *root)
950{
951 kernfs_remove(root->kn); /* will also free @root */
952}
953
954/**
955 * kernfs_create_dir_ns - create a directory
956 * @parent: parent in which to create a new directory
957 * @name: name of the new directory
958 * @mode: mode of the new directory
959 * @priv: opaque data associated with the new directory
960 * @ns: optional namespace tag of the directory
961 *
962 * Returns the created node on success, ERR_PTR() value on failure.
963 */
964struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
965 const char *name, umode_t mode,
966 void *priv, const void *ns)
967{
968 struct kernfs_node *kn;
969 int rc;
970
971 /* allocate */
972 kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
973 if (!kn)
974 return ERR_PTR(-ENOMEM);
975
976 kn->dir.root = parent->dir.root;
977 kn->ns = ns;
978 kn->priv = priv;
979
980 /* link in */
981 rc = kernfs_add_one(kn);
982 if (!rc)
983 return kn;
984
985 kernfs_put(kn);
986 return ERR_PTR(rc);
987}
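
/*
 * Illustrative usage (not from the original source): a minimal sketch of
 * creating a subdirectory with no namespace tag and no private data.
 * The name "example" and helper example_mkdir() are hypothetical.
 *
 *	static struct kernfs_node *example_mkdir(struct kernfs_node *parent)
 *	{
 *		struct kernfs_node *kn;
 *
 *		kn = kernfs_create_dir_ns(parent, "example", 0755, NULL, NULL);
 *		return IS_ERR(kn) ? NULL : kn;
 *	}
 */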
988
989/**
990 * kernfs_create_empty_dir - create an always empty directory
991 * @parent: parent in which to create a new directory
992 * @name: name of the new directory
993 *
994 * Returns the created node on success, ERR_PTR() value on failure.
995 */
996struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
997 const char *name)
998{
999 struct kernfs_node *kn;
1000 int rc;
1001
1002 /* allocate */
1003 kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, KERNFS_DIR);
1004 if (!kn)
1005 return ERR_PTR(-ENOMEM);
1006
1007 kn->flags |= KERNFS_EMPTY_DIR;
1008 kn->dir.root = parent->dir.root;
1009 kn->ns = NULL;
1010 kn->priv = NULL;
1011
1012 /* link in */
1013 rc = kernfs_add_one(kn);
1014 if (!rc)
1015 return kn;
1016
1017 kernfs_put(kn);
1018 return ERR_PTR(rc);
1019}
1020
1021static struct dentry *kernfs_iop_lookup(struct inode *dir,
1022 struct dentry *dentry,
1023 unsigned int flags)
1024{
1025 struct dentry *ret;
1026 struct kernfs_node *parent = dentry->d_parent->d_fsdata;
1027 struct kernfs_node *kn;
1028 struct inode *inode;
1029 const void *ns = NULL;
1030
1031 mutex_lock(&kernfs_mutex);
1032
1033 if (kernfs_ns_enabled(parent))
1034 ns = kernfs_info(dir->i_sb)->ns;
1035
1036 kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
1037
1038 /* no such entry */
1039 if (!kn || !kernfs_active(kn)) {
1040 ret = NULL;
1041 goto out_unlock;
1042 }
1043 kernfs_get(kn);
1044 dentry->d_fsdata = kn;
1045
1046 /* attach dentry and inode */
1047 inode = kernfs_get_inode(dir->i_sb, kn);
1048 if (!inode) {
1049 ret = ERR_PTR(-ENOMEM);
1050 goto out_unlock;
1051 }
1052
1053 /* instantiate and hash dentry */
1054 ret = d_splice_alias(inode, dentry);
1055 out_unlock:
1056 mutex_unlock(&kernfs_mutex);
1057 return ret;
1058}
1059
1060static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
1061 umode_t mode)
1062{
1063 struct kernfs_node *parent = dir->i_private;
1064 struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
1065 int ret;
1066
1067 if (!scops || !scops->mkdir)
1068 return -EPERM;
1069
1070 if (!kernfs_get_active(parent))
1071 return -ENODEV;
1072
1073 ret = scops->mkdir(parent, dentry->d_name.name, mode);
1074
1075 kernfs_put_active(parent);
1076 return ret;
1077}
1078
1079static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
1080{
1081 struct kernfs_node *kn = dentry->d_fsdata;
1082 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1083 int ret;
1084
1085 if (!scops || !scops->rmdir)
1086 return -EPERM;
1087
1088 if (!kernfs_get_active(kn))
1089 return -ENODEV;
1090
1091 ret = scops->rmdir(kn);
1092
1093 kernfs_put_active(kn);
1094 return ret;
1095}
1096
1097static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
1098 struct inode *new_dir, struct dentry *new_dentry)
1099{
1100 struct kernfs_node *kn = old_dentry->d_fsdata;
1101 struct kernfs_node *new_parent = new_dir->i_private;
1102 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
1103 int ret;
1104
1105 if (!scops || !scops->rename)
1106 return -EPERM;
1107
1108 if (!kernfs_get_active(kn))
1109 return -ENODEV;
1110
1111 if (!kernfs_get_active(new_parent)) {
1112 kernfs_put_active(kn);
1113 return -ENODEV;
1114 }
1115
1116 ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
1117
1118 kernfs_put_active(new_parent);
1119 kernfs_put_active(kn);
1120 return ret;
1121}
1122
1123const struct inode_operations kernfs_dir_iops = {
1124 .lookup = kernfs_iop_lookup,
1125 .permission = kernfs_iop_permission,
1126 .setattr = kernfs_iop_setattr,
1127 .getattr = kernfs_iop_getattr,
1128 .setxattr = kernfs_iop_setxattr,
1129 .removexattr = kernfs_iop_removexattr,
1130 .getxattr = kernfs_iop_getxattr,
1131 .listxattr = kernfs_iop_listxattr,
1132
1133 .mkdir = kernfs_iop_mkdir,
1134 .rmdir = kernfs_iop_rmdir,
1135 .rename = kernfs_iop_rename,
1136};
1137
1138static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
1139{
1140 struct kernfs_node *last;
1141
1142 while (true) {
1143 struct rb_node *rbn;
1144
1145 last = pos;
1146
1147 if (kernfs_type(pos) != KERNFS_DIR)
1148 break;
1149
1150 rbn = rb_first(&pos->dir.children);
1151 if (!rbn)
1152 break;
1153
1154 pos = rb_to_kn(rbn);
1155 }
1156
1157 return last;
1158}
1159
1160/**
1161 * kernfs_next_descendant_post - find the next descendant for post-order walk
1162 * @pos: the current position (%NULL to initiate traversal)
1163 * @root: kernfs_node whose descendants to walk
1164 *
1165 * Find the next descendant to visit for post-order traversal of @root's
 * descendants. @root is included in the iteration and is the last node
 * to be visited.
1168 */
1169static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
1170 struct kernfs_node *root)
1171{
1172 struct rb_node *rbn;
1173
1174 lockdep_assert_held(&kernfs_mutex);
1175
1176 /* if first iteration, visit leftmost descendant which may be root */
1177 if (!pos)
1178 return kernfs_leftmost_descendant(root);
1179
1180 /* if we visited @root, we're done */
1181 if (pos == root)
1182 return NULL;
1183
1184 /* if there's an unvisited sibling, visit its leftmost descendant */
1185 rbn = rb_next(&pos->rb);
1186 if (rbn)
1187 return kernfs_leftmost_descendant(rb_to_kn(rbn));
1188
1189 /* no sibling left, visit parent */
1190 return pos->parent;
1191}
1192
1193/**
1194 * kernfs_activate - activate a node which started deactivated
1195 * @kn: kernfs_node whose subtree is to be activated
1196 *
1197 * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node
1198 * needs to be explicitly activated. A node which hasn't been activated
1199 * isn't visible to userland and deactivation is skipped during its
 * removal. This is useful for constructing atomic init sequences where
 * creation of multiple nodes should either succeed or fail atomically.
1202 *
1203 * The caller is responsible for ensuring that this function is not called
1204 * after kernfs_remove*() is invoked on @kn.
1205 */
1206void kernfs_activate(struct kernfs_node *kn)
1207{
1208 struct kernfs_node *pos;
1209
1210 mutex_lock(&kernfs_mutex);
1211
1212 pos = NULL;
1213 while ((pos = kernfs_next_descendant_post(pos, kn))) {
1214 if (!pos || (pos->flags & KERNFS_ACTIVATED))
1215 continue;
1216
1217 WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb));
1218 WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS);
1219
1220 atomic_sub(KN_DEACTIVATED_BIAS, &pos->active);
1221 pos->flags |= KERNFS_ACTIVATED;
1222 }
1223
1224 mutex_unlock(&kernfs_mutex);
1225}
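
/*
 * Illustrative usage (not from the original source): with
 * KERNFS_ROOT_CREATE_DEACTIVATED, a subtree can be fully constructed and
 * only then made visible; on error the half-built subtree can be
 * kernfs_remove()d without ever having been seen by userland. A minimal
 * sketch, assuming @parent belongs to such a root:
 *
 *	struct kernfs_node *dir;
 *
 *	dir = kernfs_create_dir_ns(parent, "group", 0755, NULL, NULL);
 *	if (IS_ERR(dir))
 *		return PTR_ERR(dir);
 *	...create files under dir; on failure kernfs_remove(dir) and bail...
 *	kernfs_activate(dir);	// dir and its descendants become visible
 */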
1226
1227static void __kernfs_remove(struct kernfs_node *kn)
1228{
1229 struct kernfs_node *pos;
1230
1231 lockdep_assert_held(&kernfs_mutex);
1232
1233 /*
1234 * Short-circuit if non-root @kn has already finished removal.
1235 * This is for kernfs_remove_self() which plays with active ref
1236 * after removal.
1237 */
1238 if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
1239 return;
1240
1241 pr_debug("kernfs %s: removing\n", kn->name);
1242
1243 /* prevent any new usage under @kn by deactivating all nodes */
1244 pos = NULL;
1245 while ((pos = kernfs_next_descendant_post(pos, kn)))
1246 if (kernfs_active(pos))
1247 atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
1248
1249 /* deactivate and unlink the subtree node-by-node */
1250 do {
1251 pos = kernfs_leftmost_descendant(kn);
1252
1253 /*
1254 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
1255 * base ref could have been put by someone else by the time
1256 * the function returns. Make sure it doesn't go away
1257 * underneath us.
1258 */
1259 kernfs_get(pos);
1260
1261 /*
1262 * Drain iff @kn was activated. This avoids draining and
1263 * its lockdep annotations for nodes which have never been
1264 * activated and allows embedding kernfs_remove() in create
1265 * error paths without worrying about draining.
1266 */
1267 if (kn->flags & KERNFS_ACTIVATED)
1268 kernfs_drain(pos);
1269 else
1270 WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
1271
1272 /*
1273 * kernfs_unlink_sibling() succeeds once per node. Use it
1274 * to decide who's responsible for cleanups.
1275 */
1276 if (!pos->parent || kernfs_unlink_sibling(pos)) {
1277 struct kernfs_iattrs *ps_iattr =
1278 pos->parent ? pos->parent->iattr : NULL;
1279
1280 /* update timestamps on the parent */
1281 if (ps_iattr) {
1282 ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
1283 ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
1284 }
1285
1286 kernfs_put(pos);
1287 }
1288
1289 kernfs_put(pos);
1290 } while (pos != kn);
1291}
1292
1293/**
1294 * kernfs_remove - remove a kernfs_node recursively
1295 * @kn: the kernfs_node to remove
1296 *
1297 * Remove @kn along with all its subdirectories and files.
1298 */
1299void kernfs_remove(struct kernfs_node *kn)
1300{
1301 mutex_lock(&kernfs_mutex);
1302 __kernfs_remove(kn);
1303 mutex_unlock(&kernfs_mutex);
1304}
1305
1306/**
1307 * kernfs_break_active_protection - break out of active protection
1308 * @kn: the self kernfs_node
1309 *
1310 * The caller must be running off of a kernfs operation which is invoked
1311 * with an active reference - e.g. one of kernfs_ops. Each invocation of
1312 * this function must also be matched with an invocation of
1313 * kernfs_unbreak_active_protection().
1314 *
1315 * This function releases the active reference of @kn the caller is
1316 * holding. Once this function is called, @kn may be removed at any point
1317 * and the caller is solely responsible for ensuring that the objects it
1318 * dereferences are accessible.
1319 */
1320void kernfs_break_active_protection(struct kernfs_node *kn)
1321{
1322 /*
	 * Take ourselves out of the active ref dependency chain. If
	 * we're called without an active ref, lockdep will complain.
1325 */
1326 kernfs_put_active(kn);
1327}
1328
1329/**
1330 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
1331 * @kn: the self kernfs_node
1332 *
1333 * If kernfs_break_active_protection() was called, this function must be
1334 * invoked before finishing the kernfs operation. Note that while this
1335 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or may be
 * in the process of being removed. Once kernfs_break_active_protection()
 * is invoked, that
1338 * protection is irreversibly gone for the kernfs operation instance.
1339 *
1340 * While this function may be called at any point after
1341 * kernfs_break_active_protection() is invoked, its most useful location
1342 * would be right before the enclosing kernfs operation returns.
1343 */
1344void kernfs_unbreak_active_protection(struct kernfs_node *kn)
1345{
1346 /*
1347 * @kn->active could be in any state; however, the increment we do
1348 * here will be undone as soon as the enclosing kernfs operation
1349 * finishes and this temporary bump can't break anything. If @kn
1350 * is alive, nothing changes. If @kn is being deactivated, the
1351 * soon-to-follow put will either finish deactivation or restore
1352 * deactivated state. If @kn is already removed, the temporary
1353 * bump is guaranteed to be gone before @kn is released.
1354 */
1355 atomic_inc(&kn->active);
1356 if (kernfs_lockdep(kn))
1357 rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
1358}
1359
1360/**
1361 * kernfs_remove_self - remove a kernfs_node from its own method
1362 * @kn: the self kernfs_node to remove
1363 *
1364 * The caller must be running off of a kernfs operation which is invoked
1365 * with an active reference - e.g. one of kernfs_ops. This can be used to
1366 * implement a file operation which deletes itself.
1367 *
1368 * For example, the "delete" file for a sysfs device directory can be
1369 * implemented by invoking kernfs_remove_self() on the "delete" file
1370 * itself. This function breaks the circular dependency of trying to
1371 * deactivate self while holding an active ref itself. It isn't necessary
1372 * to modify the usual removal path to use kernfs_remove_self(). The
1373 * "delete" implementation can simply invoke kernfs_remove_self() on self
1374 * before proceeding with the usual removal path. kernfs will ignore later
1375 * kernfs_remove() on self.
1376 *
1377 * kernfs_remove_self() can be called multiple times concurrently on the
1378 * same kernfs_node. Only the first one actually performs removal and
1379 * returns %true. All others will wait until the kernfs operation which
1380 * won self-removal finishes and return %false. Note that the losers wait
1381 * for the completion of not only the winning kernfs_remove_self() but also
1382 * the whole kernfs_ops which won the arbitration. This can be used to
 * guarantee, for example, that all concurrent writes to a "delete" file
 * finish only after the whole operation is complete.
1385 */
1386bool kernfs_remove_self(struct kernfs_node *kn)
1387{
1388 bool ret;
1389
1390 mutex_lock(&kernfs_mutex);
1391 kernfs_break_active_protection(kn);
1392
1393 /*
1394 * SUICIDAL is used to arbitrate among competing invocations. Only
1395 * the first one will actually perform removal. When the removal
1396 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex. The ones which lost arbitration
	 * wait for SUICIDED && drained, which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() has finished.
1401 */
1402 if (!(kn->flags & KERNFS_SUICIDAL)) {
1403 kn->flags |= KERNFS_SUICIDAL;
1404 __kernfs_remove(kn);
1405 kn->flags |= KERNFS_SUICIDED;
1406 ret = true;
1407 } else {
1408 wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
1409 DEFINE_WAIT(wait);
1410
1411 while (true) {
1412 prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
1413
1414 if ((kn->flags & KERNFS_SUICIDED) &&
1415 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
1416 break;
1417
1418 mutex_unlock(&kernfs_mutex);
1419 schedule();
1420 mutex_lock(&kernfs_mutex);
1421 }
1422 finish_wait(waitq, &wait);
1423 WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
1424 ret = false;
1425 }
1426
1427 /*
1428 * This must be done while holding kernfs_mutex; otherwise, waiting
1429 * for SUICIDED && deactivated could finish prematurely.
1430 */
1431 kernfs_unbreak_active_protection(kn);
1432
1433 mutex_unlock(&kernfs_mutex);
1434 return ret;
1435}
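
/*
 * Illustrative usage (not from the original source): a self-deleting
 * "delete" attribute following the pattern described above. A kernfs_ops
 * write callback runs with an active reference on of->kn, which is exactly
 * what kernfs_remove_self() expects. example_delete_write() and
 * example_teardown() are hypothetical.
 *
 *	static ssize_t example_delete_write(struct kernfs_open_file *of,
 *					    char *buf, size_t bytes, loff_t off)
 *	{
 *		// only the invocation that wins self-removal tears down
 *		if (kernfs_remove_self(of->kn))
 *			example_teardown(of->kn);
 *		return bytes;
 *	}
 */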
1436
1437/**
1438 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
1439 * @parent: parent of the target
1440 * @name: name of the kernfs_node to remove
1441 * @ns: namespace tag of the kernfs_node to remove
1442 *
1443 * Look for the kernfs_node with @name and @ns under @parent and remove it.
1444 * Returns 0 on success, -ENOENT if such entry doesn't exist.
1445 */
1446int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
1447 const void *ns)
1448{
1449 struct kernfs_node *kn;
1450
1451 if (!parent) {
		WARN(1, KERN_WARNING "kernfs: cannot remove '%s', no directory\n",
1453 name);
1454 return -ENOENT;
1455 }
1456
1457 mutex_lock(&kernfs_mutex);
1458
1459 kn = kernfs_find_ns(parent, name, ns);
1460 if (kn)
1461 __kernfs_remove(kn);
1462
1463 mutex_unlock(&kernfs_mutex);
1464
1465 if (kn)
1466 return 0;
1467 else
1468 return -ENOENT;
1469}
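
/*
 * Illustrative usage (not from the original source): removing a named child
 * without holding a pointer to it; with namespaces disabled the tag is
 * simply NULL. The name "example" is hypothetical.
 *
 *	if (kernfs_remove_by_name_ns(parent, "example", NULL))
 *		pr_warn("no 'example' node under %s\n", parent->name);
 */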
1470
1471/**
1472 * kernfs_rename_ns - move and rename a kernfs_node
1473 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Returns 0 on success, -errno on failure.
 */
1478int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
1479 const char *new_name, const void *new_ns)
1480{
1481 struct kernfs_node *old_parent;
1482 const char *old_name = NULL;
1483 int error;
1484
1485 /* can't move or rename root */
1486 if (!kn->parent)
1487 return -EINVAL;
1488
1489 mutex_lock(&kernfs_mutex);
1490
1491 error = -ENOENT;
1492 if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
1493 (new_parent->flags & KERNFS_EMPTY_DIR))
1494 goto out;
1495
1496 error = 0;
1497 if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
1498 (strcmp(kn->name, new_name) == 0))
1499 goto out; /* nothing to rename */
1500
1501 error = -EEXIST;
1502 if (kernfs_find_ns(new_parent, new_name, new_ns))
1503 goto out;
1504
1505 /* rename kernfs_node */
1506 if (strcmp(kn->name, new_name) != 0) {
1507 error = -ENOMEM;
1508 new_name = kstrdup_const(new_name, GFP_KERNEL);
1509 if (!new_name)
1510 goto out;
1511 } else {
1512 new_name = NULL;
1513 }
1514
1515 /*
1516 * Move to the appropriate place in the appropriate directories rbtree.
1517 */
1518 kernfs_unlink_sibling(kn);
1519 kernfs_get(new_parent);
1520
1521 /* rename_lock protects ->parent and ->name accessors */
1522 spin_lock_irq(&kernfs_rename_lock);
1523
1524 old_parent = kn->parent;
1525 kn->parent = new_parent;
1526
1527 kn->ns = new_ns;
1528 if (new_name) {
1529 old_name = kn->name;
1530 kn->name = new_name;
1531 }
1532
1533 spin_unlock_irq(&kernfs_rename_lock);
1534
1535 kn->hash = kernfs_name_hash(kn->name, kn->ns);
1536 kernfs_link_sibling(kn);
1537
1538 kernfs_put(old_parent);
1539 kfree_const(old_name);
1540
1541 error = 0;
1542 out:
1543 mutex_unlock(&kernfs_mutex);
1544 return error;
1545}
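
/*
 * Illustrative usage (not from the original source): renaming a node in
 * place, keeping its parent and namespace tag. A minimal sketch; the new
 * name is hypothetical.
 *
 *	int err;
 *
 *	err = kernfs_rename_ns(kn, kn->parent, "new-name", kn->ns);
 *	if (err)
 *		pr_warn("kernfs rename failed: %d\n", err);
 */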
1546
1547/* Relationship between s_mode and the DT_xxx types */
1548static inline unsigned char dt_type(struct kernfs_node *kn)
1549{
1550 return (kn->mode >> 12) & 15;
1551}
1552
1553static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
1554{
1555 kernfs_put(filp->private_data);
1556 return 0;
1557}
1558
1559static struct kernfs_node *kernfs_dir_pos(const void *ns,
1560 struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
1561{
1562 if (pos) {
1563 int valid = kernfs_active(pos) &&
1564 pos->parent == parent && hash == pos->hash;
1565 kernfs_put(pos);
1566 if (!valid)
1567 pos = NULL;
1568 }
1569 if (!pos && (hash > 1) && (hash < INT_MAX)) {
1570 struct rb_node *node = parent->dir.children.rb_node;
1571 while (node) {
1572 pos = rb_to_kn(node);
1573
1574 if (hash < pos->hash)
1575 node = node->rb_left;
1576 else if (hash > pos->hash)
1577 node = node->rb_right;
1578 else
1579 break;
1580 }
1581 }
1582 /* Skip over entries which are dying/dead or in the wrong namespace */
1583 while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
1584 struct rb_node *node = rb_next(&pos->rb);
1585 if (!node)
1586 pos = NULL;
1587 else
1588 pos = rb_to_kn(node);
1589 }
1590 return pos;
1591}
1592
1593static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1594 struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1595{
1596 pos = kernfs_dir_pos(ns, parent, ino, pos);
1597 if (pos) {
1598 do {
1599 struct rb_node *node = rb_next(&pos->rb);
1600 if (!node)
1601 pos = NULL;
1602 else
1603 pos = rb_to_kn(node);
1604 } while (pos && (!kernfs_active(pos) || pos->ns != ns));
1605 }
1606 return pos;
1607}
1608
1609static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
1610{
1611 struct dentry *dentry = file->f_path.dentry;
1612 struct kernfs_node *parent = dentry->d_fsdata;
1613 struct kernfs_node *pos = file->private_data;
1614 const void *ns = NULL;
1615
1616 if (!dir_emit_dots(file, ctx))
1617 return 0;
1618 mutex_lock(&kernfs_mutex);
1619
1620 if (kernfs_ns_enabled(parent))
1621 ns = kernfs_info(dentry->d_sb)->ns;
1622
1623 for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
1624 pos;
1625 pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
1626 const char *name = pos->name;
1627 unsigned int type = dt_type(pos);
1628 int len = strlen(name);
1629 ino_t ino = pos->ino;
1630
1631 ctx->pos = pos->hash;
1632 file->private_data = pos;
1633 kernfs_get(pos);
1634
1635 mutex_unlock(&kernfs_mutex);
1636 if (!dir_emit(ctx, name, len, ino, type))
1637 return 0;
1638 mutex_lock(&kernfs_mutex);
1639 }
1640 mutex_unlock(&kernfs_mutex);
1641 file->private_data = NULL;
1642 ctx->pos = INT_MAX;
1643 return 0;
1644}
1645
1646static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
1647 int whence)
1648{
1649 struct inode *inode = file_inode(file);
1650 loff_t ret;
1651
1652 inode_lock(inode);
1653 ret = generic_file_llseek(file, offset, whence);
1654 inode_unlock(inode);
1655
1656 return ret;
1657}
1658
1659const struct file_operations kernfs_dir_fops = {
1660 .read = generic_read_dir,
1661 .iterate = kernfs_fop_readdir,
1662 .release = kernfs_dir_fop_release,
1663 .llseek = kernfs_dir_fop_llseek,
1664};