1/*
2 * linux/fs/super.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * super.c contains code to handle: - mount structures
7 * - super-block tables
8 * - filesystem drivers list
9 * - mount system call
10 * - umount system call
11 * - ustat system call
12 *
13 * GK 2/5/95 - Changed to support mounting the root fs via NFS
14 *
15 * Added kerneld support: Jacques Gelinas and Bjorn Ekwall
16 * Added change_root: Werner Almesberger & Hans Lermen, Feb '96
17 * Added options to /proc/mounts:
18 * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
19 * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
20 * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
21 */
22
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/acct.h>
26#include <linux/blkdev.h>
27#include <linux/mount.h>
28#include <linux/security.h>
29#include <linux/writeback.h> /* for the emergency remount stuff */
30#include <linux/idr.h>
31#include <linux/mutex.h>
32#include <linux/backing-dev.h>
33#include <linux/rculist_bl.h>
34#include <linux/cleancache.h>
35#include "internal.h"
36
37
38LIST_HEAD(super_blocks);
39DEFINE_SPINLOCK(sb_lock);
40
41/*
42 * One thing we have to be careful of with a per-sb shrinker is that we don't
43 * drop the last active reference to the superblock from within the shrinker.
44 * If that happens we could trigger unregistering the shrinker from within the
45 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
46 * take a passive reference to the superblock to prevent this from occurring.
47 */
48static int prune_super(struct shrinker *shrink, struct shrink_control *sc)
49{
50 struct super_block *sb;
51 int fs_objects = 0;
52 int total_objects;
53
54 sb = container_of(shrink, struct super_block, s_shrink);
55
56 /*
57 * Deadlock avoidance. We may hold various FS locks, and we don't want
58 * to recurse into the FS that called us in clear_inode() and friends..
59 */
60 if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
61 return -1;
62
63 if (!grab_super_passive(sb))
64 return -1;
65
66 if (sb->s_op && sb->s_op->nr_cached_objects)
67 fs_objects = sb->s_op->nr_cached_objects(sb);
68
69 total_objects = sb->s_nr_dentry_unused +
70 sb->s_nr_inodes_unused + fs_objects + 1;
71
72 if (sc->nr_to_scan) {
73 int dentries;
74 int inodes;
75
76 /* proportion the scan between the caches */
77 dentries = (sc->nr_to_scan * sb->s_nr_dentry_unused) /
78 total_objects;
79 inodes = (sc->nr_to_scan * sb->s_nr_inodes_unused) /
80 total_objects;
81 if (fs_objects)
82 fs_objects = (sc->nr_to_scan * fs_objects) /
83 total_objects;
84 /*
85 * prune the dcache first as the icache is pinned by it, then
86 * prune the icache, followed by the filesystem specific caches
87 */
88 prune_dcache_sb(sb, dentries);
89 prune_icache_sb(sb, inodes);
90
91 if (fs_objects && sb->s_op->free_cached_objects) {
92 sb->s_op->free_cached_objects(sb, fs_objects);
93 fs_objects = sb->s_op->nr_cached_objects(sb);
94 }
95 total_objects = sb->s_nr_dentry_unused +
96 sb->s_nr_inodes_unused + fs_objects;
97 }
98
99 total_objects = (total_objects / 100) * sysctl_vfs_cache_pressure;
100 drop_super(sb);
101 return total_objects;
102}
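/*
 * Illustrative numbers only (not taken from anything above): with
 * sc->nr_to_scan == 128, 600 unused dentries, 300 unused inodes and 100
 * fs-private objects, total_objects is 1001 and the scan is split roughly
 * 76/38/12 between the dcache, the icache and the filesystem's own caches.
 * The "+ 1" keeps the divisor non-zero when all three counts are empty,
 * and the final return value reports the remaining count scaled by
 * sysctl_vfs_cache_pressure / 100.
 */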
103
104/**
105 * alloc_super - create new superblock
106 * @type: filesystem type superblock should belong to
107 *
108 * Allocates and initializes a new &struct super_block. alloc_super()
109 * returns a pointer to a new superblock or %NULL if allocation fails.
110 */
111static struct super_block *alloc_super(struct file_system_type *type)
112{
113 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
114 static const struct super_operations default_op;
115
116 if (s) {
117 if (security_sb_alloc(s)) {
118 kfree(s);
119 s = NULL;
120 goto out;
121 }
122#ifdef CONFIG_SMP
123 s->s_files = alloc_percpu(struct list_head);
124 if (!s->s_files) {
125 security_sb_free(s);
126 kfree(s);
127 s = NULL;
128 goto out;
129 } else {
130 int i;
131
132 for_each_possible_cpu(i)
133 INIT_LIST_HEAD(per_cpu_ptr(s->s_files, i));
134 }
135#else
136 INIT_LIST_HEAD(&s->s_files);
137#endif
138 s->s_bdi = &default_backing_dev_info;
139 INIT_LIST_HEAD(&s->s_instances);
140 INIT_HLIST_BL_HEAD(&s->s_anon);
141 INIT_LIST_HEAD(&s->s_inodes);
142 INIT_LIST_HEAD(&s->s_dentry_lru);
143 INIT_LIST_HEAD(&s->s_inode_lru);
144 spin_lock_init(&s->s_inode_lru_lock);
145 init_rwsem(&s->s_umount);
146 mutex_init(&s->s_lock);
147 lockdep_set_class(&s->s_umount, &type->s_umount_key);
148 /*
149 * The locking rules for s_lock are up to the
150 * filesystem. For example ext3fs has different
151 * lock ordering than usbfs:
152 */
153 lockdep_set_class(&s->s_lock, &type->s_lock_key);
154 /*
155 * sget() can have s_umount recursion.
156 *
157 * When it cannot find a suitable sb, it allocates a new
158 * one (this one), and tries again to find a suitable old
159 * one.
160 *
161 * In case that succeeds, it will acquire the s_umount
162 * lock of the old one. Since these are clearly distinct
163 * locks, and this object isn't exposed yet, there's no
164 * risk of deadlocks.
165 *
166 * Annotate this by putting this lock in a different
167 * subclass.
168 */
169 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
170 s->s_count = 1;
171 atomic_set(&s->s_active, 1);
172 mutex_init(&s->s_vfs_rename_mutex);
173 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
174 mutex_init(&s->s_dquot.dqio_mutex);
175 mutex_init(&s->s_dquot.dqonoff_mutex);
176 init_rwsem(&s->s_dquot.dqptr_sem);
177 init_waitqueue_head(&s->s_wait_unfrozen);
178 s->s_maxbytes = MAX_NON_LFS;
179 s->s_op = &default_op;
180 s->s_time_gran = 1000000000;
181 s->cleancache_poolid = -1;
182
183 s->s_shrink.seeks = DEFAULT_SEEKS;
184 s->s_shrink.shrink = prune_super;
185 s->s_shrink.batch = 1024;
186 }
187out:
188 return s;
189}
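/*
 * Note that on success the new superblock is returned with ->s_umount
 * held for writing (taken with SINGLE_DEPTH_NESTING above) and with
 * s_count == 1 and s_active == 1; sget() relies on exactly that state
 * when it publishes the superblock on the global and per-type lists.
 */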
190
191/**
192 * destroy_super - frees a superblock
193 * @s: superblock to free
194 *
195 * Frees a superblock.
196 */
197static inline void destroy_super(struct super_block *s)
198{
199#ifdef CONFIG_SMP
200 free_percpu(s->s_files);
201#endif
202 security_sb_free(s);
203 kfree(s->s_subtype);
204 kfree(s->s_options);
205 kfree(s);
206}
207
208/* Superblock refcounting */
209
210/*
211 * Drop a superblock's refcount. The caller must hold sb_lock.
212 */
213void __put_super(struct super_block *sb)
214{
215 if (!--sb->s_count) {
216 list_del_init(&sb->s_list);
217 destroy_super(sb);
218 }
219}
220
221/**
222 * put_super - drop a temporary reference to superblock
223 * @sb: superblock in question
224 *
225 * Drops a temporary reference, frees superblock if there are no
226 * references left.
227 */
228void put_super(struct super_block *sb)
229{
230 spin_lock(&sb_lock);
231 __put_super(sb);
232 spin_unlock(&sb_lock);
233}
234
235
236/**
237 * deactivate_locked_super - drop an active reference to superblock
238 * @s: superblock to deactivate
239 *
240 * Drops an active reference to superblock, converting it into a temporary
241 * one if there are no other active references left. In that case we
242 * tell fs driver to shut it down and drop the temporary reference we
243 * had just acquired.
244 *
245 * Caller holds exclusive lock on superblock; that lock is released.
246 */
247void deactivate_locked_super(struct super_block *s)
248{
249 struct file_system_type *fs = s->s_type;
250 if (atomic_dec_and_test(&s->s_active)) {
251 cleancache_flush_fs(s);
252 fs->kill_sb(s);
253
254 /* caches are now gone, we can safely kill the shrinker */
255 unregister_shrinker(&s->s_shrink);
256
257 /*
258 * We need to call rcu_barrier so all the delayed rcu free
259 * inodes are flushed before we release the fs module.
260 */
261 rcu_barrier();
262 put_filesystem(fs);
263 put_super(s);
264 } else {
265 up_write(&s->s_umount);
266 }
267}
268
269EXPORT_SYMBOL(deactivate_locked_super);
270
271/**
272 * deactivate_super - drop an active reference to superblock
273 * @s: superblock to deactivate
274 *
275 * Variant of deactivate_locked_super(), except that superblock is *not*
276 * locked by caller. If we are going to drop the final active reference,
277 * lock will be acquired prior to that.
278 */
279void deactivate_super(struct super_block *s)
280{
281 if (!atomic_add_unless(&s->s_active, -1, 1)) {
282 down_write(&s->s_umount);
283 deactivate_locked_super(s);
284 }
285}
286
287EXPORT_SYMBOL(deactivate_super);
288
289/**
290 * grab_super - acquire an active reference
291 * @s: reference we are trying to make active
292 *
293 * Tries to acquire an active reference. grab_super() is used when we
294 * had just found a superblock in super_blocks or fs_type->fs_supers
295 * and want to turn it into a full-blown active reference. grab_super()
296 * is called with sb_lock held and drops it. Returns 1 in case of
297 * success, 0 if we had failed (superblock contents were already dead or
298 * dying when grab_super() had been called).
299 */
300static int grab_super(struct super_block *s) __releases(sb_lock)
301{
302 if (atomic_inc_not_zero(&s->s_active)) {
303 spin_unlock(&sb_lock);
304 return 1;
305 }
306 /* it's going away */
307 s->s_count++;
308 spin_unlock(&sb_lock);
309 /* wait for it to die */
310 down_write(&s->s_umount);
311 up_write(&s->s_umount);
312 put_super(s);
313 return 0;
314}
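/*
 * The down_write()/up_write() pair above does no work of its own: after
 * bumping s_count we merely wait until whoever is tearing the superblock
 * down releases ->s_umount, then drop our temporary reference so the
 * caller can retry its search.
 */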
315
316/*
317 * grab_super_passive - acquire a passive reference
318 * @s: reference we are trying to grab
319 *
320 * Tries to acquire a passive reference. This is used in places where we
321 * cannot take an active reference but we need to ensure that the
322 * superblock does not go away while we are working on it. It returns
323 * false if a reference was not gained, and returns true with the s_umount
324 * lock held in read mode if a reference is gained. On successful return,
325 * the caller must drop the s_umount lock and the passive reference when
326 * done.
327 */
328bool grab_super_passive(struct super_block *sb)
329{
330 spin_lock(&sb_lock);
331 if (list_empty(&sb->s_instances)) {
332 spin_unlock(&sb_lock);
333 return false;
334 }
335
336 sb->s_count++;
337 spin_unlock(&sb_lock);
338
339 if (down_read_trylock(&sb->s_umount)) {
340 if (sb->s_root)
341 return true;
342 up_read(&sb->s_umount);
343 }
344
345 put_super(sb);
346 return false;
347}
348
349/*
350 * Superblock locking. We really ought to get rid of these two.
351 */
352void lock_super(struct super_block * sb)
353{
354 mutex_lock(&sb->s_lock);
355}
356
357void unlock_super(struct super_block * sb)
358{
359 mutex_unlock(&sb->s_lock);
360}
361
362EXPORT_SYMBOL(lock_super);
363EXPORT_SYMBOL(unlock_super);
364
365/**
366 * generic_shutdown_super - common helper for ->kill_sb()
367 * @sb: superblock to kill
368 *
369 * generic_shutdown_super() does all fs-independent work on superblock
370 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
371 * that need destruction out of superblock, call generic_shutdown_super()
372 * and release aforementioned objects. Note: dentries and inodes _are_
373 * taken care of and do not need specific handling.
374 *
375 * Upon calling this function, the filesystem may no longer alter or
376 * rearrange the set of dentries belonging to this super_block, nor may it
377 * change the attachments of dentries to inodes.
378 */
379void generic_shutdown_super(struct super_block *sb)
380{
381 const struct super_operations *sop = sb->s_op;
382
383 if (sb->s_root) {
384 shrink_dcache_for_umount(sb);
385 sync_filesystem(sb);
386 sb->s_flags &= ~MS_ACTIVE;
387
388 fsnotify_unmount_inodes(&sb->s_inodes);
389
390 evict_inodes(sb);
391
392 if (sop->put_super)
393 sop->put_super(sb);
394
395 if (!list_empty(&sb->s_inodes)) {
396 printk("VFS: Busy inodes after unmount of %s. "
397 "Self-destruct in 5 seconds. Have a nice day...\n",
398 sb->s_id);
399 }
400 }
401 spin_lock(&sb_lock);
402 /* should be initialized for __put_super_and_need_restart() */
403 list_del_init(&sb->s_instances);
404 spin_unlock(&sb_lock);
405 up_write(&sb->s_umount);
406}
407
408EXPORT_SYMBOL(generic_shutdown_super);
409
410/**
411 * sget - find or create a superblock
412 * @type: filesystem type superblock should belong to
413 * @test: comparison callback
414 * @set: setup callback
415 * @data: argument to each of them
416 */
417struct super_block *sget(struct file_system_type *type,
418 int (*test)(struct super_block *,void *),
419 int (*set)(struct super_block *,void *),
420 void *data)
421{
422 struct super_block *s = NULL;
423 struct super_block *old;
424 int err;
425
426retry:
427 spin_lock(&sb_lock);
428 if (test) {
429 list_for_each_entry(old, &type->fs_supers, s_instances) {
430 if (!test(old, data))
431 continue;
432 if (!grab_super(old))
433 goto retry;
434 if (s) {
435 up_write(&s->s_umount);
436 destroy_super(s);
437 s = NULL;
438 }
439 down_write(&old->s_umount);
440 if (unlikely(!(old->s_flags & MS_BORN))) {
441 deactivate_locked_super(old);
442 goto retry;
443 }
444 return old;
445 }
446 }
447 if (!s) {
448 spin_unlock(&sb_lock);
449 s = alloc_super(type);
450 if (!s)
451 return ERR_PTR(-ENOMEM);
452 goto retry;
453 }
454
455 err = set(s, data);
456 if (err) {
457 spin_unlock(&sb_lock);
458 up_write(&s->s_umount);
459 destroy_super(s);
460 return ERR_PTR(err);
461 }
462 s->s_type = type;
463 strlcpy(s->s_id, type->name, sizeof(s->s_id));
464 list_add_tail(&s->s_list, &super_blocks);
465 list_add(&s->s_instances, &type->fs_supers);
466 spin_unlock(&sb_lock);
467 get_filesystem(type);
468 register_shrinker(&s->s_shrink);
469 return s;
470}
471
472EXPORT_SYMBOL(sget);
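/*
 * Typical use, as in mount_bdev()/mount_nodev()/mount_single() below: a
 * filesystem's ->mount() helper calls sget() with a test/set callback
 * pair and a key (the block device for mount_bdev(), nothing for
 * mount_nodev()). Callers then look at s->s_root to tell the outcomes
 * apart: if it is already set they found an existing superblock and only
 * take a new reference to its root, otherwise they run their fill_super
 * routine and set MS_ACTIVE. Either way the superblock is returned with
 * ->s_umount held for writing.
 */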
473
474void drop_super(struct super_block *sb)
475{
476 up_read(&sb->s_umount);
477 put_super(sb);
478}
479
480EXPORT_SYMBOL(drop_super);
481
482/**
483 * sync_supers - helper for periodic superblock writeback
484 *
485 * Call the write_super method if present on all dirty superblocks in
486 * the system. This is for the periodic writeback used by most older
487 * filesystems. For data integrity superblock writeback use
488 * sync_filesystems() instead.
489 *
490 * Note: check the dirty flag before waiting, so we don't
491 * hold up the sync while mounting a device. (The newly
492 * mounted device won't need syncing.)
493 */
494void sync_supers(void)
495{
496 struct super_block *sb, *p = NULL;
497
498 spin_lock(&sb_lock);
499 list_for_each_entry(sb, &super_blocks, s_list) {
500 if (list_empty(&sb->s_instances))
501 continue;
502 if (sb->s_op->write_super && sb->s_dirt) {
503 sb->s_count++;
504 spin_unlock(&sb_lock);
505
506 down_read(&sb->s_umount);
507 if (sb->s_root && sb->s_dirt)
508 sb->s_op->write_super(sb);
509 up_read(&sb->s_umount);
510
511 spin_lock(&sb_lock);
512 if (p)
513 __put_super(p);
514 p = sb;
515 }
516 }
517 if (p)
518 __put_super(p);
519 spin_unlock(&sb_lock);
520}
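/*
 * The "p" cursor used above (and in the iterators below) is what makes it
 * safe to drop sb_lock in the middle of the walk: the s_count reference we
 * took keeps the current entry on the list, and we only __put_super() it
 * after retaking sb_lock and advancing to the next superblock.
 */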
521
522/**
523 * iterate_supers - call function for all active superblocks
524 * @f: function to call
525 * @arg: argument to pass to it
526 *
527 * Scans the superblock list and calls given function, passing it
528 * locked superblock and given argument.
529 */
530void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
531{
532 struct super_block *sb, *p = NULL;
533
534 spin_lock(&sb_lock);
535 list_for_each_entry(sb, &super_blocks, s_list) {
536 if (list_empty(&sb->s_instances))
537 continue;
538 sb->s_count++;
539 spin_unlock(&sb_lock);
540
541 down_read(&sb->s_umount);
542 if (sb->s_root)
543 f(sb, arg);
544 up_read(&sb->s_umount);
545
546 spin_lock(&sb_lock);
547 if (p)
548 __put_super(p);
549 p = sb;
550 }
551 if (p)
552 __put_super(p);
553 spin_unlock(&sb_lock);
554}
555
556/**
557 * iterate_supers_type - call function for superblocks of given type
558 * @type: fs type
559 * @f: function to call
560 * @arg: argument to pass to it
561 *
562 * Scans the superblock list and calls given function, passing it
563 * locked superblock and given argument.
564 */
565void iterate_supers_type(struct file_system_type *type,
566 void (*f)(struct super_block *, void *), void *arg)
567{
568 struct super_block *sb, *p = NULL;
569
570 spin_lock(&sb_lock);
571 list_for_each_entry(sb, &type->fs_supers, s_instances) {
572 sb->s_count++;
573 spin_unlock(&sb_lock);
574
575 down_read(&sb->s_umount);
576 if (sb->s_root)
577 f(sb, arg);
578 up_read(&sb->s_umount);
579
580 spin_lock(&sb_lock);
581 if (p)
582 __put_super(p);
583 p = sb;
584 }
585 if (p)
586 __put_super(p);
587 spin_unlock(&sb_lock);
588}
589
590EXPORT_SYMBOL(iterate_supers_type);
591
592/**
593 * get_super - get the superblock of a device
594 * @bdev: device to get the superblock for
595 *
596 * Scans the superblock list and finds the superblock of the file system
597 * mounted on the device given. %NULL is returned if no match is found.
598 */
599
600struct super_block *get_super(struct block_device *bdev)
601{
602 struct super_block *sb;
603
604 if (!bdev)
605 return NULL;
606
607 spin_lock(&sb_lock);
608rescan:
609 list_for_each_entry(sb, &super_blocks, s_list) {
610 if (list_empty(&sb->s_instances))
611 continue;
612 if (sb->s_bdev == bdev) {
613 sb->s_count++;
614 spin_unlock(&sb_lock);
615 down_read(&sb->s_umount);
616 /* still alive? */
617 if (sb->s_root)
618 return sb;
619 up_read(&sb->s_umount);
620 /* nope, got unmounted */
621 spin_lock(&sb_lock);
622 __put_super(sb);
623 goto rescan;
624 }
625 }
626 spin_unlock(&sb_lock);
627 return NULL;
628}
629
630EXPORT_SYMBOL(get_super);
631
632/**
633 * get_active_super - get an active reference to the superblock of a device
634 * @bdev: device to get the superblock for
635 *
636 * Scans the superblock list and finds the superblock of the file system
637 * mounted on the device given. Returns the superblock with an active
638 * reference or %NULL if none was found.
639 */
640struct super_block *get_active_super(struct block_device *bdev)
641{
642 struct super_block *sb;
643
644 if (!bdev)
645 return NULL;
646
647restart:
648 spin_lock(&sb_lock);
649 list_for_each_entry(sb, &super_blocks, s_list) {
650 if (list_empty(&sb->s_instances))
651 continue;
652 if (sb->s_bdev == bdev) {
653 if (grab_super(sb)) /* drops sb_lock */
654 return sb;
655 else
656 goto restart;
657 }
658 }
659 spin_unlock(&sb_lock);
660 return NULL;
661}
662
663struct super_block *user_get_super(dev_t dev)
664{
665 struct super_block *sb;
666
667 spin_lock(&sb_lock);
668rescan:
669 list_for_each_entry(sb, &super_blocks, s_list) {
670 if (list_empty(&sb->s_instances))
671 continue;
672 if (sb->s_dev == dev) {
673 sb->s_count++;
674 spin_unlock(&sb_lock);
675 down_read(&sb->s_umount);
676 /* still alive? */
677 if (sb->s_root)
678 return sb;
679 up_read(&sb->s_umount);
680 /* nope, got unmounted */
681 spin_lock(&sb_lock);
682 __put_super(sb);
683 goto rescan;
684 }
685 }
686 spin_unlock(&sb_lock);
687 return NULL;
688}
689
690/**
691 * do_remount_sb - asks filesystem to change mount options.
692 * @sb: superblock in question
693 * @flags: numeric part of options
694 * @data: the rest of options
695 * @force: whether or not to force the change
696 *
697 * Alters the mount options of a mounted file system.
698 */
699int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
700{
701 int retval;
702 int remount_ro;
703
704 if (sb->s_frozen != SB_UNFROZEN)
705 return -EBUSY;
706
707#ifdef CONFIG_BLOCK
708 if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
709 return -EACCES;
710#endif
711
712 if (flags & MS_RDONLY)
713 acct_auto_close(sb);
714 shrink_dcache_sb(sb);
715 sync_filesystem(sb);
716
717 remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
718
719 /* If we are remounting RDONLY and current sb is read/write,
720 make sure there are no rw files opened */
721 if (remount_ro) {
722 if (force)
723 mark_files_ro(sb);
724 else if (!fs_may_remount_ro(sb))
725 return -EBUSY;
726 }
727
728 if (sb->s_op->remount_fs) {
729 retval = sb->s_op->remount_fs(sb, &flags, data);
730 if (retval)
731 return retval;
732 }
733 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
734
735 /*
736 * Some filesystems modify their metadata via some other path than the
737 * bdev buffer cache (eg. use a private mapping, or directories in
738 * pagecache, etc). Also file data modifications go via their own
739 * mappings. So if we try to mount readonly and then copy the filesystem
740 * from bdev, we could get stale data, so invalidate it to give a best
741 * effort at coherency.
742 */
743 if (remount_ro && sb->s_bdev)
744 invalidate_bdev(sb->s_bdev);
745 return 0;
746}
747
748static void do_emergency_remount(struct work_struct *work)
749{
750 struct super_block *sb, *p = NULL;
751
752 spin_lock(&sb_lock);
753 list_for_each_entry(sb, &super_blocks, s_list) {
754 if (list_empty(&sb->s_instances))
755 continue;
756 sb->s_count++;
757 spin_unlock(&sb_lock);
758 down_write(&sb->s_umount);
759 if (sb->s_root && sb->s_bdev && !(sb->s_flags & MS_RDONLY)) {
760 /*
761 * What lock protects sb->s_flags??
762 */
763 do_remount_sb(sb, MS_RDONLY, NULL, 1);
764 }
765 up_write(&sb->s_umount);
766 spin_lock(&sb_lock);
767 if (p)
768 __put_super(p);
769 p = sb;
770 }
771 if (p)
772 __put_super(p);
773 spin_unlock(&sb_lock);
774 kfree(work);
775 printk("Emergency Remount complete\n");
776}
777
778void emergency_remount(void)
779{
780 struct work_struct *work;
781
782 work = kmalloc(sizeof(*work), GFP_ATOMIC);
783 if (work) {
784 INIT_WORK(work, do_emergency_remount);
785 schedule_work(work);
786 }
787}
788
789/*
790 * Unnamed block devices are dummy devices used by virtual
791 * filesystems which don't use real block-devices. -- jrs
792 */
793
794static DEFINE_IDA(unnamed_dev_ida);
795static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
796static int unnamed_dev_start = 0; /* don't bother trying below it */
797
798int get_anon_bdev(dev_t *p)
799{
800 int dev;
801 int error;
802
803 retry:
804 if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
805 return -ENOMEM;
806 spin_lock(&unnamed_dev_lock);
807 error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
808 if (!error)
809 unnamed_dev_start = dev + 1;
810 spin_unlock(&unnamed_dev_lock);
811 if (error == -EAGAIN)
812 /* We raced and lost with another CPU. */
813 goto retry;
814 else if (error)
815 return -EAGAIN;
816
817 if ((dev & MAX_ID_MASK) == (1 << MINORBITS)) {
818 spin_lock(&unnamed_dev_lock);
819 ida_remove(&unnamed_dev_ida, dev);
820 if (unnamed_dev_start > dev)
821 unnamed_dev_start = dev;
822 spin_unlock(&unnamed_dev_lock);
823 return -EMFILE;
824 }
825 *p = MKDEV(0, dev & MINORMASK);
826 return 0;
827}
828EXPORT_SYMBOL(get_anon_bdev);
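/*
 * The numbers handed out here all use major 0, which never belongs to a
 * real block device, so an anonymous superblock shows up as something like
 * "0:23" (an illustrative minor, not a fixed value) in st_dev and in
 * /proc/self/mountinfo.
 */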
829
830void free_anon_bdev(dev_t dev)
831{
832 int slot = MINOR(dev);
833 spin_lock(&unnamed_dev_lock);
834 ida_remove(&unnamed_dev_ida, slot);
835 if (slot < unnamed_dev_start)
836 unnamed_dev_start = slot;
837 spin_unlock(&unnamed_dev_lock);
838}
839EXPORT_SYMBOL(free_anon_bdev);
840
841int set_anon_super(struct super_block *s, void *data)
842{
843 int error = get_anon_bdev(&s->s_dev);
844 if (!error)
845 s->s_bdi = &noop_backing_dev_info;
846 return error;
847}
848
849EXPORT_SYMBOL(set_anon_super);
850
851void kill_anon_super(struct super_block *sb)
852{
853 dev_t dev = sb->s_dev;
854 generic_shutdown_super(sb);
855 free_anon_bdev(dev);
856}
857
858EXPORT_SYMBOL(kill_anon_super);
859
860void kill_litter_super(struct super_block *sb)
861{
862 if (sb->s_root)
863 d_genocide(sb->s_root);
864 kill_anon_super(sb);
865}
866
867EXPORT_SYMBOL(kill_litter_super);
868
869static int ns_test_super(struct super_block *sb, void *data)
870{
871 return sb->s_fs_info == data;
872}
873
874static int ns_set_super(struct super_block *sb, void *data)
875{
876 sb->s_fs_info = data;
877 return set_anon_super(sb, NULL);
878}
879
880struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
881 void *data, int (*fill_super)(struct super_block *, void *, int))
882{
883 struct super_block *sb;
884
885 sb = sget(fs_type, ns_test_super, ns_set_super, data);
886 if (IS_ERR(sb))
887 return ERR_CAST(sb);
888
889 if (!sb->s_root) {
890 int err;
891 sb->s_flags = flags;
892 err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
893 if (err) {
894 deactivate_locked_super(sb);
895 return ERR_PTR(err);
896 }
897
898 sb->s_flags |= MS_ACTIVE;
899 }
900
901 return dget(sb->s_root);
902}
903
904EXPORT_SYMBOL(mount_ns);
905
906#ifdef CONFIG_BLOCK
907static int set_bdev_super(struct super_block *s, void *data)
908{
909 s->s_bdev = data;
910 s->s_dev = s->s_bdev->bd_dev;
911
912 /*
913 * We set the bdi here to the queue backing, file systems can
914 * overwrite this in ->fill_super()
915 */
916 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
917 return 0;
918}
919
920static int test_bdev_super(struct super_block *s, void *data)
921{
922 return (void *)s->s_bdev == data;
923}
924
925struct dentry *mount_bdev(struct file_system_type *fs_type,
926 int flags, const char *dev_name, void *data,
927 int (*fill_super)(struct super_block *, void *, int))
928{
929 struct block_device *bdev;
930 struct super_block *s;
931 fmode_t mode = FMODE_READ | FMODE_EXCL;
932 int error = 0;
933
934 if (!(flags & MS_RDONLY))
935 mode |= FMODE_WRITE;
936
937 bdev = blkdev_get_by_path(dev_name, mode, fs_type);
938 if (IS_ERR(bdev))
939 return ERR_CAST(bdev);
940
941 /*
942 * once the super is inserted into the list by sget, s_umount
943 * will protect the lockfs code from trying to start a snapshot
944 * while we are mounting
945 */
946 mutex_lock(&bdev->bd_fsfreeze_mutex);
947 if (bdev->bd_fsfreeze_count > 0) {
948 mutex_unlock(&bdev->bd_fsfreeze_mutex);
949 error = -EBUSY;
950 goto error_bdev;
951 }
952 s = sget(fs_type, test_bdev_super, set_bdev_super, bdev);
953 mutex_unlock(&bdev->bd_fsfreeze_mutex);
954 if (IS_ERR(s))
955 goto error_s;
956
957 if (s->s_root) {
958 if ((flags ^ s->s_flags) & MS_RDONLY) {
959 deactivate_locked_super(s);
960 error = -EBUSY;
961 goto error_bdev;
962 }
963
964 /*
965 * s_umount nests inside bd_mutex during
966 * __invalidate_device(). blkdev_put() acquires
967 * bd_mutex and can't be called under s_umount. Drop
968 * s_umount temporarily. This is safe as we're
969 * holding an active reference.
970 */
971 up_write(&s->s_umount);
972 blkdev_put(bdev, mode);
973 down_write(&s->s_umount);
974 } else {
975 char b[BDEVNAME_SIZE];
976
977 s->s_flags = flags | MS_NOSEC;
978 s->s_mode = mode;
979 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
980 sb_set_blocksize(s, block_size(bdev));
981 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
982 if (error) {
983 deactivate_locked_super(s);
984 goto error;
985 }
986
987 s->s_flags |= MS_ACTIVE;
988 bdev->bd_super = s;
989 }
990
991 return dget(s->s_root);
992
993error_s:
994 error = PTR_ERR(s);
995error_bdev:
996 blkdev_put(bdev, mode);
997error:
998 return ERR_PTR(error);
999}
1000EXPORT_SYMBOL(mount_bdev);
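/*
 * A minimal sketch of how a disk filesystem would typically hook this up;
 * the names below are made up for illustration and do not exist in this
 * file:
 *
 *	static struct dentry *foofs_mount(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data)
 *	{
 *		return mount_bdev(fs_type, flags, dev_name, data,
 *				  foofs_fill_super);
 *	}
 *
 * where foofs_fill_super() reads the on-disk superblock and sets s_root.
 */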
1001
1002void kill_block_super(struct super_block *sb)
1003{
1004 struct block_device *bdev = sb->s_bdev;
1005 fmode_t mode = sb->s_mode;
1006
1007 bdev->bd_super = NULL;
1008 generic_shutdown_super(sb);
1009 sync_blockdev(bdev);
1010 WARN_ON_ONCE(!(mode & FMODE_EXCL));
1011 blkdev_put(bdev, mode | FMODE_EXCL);
1012}
1013
1014EXPORT_SYMBOL(kill_block_super);
1015#endif
1016
1017struct dentry *mount_nodev(struct file_system_type *fs_type,
1018 int flags, void *data,
1019 int (*fill_super)(struct super_block *, void *, int))
1020{
1021 int error;
1022 struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
1023
1024 if (IS_ERR(s))
1025 return ERR_CAST(s);
1026
1027 s->s_flags = flags;
1028
1029 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1030 if (error) {
1031 deactivate_locked_super(s);
1032 return ERR_PTR(error);
1033 }
1034 s->s_flags |= MS_ACTIVE;
1035 return dget(s->s_root);
1036}
1037EXPORT_SYMBOL(mount_nodev);
1038
1039static int compare_single(struct super_block *s, void *p)
1040{
1041 return 1;
1042}
1043
1044struct dentry *mount_single(struct file_system_type *fs_type,
1045 int flags, void *data,
1046 int (*fill_super)(struct super_block *, void *, int))
1047{
1048 struct super_block *s;
1049 int error;
1050
1051 s = sget(fs_type, compare_single, set_anon_super, NULL);
1052 if (IS_ERR(s))
1053 return ERR_CAST(s);
1054 if (!s->s_root) {
1055 s->s_flags = flags;
1056 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1057 if (error) {
1058 deactivate_locked_super(s);
1059 return ERR_PTR(error);
1060 }
1061 s->s_flags |= MS_ACTIVE;
1062 } else {
1063 do_remount_sb(s, flags, data, 0);
1064 }
1065 return dget(s->s_root);
1066}
1067EXPORT_SYMBOL(mount_single);
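/*
 * Note the difference between the two helpers above: mount_nodev() passes
 * a NULL test callback, so every mount gets a brand-new superblock, while
 * mount_single() uses compare_single() (which always matches), so all
 * mounts of the filesystem type share one superblock and later mounts are
 * just routed through do_remount_sb().
 */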
1068
1069struct dentry *
1070mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
1071{
1072 struct dentry *root;
1073 struct super_block *sb;
1074 char *secdata = NULL;
1075 int error = -ENOMEM;
1076
1077 if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
1078 secdata = alloc_secdata();
1079 if (!secdata)
1080 goto out;
1081
1082 error = security_sb_copy_data(data, secdata);
1083 if (error)
1084 goto out_free_secdata;
1085 }
1086
1087 root = type->mount(type, flags, name, data);
1088 if (IS_ERR(root)) {
1089 error = PTR_ERR(root);
1090 goto out_free_secdata;
1091 }
1092 sb = root->d_sb;
1093 BUG_ON(!sb);
1094 WARN_ON(!sb->s_bdi);
1095 WARN_ON(sb->s_bdi == &default_backing_dev_info);
1096 sb->s_flags |= MS_BORN;
1097
1098 error = security_sb_kern_mount(sb, flags, secdata);
1099 if (error)
1100 goto out_sb;
1101
1102 /*
1103 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1104 * but s_maxbytes was an unsigned long long for many releases. Throw
1105 * this warning for a little while to try and catch filesystems that
1106 * violate this rule.
1107 */
1108 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1109 "negative value (%lld)\n", type->name, sb->s_maxbytes);
1110
1111 up_write(&sb->s_umount);
1112 free_secdata(secdata);
1113 return root;
1114out_sb:
1115 dput(root);
1116 deactivate_locked_super(sb);
1117out_free_secdata:
1118 free_secdata(secdata);
1119out:
1120 return ERR_PTR(error);
1121}
1122
1123/**
1124 * freeze_super - lock the filesystem and force it into a consistent state
1125 * @sb: the super to lock
1126 *
1127 * Syncs the super to make sure the filesystem is consistent and calls the fs's
1128 * freeze_fs. Subsequent calls to this without first thawing the fs will return
1129 * -EBUSY.
1130 */
1131int freeze_super(struct super_block *sb)
1132{
1133 int ret;
1134
1135 atomic_inc(&sb->s_active);
1136 down_write(&sb->s_umount);
1137 if (sb->s_frozen) {
1138 deactivate_locked_super(sb);
1139 return -EBUSY;
1140 }
1141
1142 if (sb->s_flags & MS_RDONLY) {
1143 sb->s_frozen = SB_FREEZE_TRANS;
1144 smp_wmb();
1145 up_write(&sb->s_umount);
1146 return 0;
1147 }
1148
1149 sb->s_frozen = SB_FREEZE_WRITE;
1150 smp_wmb();
1151
1152 sync_filesystem(sb);
1153
1154 sb->s_frozen = SB_FREEZE_TRANS;
1155 smp_wmb();
1156
1157 sync_blockdev(sb->s_bdev);
1158 if (sb->s_op->freeze_fs) {
1159 ret = sb->s_op->freeze_fs(sb);
1160 if (ret) {
1161 printk(KERN_ERR
1162 "VFS:Filesystem freeze failed\n");
1163 sb->s_frozen = SB_UNFROZEN;
1164 deactivate_locked_super(sb);
1165 return ret;
1166 }
1167 }
1168 up_write(&sb->s_umount);
1169 return 0;
1170}
1171EXPORT_SYMBOL(freeze_super);
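/*
 * The ordering above matters: SB_FREEZE_WRITE stops new writers first,
 * sync_filesystem() then flushes what is already dirty, and only after
 * that is s_frozen raised to SB_FREEZE_TRANS and ->freeze_fs() asked to
 * quiesce the filesystem's internal transactions. Write paths that honour
 * s_frozen sleep on s_wait_unfrozen and are woken again by thaw_super()
 * below.
 */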
1172
1173/**
1174 * thaw_super -- unlock filesystem
1175 * @sb: the super to thaw
1176 *
1177 * Unlocks the filesystem and marks it writeable again after freeze_super().
1178 */
1179int thaw_super(struct super_block *sb)
1180{
1181 int error;
1182
1183 down_write(&sb->s_umount);
1184 if (sb->s_frozen == SB_UNFROZEN) {
1185 up_write(&sb->s_umount);
1186 return -EINVAL;
1187 }
1188
1189 if (sb->s_flags & MS_RDONLY)
1190 goto out;
1191
1192 if (sb->s_op->unfreeze_fs) {
1193 error = sb->s_op->unfreeze_fs(sb);
1194 if (error) {
1195 printk(KERN_ERR
1196 "VFS:Filesystem thaw failed\n");
1197 sb->s_frozen = SB_FREEZE_TRANS;
1198 up_write(&sb->s_umount);
1199 return error;
1200 }
1201 }
1202
1203out:
1204 sb->s_frozen = SB_UNFROZEN;
1205 smp_wmb();
1206 wake_up(&sb->s_wait_unfrozen);
1207 deactivate_locked_super(sb);
1208
1209 return 0;
1210}
1211EXPORT_SYMBOL(thaw_super);
1/*
2 * linux/fs/super.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * super.c contains code to handle: - mount structures
7 * - super-block tables
8 * - filesystem drivers list
9 * - mount system call
10 * - umount system call
11 * - ustat system call
12 *
13 * GK 2/5/95 - Changed to support mounting the root fs via NFS
14 *
15 * Added kerneld support: Jacques Gelinas and Bjorn Ekwall
16 * Added change_root: Werner Almesberger & Hans Lermen, Feb '96
17 * Added options to /proc/mounts:
18 * Torbjörn Lindh (torbjorn.lindh@gopta.se), April 14, 1996.
19 * Added devfs support: Richard Gooch <rgooch@atnf.csiro.au>, 13-JAN-1998
20 * Heavily rewritten for 'one fs - one tree' dcache architecture. AV, Mar 2000
21 */
22
23#include <linux/export.h>
24#include <linux/slab.h>
25#include <linux/blkdev.h>
26#include <linux/mount.h>
27#include <linux/security.h>
28#include <linux/writeback.h> /* for the emergency remount stuff */
29#include <linux/idr.h>
30#include <linux/mutex.h>
31#include <linux/backing-dev.h>
32#include <linux/rculist_bl.h>
33#include <linux/cleancache.h>
34#include <linux/fsnotify.h>
35#include <linux/lockdep.h>
36#include <linux/user_namespace.h>
37#include "internal.h"
38
39
40static LIST_HEAD(super_blocks);
41static DEFINE_SPINLOCK(sb_lock);
42
43static char *sb_writers_name[SB_FREEZE_LEVELS] = {
44 "sb_writers",
45 "sb_pagefaults",
46 "sb_internal",
47};
48
49/*
50 * One thing we have to be careful of with a per-sb shrinker is that we don't
51 * drop the last active reference to the superblock from within the shrinker.
52 * If that happens we could trigger unregistering the shrinker from within the
53 * shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
54 * take a passive reference to the superblock to prevent this from occurring.
55 */
56static unsigned long super_cache_scan(struct shrinker *shrink,
57 struct shrink_control *sc)
58{
59 struct super_block *sb;
60 long fs_objects = 0;
61 long total_objects;
62 long freed = 0;
63 long dentries;
64 long inodes;
65
66 sb = container_of(shrink, struct super_block, s_shrink);
67
68 /*
69 * Deadlock avoidance. We may hold various FS locks, and we don't want
70 * to recurse into the FS that called us in clear_inode() and friends..
71 */
72 if (!(sc->gfp_mask & __GFP_FS))
73 return SHRINK_STOP;
74
75 if (!trylock_super(sb))
76 return SHRINK_STOP;
77
78 if (sb->s_op->nr_cached_objects)
79 fs_objects = sb->s_op->nr_cached_objects(sb, sc);
80
81 inodes = list_lru_shrink_count(&sb->s_inode_lru, sc);
82 dentries = list_lru_shrink_count(&sb->s_dentry_lru, sc);
83 total_objects = dentries + inodes + fs_objects + 1;
84 if (!total_objects)
85 total_objects = 1;
86
87 /* proportion the scan between the caches */
88 dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
89 inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);
90 fs_objects = mult_frac(sc->nr_to_scan, fs_objects, total_objects);
91
92 /*
93 * prune the dcache first as the icache is pinned by it, then
94 * prune the icache, followed by the filesystem specific caches
95 *
96 * Ensure that we always scan at least one object - memcg kmem
97 * accounting uses this to fully empty the caches.
98 */
99 sc->nr_to_scan = dentries + 1;
100 freed = prune_dcache_sb(sb, sc);
101 sc->nr_to_scan = inodes + 1;
102 freed += prune_icache_sb(sb, sc);
103
104 if (fs_objects) {
105 sc->nr_to_scan = fs_objects + 1;
106 freed += sb->s_op->free_cached_objects(sb, sc);
107 }
108
109 up_read(&sb->s_umount);
110 return freed;
111}
112
113static unsigned long super_cache_count(struct shrinker *shrink,
114 struct shrink_control *sc)
115{
116 struct super_block *sb;
117 long total_objects = 0;
118
119 sb = container_of(shrink, struct super_block, s_shrink);
120
121 /*
122 * Don't call trylock_super as it is a potential
123 * scalability bottleneck. The counts could get updated
124 * between super_cache_count and super_cache_scan anyway.
125 * Call to super_cache_count with shrinker_rwsem held
126 * ensures the safety of call to list_lru_shrink_count() and
127 * s_op->nr_cached_objects().
128 */
129 if (sb->s_op && sb->s_op->nr_cached_objects)
130 total_objects = sb->s_op->nr_cached_objects(sb, sc);
131
132 total_objects += list_lru_shrink_count(&sb->s_dentry_lru, sc);
133 total_objects += list_lru_shrink_count(&sb->s_inode_lru, sc);
134
135 total_objects = vfs_pressure_ratio(total_objects);
136 return total_objects;
137}
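/*
 * The two callbacks above implement the split shrinker API:
 * ->count_objects (super_cache_count) only reports an estimate of what is
 * reclaimable, while ->scan_objects (super_cache_scan) does the actual
 * freeing and returns the number freed, or SHRINK_STOP when it cannot
 * make progress (no __GFP_FS, or the superblock is being torn down).
 */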
138
139static void destroy_super_work(struct work_struct *work)
140{
141 struct super_block *s = container_of(work, struct super_block,
142 destroy_work);
143 int i;
144
145 for (i = 0; i < SB_FREEZE_LEVELS; i++)
146 percpu_free_rwsem(&s->s_writers.rw_sem[i]);
147 kfree(s);
148}
149
150static void destroy_super_rcu(struct rcu_head *head)
151{
152 struct super_block *s = container_of(head, struct super_block, rcu);
153 INIT_WORK(&s->destroy_work, destroy_super_work);
154 schedule_work(&s->destroy_work);
155}
156
157/**
158 * destroy_super - frees a superblock
159 * @s: superblock to free
160 *
161 * Frees a superblock.
162 */
163static void destroy_super(struct super_block *s)
164{
165 list_lru_destroy(&s->s_dentry_lru);
166 list_lru_destroy(&s->s_inode_lru);
167 security_sb_free(s);
168 WARN_ON(!list_empty(&s->s_mounts));
169 put_user_ns(s->s_user_ns);
170 kfree(s->s_subtype);
171 kfree(s->s_options);
172 call_rcu(&s->rcu, destroy_super_rcu);
173}
174
175/**
176 * alloc_super - create new superblock
177 * @type: filesystem type superblock should belong to
178 * @flags: the mount flags
179 * @user_ns: User namespace for the super_block
180 *
181 * Allocates and initializes a new &struct super_block. alloc_super()
182 * returns a pointer to a new superblock or %NULL if allocation fails.
183 */
184static struct super_block *alloc_super(struct file_system_type *type, int flags,
185 struct user_namespace *user_ns)
186{
187 struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
188 static const struct super_operations default_op;
189 int i;
190
191 if (!s)
192 return NULL;
193
194 INIT_LIST_HEAD(&s->s_mounts);
195 s->s_user_ns = get_user_ns(user_ns);
196
197 if (security_sb_alloc(s))
198 goto fail;
199
200 for (i = 0; i < SB_FREEZE_LEVELS; i++) {
201 if (__percpu_init_rwsem(&s->s_writers.rw_sem[i],
202 sb_writers_name[i],
203 &type->s_writers_key[i]))
204 goto fail;
205 }
206 init_waitqueue_head(&s->s_writers.wait_unfrozen);
207 s->s_bdi = &noop_backing_dev_info;
208 s->s_flags = flags;
209 if (s->s_user_ns != &init_user_ns)
210 s->s_iflags |= SB_I_NODEV;
211 INIT_HLIST_NODE(&s->s_instances);
212 INIT_HLIST_BL_HEAD(&s->s_anon);
213 mutex_init(&s->s_sync_lock);
214 INIT_LIST_HEAD(&s->s_inodes);
215 spin_lock_init(&s->s_inode_list_lock);
216 INIT_LIST_HEAD(&s->s_inodes_wb);
217 spin_lock_init(&s->s_inode_wblist_lock);
218
219 if (list_lru_init_memcg(&s->s_dentry_lru))
220 goto fail;
221 if (list_lru_init_memcg(&s->s_inode_lru))
222 goto fail;
223
224 init_rwsem(&s->s_umount);
225 lockdep_set_class(&s->s_umount, &type->s_umount_key);
226 /*
227 * sget() can have s_umount recursion.
228 *
229 * When it cannot find a suitable sb, it allocates a new
230 * one (this one), and tries again to find a suitable old
231 * one.
232 *
233 * In case that succeeds, it will acquire the s_umount
234 * lock of the old one. Since these are clearly distinct
235 * locks, and this object isn't exposed yet, there's no
236 * risk of deadlocks.
237 *
238 * Annotate this by putting this lock in a different
239 * subclass.
240 */
241 down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING);
242 s->s_count = 1;
243 atomic_set(&s->s_active, 1);
244 mutex_init(&s->s_vfs_rename_mutex);
245 lockdep_set_class(&s->s_vfs_rename_mutex, &type->s_vfs_rename_key);
246 mutex_init(&s->s_dquot.dqio_mutex);
247 s->s_maxbytes = MAX_NON_LFS;
248 s->s_op = &default_op;
249 s->s_time_gran = 1000000000;
250 s->cleancache_poolid = CLEANCACHE_NO_POOL;
251
252 s->s_shrink.seeks = DEFAULT_SEEKS;
253 s->s_shrink.scan_objects = super_cache_scan;
254 s->s_shrink.count_objects = super_cache_count;
255 s->s_shrink.batch = 1024;
256 s->s_shrink.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE;
257 return s;
258
259fail:
260 destroy_super(s);
261 return NULL;
262}
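/*
 * The three percpu rwsems initialised above back the sb_start_write(),
 * sb_start_pagefault() and sb_start_intwrite() freeze levels; the strings
 * in sb_writers_name[] are only used to name each level for lockdep.
 */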
263
264/* Superblock refcounting */
265
266/*
267 * Drop a superblock's refcount. The caller must hold sb_lock.
268 */
269static void __put_super(struct super_block *sb)
270{
271 if (!--sb->s_count) {
272 list_del_init(&sb->s_list);
273 destroy_super(sb);
274 }
275}
276
277/**
278 * put_super - drop a temporary reference to superblock
279 * @sb: superblock in question
280 *
281 * Drops a temporary reference, frees superblock if there are no
282 * references left.
283 */
284static void put_super(struct super_block *sb)
285{
286 spin_lock(&sb_lock);
287 __put_super(sb);
288 spin_unlock(&sb_lock);
289}
290
291
292/**
293 * deactivate_locked_super - drop an active reference to superblock
294 * @s: superblock to deactivate
295 *
296 * Drops an active reference to superblock, converting it into a temporary
297 * one if there are no other active references left. In that case we
298 * tell fs driver to shut it down and drop the temporary reference we
299 * had just acquired.
300 *
301 * Caller holds exclusive lock on superblock; that lock is released.
302 */
303void deactivate_locked_super(struct super_block *s)
304{
305 struct file_system_type *fs = s->s_type;
306 if (atomic_dec_and_test(&s->s_active)) {
307 cleancache_invalidate_fs(s);
308 unregister_shrinker(&s->s_shrink);
309 fs->kill_sb(s);
310
311 /*
312 * Since list_lru_destroy() may sleep, we cannot call it from
313 * put_super(), where we hold the sb_lock. Therefore we destroy
314 * the lru lists right now.
315 */
316 list_lru_destroy(&s->s_dentry_lru);
317 list_lru_destroy(&s->s_inode_lru);
318
319 put_filesystem(fs);
320 put_super(s);
321 } else {
322 up_write(&s->s_umount);
323 }
324}
325
326EXPORT_SYMBOL(deactivate_locked_super);
327
328/**
329 * deactivate_super - drop an active reference to superblock
330 * @s: superblock to deactivate
331 *
332 * Variant of deactivate_locked_super(), except that superblock is *not*
333 * locked by caller. If we are going to drop the final active reference,
334 * lock will be acquired prior to that.
335 */
336void deactivate_super(struct super_block *s)
337{
338 if (!atomic_add_unless(&s->s_active, -1, 1)) {
339 down_write(&s->s_umount);
340 deactivate_locked_super(s);
341 }
342}
343
344EXPORT_SYMBOL(deactivate_super);
345
346/**
347 * grab_super - acquire an active reference
348 * @s: reference we are trying to make active
349 *
350 * Tries to acquire an active reference. grab_super() is used when we
351 * had just found a superblock in super_blocks or fs_type->fs_supers
352 * and want to turn it into a full-blown active reference. grab_super()
353 * is called with sb_lock held and drops it. Returns 1 in case of
354 * success, 0 if we had failed (superblock contents were already dead or
355 * dying when grab_super() had been called). Note that this is only
356 * called for superblocks not in rundown mode (== ones still on ->fs_supers
357 * of their type), so increment of ->s_count is OK here.
358 */
359static int grab_super(struct super_block *s) __releases(sb_lock)
360{
361 s->s_count++;
362 spin_unlock(&sb_lock);
363 down_write(&s->s_umount);
364 if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
365 put_super(s);
366 return 1;
367 }
368 up_write(&s->s_umount);
369 put_super(s);
370 return 0;
371}
372
373/*
374 * trylock_super - try to grab ->s_umount shared
375 * @sb: reference we are trying to grab
376 *
377 * Try to prevent fs shutdown. This is used in places where we
378 * cannot take an active reference but we need to ensure that the
379 * filesystem is not shut down while we are working on it. It returns
380 * false if we cannot acquire s_umount or if we lose the race and
381 * filesystem already got into shutdown, and returns true with the s_umount
382 * lock held in read mode in case of success. On successful return,
383 * the caller must drop the s_umount lock when done.
384 *
385 * Note that unlike get_super() et al. this one does *not* bump ->s_count.
386 * The reason why it's safe is that we are OK with doing trylock instead
387 * of down_read(). There are a couple of places that are OK with that, but
388 * it's very much not a general-purpose interface.
389 */
390bool trylock_super(struct super_block *sb)
391{
392 if (down_read_trylock(&sb->s_umount)) {
393 if (!hlist_unhashed(&sb->s_instances) &&
394 sb->s_root && (sb->s_flags & MS_BORN))
395 return true;
396 up_read(&sb->s_umount);
397 }
398
399 return false;
400}
401
402/**
403 * generic_shutdown_super - common helper for ->kill_sb()
404 * @sb: superblock to kill
405 *
406 * generic_shutdown_super() does all fs-independent work on superblock
407 * shutdown. Typical ->kill_sb() should pick all fs-specific objects
408 * that need destruction out of superblock, call generic_shutdown_super()
409 * and release aforementioned objects. Note: dentries and inodes _are_
410 * taken care of and do not need specific handling.
411 *
412 * Upon calling this function, the filesystem may no longer alter or
413 * rearrange the set of dentries belonging to this super_block, nor may it
414 * change the attachments of dentries to inodes.
415 */
416void generic_shutdown_super(struct super_block *sb)
417{
418 const struct super_operations *sop = sb->s_op;
419
420 if (sb->s_root) {
421 shrink_dcache_for_umount(sb);
422 sync_filesystem(sb);
423 sb->s_flags &= ~MS_ACTIVE;
424
425 fsnotify_unmount_inodes(sb);
426 cgroup_writeback_umount();
427
428 evict_inodes(sb);
429
430 if (sb->s_dio_done_wq) {
431 destroy_workqueue(sb->s_dio_done_wq);
432 sb->s_dio_done_wq = NULL;
433 }
434
435 if (sop->put_super)
436 sop->put_super(sb);
437
438 if (!list_empty(&sb->s_inodes)) {
439 printk("VFS: Busy inodes after unmount of %s. "
440 "Self-destruct in 5 seconds. Have a nice day...\n",
441 sb->s_id);
442 }
443 }
444 spin_lock(&sb_lock);
445 /* should be initialized for __put_super_and_need_restart() */
446 hlist_del_init(&sb->s_instances);
447 spin_unlock(&sb_lock);
448 up_write(&sb->s_umount);
449}
450
451EXPORT_SYMBOL(generic_shutdown_super);
452
453/**
454 * sget_userns - find or create a superblock
455 * @type: filesystem type superblock should belong to
456 * @test: comparison callback
457 * @set: setup callback
458 * @flags: mount flags
459 * @user_ns: User namespace for the super_block
460 * @data: argument to each of them
461 */
462struct super_block *sget_userns(struct file_system_type *type,
463 int (*test)(struct super_block *,void *),
464 int (*set)(struct super_block *,void *),
465 int flags, struct user_namespace *user_ns,
466 void *data)
467{
468 struct super_block *s = NULL;
469 struct super_block *old;
470 int err;
471
472 if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) &&
473 !(type->fs_flags & FS_USERNS_MOUNT) &&
474 !capable(CAP_SYS_ADMIN))
475 return ERR_PTR(-EPERM);
476retry:
477 spin_lock(&sb_lock);
478 if (test) {
479 hlist_for_each_entry(old, &type->fs_supers, s_instances) {
480 if (!test(old, data))
481 continue;
482 if (user_ns != old->s_user_ns) {
483 spin_unlock(&sb_lock);
484 if (s) {
485 up_write(&s->s_umount);
486 destroy_super(s);
487 }
488 return ERR_PTR(-EBUSY);
489 }
490 if (!grab_super(old))
491 goto retry;
492 if (s) {
493 up_write(&s->s_umount);
494 destroy_super(s);
495 s = NULL;
496 }
497 return old;
498 }
499 }
500 if (!s) {
501 spin_unlock(&sb_lock);
502 s = alloc_super(type, (flags & ~MS_SUBMOUNT), user_ns);
503 if (!s)
504 return ERR_PTR(-ENOMEM);
505 goto retry;
506 }
507
508 err = set(s, data);
509 if (err) {
510 spin_unlock(&sb_lock);
511 up_write(&s->s_umount);
512 destroy_super(s);
513 return ERR_PTR(err);
514 }
515 s->s_type = type;
516 strlcpy(s->s_id, type->name, sizeof(s->s_id));
517 list_add_tail(&s->s_list, &super_blocks);
518 hlist_add_head(&s->s_instances, &type->fs_supers);
519 spin_unlock(&sb_lock);
520 get_filesystem(type);
521 register_shrinker(&s->s_shrink);
522 return s;
523}
524
525EXPORT_SYMBOL(sget_userns);
526
527/**
528 * sget - find or create a superblock
529 * @type: filesystem type superblock should belong to
530 * @test: comparison callback
531 * @set: setup callback
532 * @flags: mount flags
533 * @data: argument to each of them
534 */
535struct super_block *sget(struct file_system_type *type,
536 int (*test)(struct super_block *,void *),
537 int (*set)(struct super_block *,void *),
538 int flags,
539 void *data)
540{
541 struct user_namespace *user_ns = current_user_ns();
542
543 /* We don't yet pass the user namespace of the parent
544 * mount through to here so always use &init_user_ns
545 * until that changes.
546 */
547 if (flags & MS_SUBMOUNT)
548 user_ns = &init_user_ns;
549
550 /* Ensure the requestor has permissions over the target filesystem */
551 if (!(flags & (MS_KERNMOUNT|MS_SUBMOUNT)) && !ns_capable(user_ns, CAP_SYS_ADMIN))
552 return ERR_PTR(-EPERM);
553
554 return sget_userns(type, test, set, flags, user_ns, data);
555}
556
557EXPORT_SYMBOL(sget);
558
559void drop_super(struct super_block *sb)
560{
561 up_read(&sb->s_umount);
562 put_super(sb);
563}
564
565EXPORT_SYMBOL(drop_super);
566
567void drop_super_exclusive(struct super_block *sb)
568{
569 up_write(&sb->s_umount);
570 put_super(sb);
571}
572EXPORT_SYMBOL(drop_super_exclusive);
573
574/**
575 * iterate_supers - call function for all active superblocks
576 * @f: function to call
577 * @arg: argument to pass to it
578 *
579 * Scans the superblock list and calls given function, passing it
580 * locked superblock and given argument.
581 */
582void iterate_supers(void (*f)(struct super_block *, void *), void *arg)
583{
584 struct super_block *sb, *p = NULL;
585
586 spin_lock(&sb_lock);
587 list_for_each_entry(sb, &super_blocks, s_list) {
588 if (hlist_unhashed(&sb->s_instances))
589 continue;
590 sb->s_count++;
591 spin_unlock(&sb_lock);
592
593 down_read(&sb->s_umount);
594 if (sb->s_root && (sb->s_flags & MS_BORN))
595 f(sb, arg);
596 up_read(&sb->s_umount);
597
598 spin_lock(&sb_lock);
599 if (p)
600 __put_super(p);
601 p = sb;
602 }
603 if (p)
604 __put_super(p);
605 spin_unlock(&sb_lock);
606}
607
608/**
609 * iterate_supers_type - call function for superblocks of given type
610 * @type: fs type
611 * @f: function to call
612 * @arg: argument to pass to it
613 *
614 * Scans the superblock list and calls given function, passing it
615 * locked superblock and given argument.
616 */
617void iterate_supers_type(struct file_system_type *type,
618 void (*f)(struct super_block *, void *), void *arg)
619{
620 struct super_block *sb, *p = NULL;
621
622 spin_lock(&sb_lock);
623 hlist_for_each_entry(sb, &type->fs_supers, s_instances) {
624 sb->s_count++;
625 spin_unlock(&sb_lock);
626
627 down_read(&sb->s_umount);
628 if (sb->s_root && (sb->s_flags & MS_BORN))
629 f(sb, arg);
630 up_read(&sb->s_umount);
631
632 spin_lock(&sb_lock);
633 if (p)
634 __put_super(p);
635 p = sb;
636 }
637 if (p)
638 __put_super(p);
639 spin_unlock(&sb_lock);
640}
641
642EXPORT_SYMBOL(iterate_supers_type);
643
644static struct super_block *__get_super(struct block_device *bdev, bool excl)
645{
646 struct super_block *sb;
647
648 if (!bdev)
649 return NULL;
650
651 spin_lock(&sb_lock);
652rescan:
653 list_for_each_entry(sb, &super_blocks, s_list) {
654 if (hlist_unhashed(&sb->s_instances))
655 continue;
656 if (sb->s_bdev == bdev) {
657 sb->s_count++;
658 spin_unlock(&sb_lock);
659 if (!excl)
660 down_read(&sb->s_umount);
661 else
662 down_write(&sb->s_umount);
663 /* still alive? */
664 if (sb->s_root && (sb->s_flags & MS_BORN))
665 return sb;
666 if (!excl)
667 up_read(&sb->s_umount);
668 else
669 up_write(&sb->s_umount);
670 /* nope, got unmounted */
671 spin_lock(&sb_lock);
672 __put_super(sb);
673 goto rescan;
674 }
675 }
676 spin_unlock(&sb_lock);
677 return NULL;
678}
679
680/**
681 * get_super - get the superblock of a device
682 * @bdev: device to get the superblock for
683 *
684 * Scans the superblock list and finds the superblock of the file system
685 * mounted on the device given. %NULL is returned if no match is found.
686 */
687struct super_block *get_super(struct block_device *bdev)
688{
689 return __get_super(bdev, false);
690}
691EXPORT_SYMBOL(get_super);
692
693static struct super_block *__get_super_thawed(struct block_device *bdev,
694 bool excl)
695{
696 while (1) {
697 struct super_block *s = __get_super(bdev, excl);
698 if (!s || s->s_writers.frozen == SB_UNFROZEN)
699 return s;
700 if (!excl)
701 up_read(&s->s_umount);
702 else
703 up_write(&s->s_umount);
704 wait_event(s->s_writers.wait_unfrozen,
705 s->s_writers.frozen == SB_UNFROZEN);
706 put_super(s);
707 }
708}
709
710/**
711 * get_super_thawed - get thawed superblock of a device
712 * @bdev: device to get the superblock for
713 *
714 * Scans the superblock list and finds the superblock of the file system
715 * mounted on the device. The superblock is returned once it is thawed
716 * (or immediately if it was not frozen). %NULL is returned if no match
717 * is found.
718 */
719struct super_block *get_super_thawed(struct block_device *bdev)
720{
721 return __get_super_thawed(bdev, false);
722}
723EXPORT_SYMBOL(get_super_thawed);
724
725/**
726 * get_super_exclusive_thawed - get thawed superblock of a device
727 * @bdev: device to get the superblock for
728 *
729 * Scans the superblock list and finds the superblock of the file system
730 * mounted on the device. The superblock is returned once it is thawed
731 * (or immediately if it was not frozen) and s_umount semaphore is held
732 * in exclusive mode. %NULL is returned if no match is found.
733 */
734struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
735{
736 return __get_super_thawed(bdev, true);
737}
738EXPORT_SYMBOL(get_super_exclusive_thawed);
739
740/**
741 * get_active_super - get an active reference to the superblock of a device
742 * @bdev: device to get the superblock for
743 *
744 * Scans the superblock list and finds the superblock of the file system
745 * mounted on the device given. Returns the superblock with an active
746 * reference or %NULL if none was found.
747 */
748struct super_block *get_active_super(struct block_device *bdev)
749{
750 struct super_block *sb;
751
752 if (!bdev)
753 return NULL;
754
755restart:
756 spin_lock(&sb_lock);
757 list_for_each_entry(sb, &super_blocks, s_list) {
758 if (hlist_unhashed(&sb->s_instances))
759 continue;
760 if (sb->s_bdev == bdev) {
761 if (!grab_super(sb))
762 goto restart;
763 up_write(&sb->s_umount);
764 return sb;
765 }
766 }
767 spin_unlock(&sb_lock);
768 return NULL;
769}
770
771struct super_block *user_get_super(dev_t dev)
772{
773 struct super_block *sb;
774
775 spin_lock(&sb_lock);
776rescan:
777 list_for_each_entry(sb, &super_blocks, s_list) {
778 if (hlist_unhashed(&sb->s_instances))
779 continue;
780 if (sb->s_dev == dev) {
781 sb->s_count++;
782 spin_unlock(&sb_lock);
783 down_read(&sb->s_umount);
784 /* still alive? */
785 if (sb->s_root && (sb->s_flags & MS_BORN))
786 return sb;
787 up_read(&sb->s_umount);
788 /* nope, got unmounted */
789 spin_lock(&sb_lock);
790 __put_super(sb);
791 goto rescan;
792 }
793 }
794 spin_unlock(&sb_lock);
795 return NULL;
796}
797
798/**
799 * do_remount_sb - asks filesystem to change mount options.
800 * @sb: superblock in question
801 * @flags: numeric part of options
802 * @data: the rest of options
803 * @force: whether or not to force the change
804 *
805 * Alters the mount options of a mounted file system.
806 */
807int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
808{
809 int retval;
810 int remount_ro;
811
812 if (sb->s_writers.frozen != SB_UNFROZEN)
813 return -EBUSY;
814
815#ifdef CONFIG_BLOCK
816 if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
817 return -EACCES;
818#endif
819
820 remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
821
822 if (remount_ro) {
823 if (!hlist_empty(&sb->s_pins)) {
824 up_write(&sb->s_umount);
825 group_pin_kill(&sb->s_pins);
826 down_write(&sb->s_umount);
827 if (!sb->s_root)
828 return 0;
829 if (sb->s_writers.frozen != SB_UNFROZEN)
830 return -EBUSY;
831 remount_ro = (flags & MS_RDONLY) && !(sb->s_flags & MS_RDONLY);
832 }
833 }
834 shrink_dcache_sb(sb);
835
836 /* If we are remounting RDONLY and current sb is read/write,
837 make sure there are no rw files opened */
838 if (remount_ro) {
839 if (force) {
840 sb->s_readonly_remount = 1;
841 smp_wmb();
842 } else {
843 retval = sb_prepare_remount_readonly(sb);
844 if (retval)
845 return retval;
846 }
847 }
848
849 if (sb->s_op->remount_fs) {
850 retval = sb->s_op->remount_fs(sb, &flags, data);
851 if (retval) {
852 if (!force)
853 goto cancel_readonly;
854 /* If forced remount, go ahead despite any errors */
855 WARN(1, "forced remount of a %s fs returned %i\n",
856 sb->s_type->name, retval);
857 }
858 }
859 sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
860 /* Needs to be ordered wrt mnt_is_readonly() */
861 smp_wmb();
862 sb->s_readonly_remount = 0;
863
864 /*
865 * Some filesystems modify their metadata via some other path than the
866	 * bdev buffer cache (e.g. they use a private mapping, or keep directories
867	 * in pagecache, etc.). File data modifications also go via their own
868	 * mappings. So if we remount read-only and then copy the filesystem from
869	 * the bdev, we could get stale data; invalidate it to make a best
870	 * effort at coherency.
871 */
872 if (remount_ro && sb->s_bdev)
873 invalidate_bdev(sb->s_bdev);
874 return 0;
875
876cancel_readonly:
877 sb->s_readonly_remount = 0;
878 return retval;
879}
880
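/*
 * Illustrative sketch (editor's example, not part of this file): a minimal
 * ->remount_fs() on the receiving end of do_remount_sb() above. "examplefs"
 * and its option parser are hypothetical stand-ins for a real driver.
 */
static int examplefs_parse_options(char *data, struct super_block *sb);

static int examplefs_remount(struct super_block *sb, int *flags, char *data)
{
	/*
	 * Write back anything dirty first; for a rw -> ro transition,
	 * do_remount_sb() has already blocked new writers at this point.
	 */
	sync_filesystem(sb);

	/* Re-parse the string options and reject anything unsupported. */
	if (data && examplefs_parse_options(data, sb))
		return -EINVAL;

	/* Nothing else to adjust: do_remount_sb() applies *flags itself. */
	return 0;
}
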
881static void do_emergency_remount(struct work_struct *work)
882{
883 struct super_block *sb, *p = NULL;
884
885 spin_lock(&sb_lock);
886 list_for_each_entry(sb, &super_blocks, s_list) {
887 if (hlist_unhashed(&sb->s_instances))
888 continue;
889 sb->s_count++;
890 spin_unlock(&sb_lock);
891 down_write(&sb->s_umount);
892 if (sb->s_root && sb->s_bdev && (sb->s_flags & MS_BORN) &&
893 !(sb->s_flags & MS_RDONLY)) {
894 /*
895 * What lock protects sb->s_flags??
896 */
897 do_remount_sb(sb, MS_RDONLY, NULL, 1);
898 }
899 up_write(&sb->s_umount);
900 spin_lock(&sb_lock);
901 if (p)
902 __put_super(p);
903 p = sb;
904 }
905 if (p)
906 __put_super(p);
907 spin_unlock(&sb_lock);
908 kfree(work);
909	printk(KERN_INFO "Emergency Remount complete\n");
910}
911
912void emergency_remount(void)
913{
914 struct work_struct *work;
915
916 work = kmalloc(sizeof(*work), GFP_ATOMIC);
917 if (work) {
918 INIT_WORK(work, do_emergency_remount);
919 schedule_work(work);
920 }
921}
922
923/*
924 * Unnamed block devices are dummy devices used by virtual
925 * filesystems which don't use real block-devices. -- jrs
926 */
927
928static DEFINE_IDA(unnamed_dev_ida);
929static DEFINE_SPINLOCK(unnamed_dev_lock);/* protects the above */
930/* Many userspace utilities consider an FSID of 0 invalid.
931 * Always return at least 1 from get_anon_bdev.
932 */
933static int unnamed_dev_start = 1;
934
935int get_anon_bdev(dev_t *p)
936{
937 int dev;
938 int error;
939
940 retry:
941 if (ida_pre_get(&unnamed_dev_ida, GFP_ATOMIC) == 0)
942 return -ENOMEM;
943 spin_lock(&unnamed_dev_lock);
944 error = ida_get_new_above(&unnamed_dev_ida, unnamed_dev_start, &dev);
945 if (!error)
946 unnamed_dev_start = dev + 1;
947 spin_unlock(&unnamed_dev_lock);
948 if (error == -EAGAIN)
949 /* We raced and lost with another CPU. */
950 goto retry;
951 else if (error)
952 return -EAGAIN;
953
954 if (dev >= (1 << MINORBITS)) {
955 spin_lock(&unnamed_dev_lock);
956 ida_remove(&unnamed_dev_ida, dev);
957 if (unnamed_dev_start > dev)
958 unnamed_dev_start = dev;
959 spin_unlock(&unnamed_dev_lock);
960 return -EMFILE;
961 }
962 *p = MKDEV(0, dev & MINORMASK);
963 return 0;
964}
965EXPORT_SYMBOL(get_anon_bdev);
966
967void free_anon_bdev(dev_t dev)
968{
969 int slot = MINOR(dev);
970 spin_lock(&unnamed_dev_lock);
971 ida_remove(&unnamed_dev_ida, slot);
972 if (slot < unnamed_dev_start)
973 unnamed_dev_start = slot;
974 spin_unlock(&unnamed_dev_lock);
975}
976EXPORT_SYMBOL(free_anon_bdev);
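
/*
 * Illustrative sketch (editor's example, not part of this file): code other
 * than set_anon_super() can also borrow an anonymous dev_t, e.g. to give a
 * sub-volume its own st_dev. "example_subvol" is a hypothetical structure.
 */
struct example_subvol {
	dev_t anon_dev;
};

static int example_subvol_init(struct example_subvol *sv)
{
	return get_anon_bdev(&sv->anon_dev);	/* never hands out minor 0 */
}

static void example_subvol_release(struct example_subvol *sv)
{
	free_anon_bdev(sv->anon_dev);
}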
977
978int set_anon_super(struct super_block *s, void *data)
979{
980 return get_anon_bdev(&s->s_dev);
981}
982
983EXPORT_SYMBOL(set_anon_super);
984
985void kill_anon_super(struct super_block *sb)
986{
987 dev_t dev = sb->s_dev;
988 generic_shutdown_super(sb);
989 free_anon_bdev(dev);
990}
991
992EXPORT_SYMBOL(kill_anon_super);
993
994void kill_litter_super(struct super_block *sb)
995{
996 if (sb->s_root)
997 d_genocide(sb->s_root);
998 kill_anon_super(sb);
999}
1000
1001EXPORT_SYMBOL(kill_litter_super);
1002
1003static int ns_test_super(struct super_block *sb, void *data)
1004{
1005 return sb->s_fs_info == data;
1006}
1007
1008static int ns_set_super(struct super_block *sb, void *data)
1009{
1010 sb->s_fs_info = data;
1011 return set_anon_super(sb, NULL);
1012}
1013
1014struct dentry *mount_ns(struct file_system_type *fs_type,
1015 int flags, void *data, void *ns, struct user_namespace *user_ns,
1016 int (*fill_super)(struct super_block *, void *, int))
1017{
1018 struct super_block *sb;
1019
1020 /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
1021 * over the namespace.
1022 */
1023 if (!(flags & MS_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
1024 return ERR_PTR(-EPERM);
1025
1026 sb = sget_userns(fs_type, ns_test_super, ns_set_super, flags,
1027 user_ns, ns);
1028 if (IS_ERR(sb))
1029 return ERR_CAST(sb);
1030
1031 if (!sb->s_root) {
1032 int err;
1033 err = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
1034 if (err) {
1035 deactivate_locked_super(sb);
1036 return ERR_PTR(err);
1037 }
1038
1039 sb->s_flags |= MS_ACTIVE;
1040 }
1041
1042 return dget(sb->s_root);
1043}
1044
1045EXPORT_SYMBOL(mount_ns);
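
/*
 * Illustrative sketch (editor's example, not part of this file): a
 * namespace-bound filesystem wiring mount_ns() into its .mount callback,
 * keyed on the caller's network namespace. "examplens" and its fill_super
 * are hypothetical, and the snippet assumes <linux/nsproxy.h> and
 * <net/net_namespace.h> are available; the pattern mirrors how
 * ns_test_super()/ns_set_super() above key the superblock on the namespace
 * pointer stored in s_fs_info.
 */
static int examplens_fill_super(struct super_block *sb, void *data, int silent);

static struct dentry *examplens_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	struct net *net = current->nsproxy->net_ns;

	return mount_ns(fs_type, flags, data, net, net->user_ns,
			examplens_fill_super);
}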
1046
1047#ifdef CONFIG_BLOCK
1048static int set_bdev_super(struct super_block *s, void *data)
1049{
1050 s->s_bdev = data;
1051 s->s_dev = s->s_bdev->bd_dev;
1052
1053 /*
1054	 * We set the bdi here to the queue's backing_dev_info; file systems can
1055	 * override this in ->fill_super().
1056 */
1057 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
1058 return 0;
1059}
1060
1061static int test_bdev_super(struct super_block *s, void *data)
1062{
1063 return (void *)s->s_bdev == data;
1064}
1065
1066struct dentry *mount_bdev(struct file_system_type *fs_type,
1067 int flags, const char *dev_name, void *data,
1068 int (*fill_super)(struct super_block *, void *, int))
1069{
1070 struct block_device *bdev;
1071 struct super_block *s;
1072 fmode_t mode = FMODE_READ | FMODE_EXCL;
1073 int error = 0;
1074
1075 if (!(flags & MS_RDONLY))
1076 mode |= FMODE_WRITE;
1077
1078 bdev = blkdev_get_by_path(dev_name, mode, fs_type);
1079 if (IS_ERR(bdev))
1080 return ERR_CAST(bdev);
1081
1082 /*
1083 * once the super is inserted into the list by sget, s_umount
1084 * will protect the lockfs code from trying to start a snapshot
1085 * while we are mounting
1086 */
1087 mutex_lock(&bdev->bd_fsfreeze_mutex);
1088 if (bdev->bd_fsfreeze_count > 0) {
1089 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1090 error = -EBUSY;
1091 goto error_bdev;
1092 }
1093 s = sget(fs_type, test_bdev_super, set_bdev_super, flags | MS_NOSEC,
1094 bdev);
1095 mutex_unlock(&bdev->bd_fsfreeze_mutex);
1096 if (IS_ERR(s))
1097 goto error_s;
1098
1099 if (s->s_root) {
1100 if ((flags ^ s->s_flags) & MS_RDONLY) {
1101 deactivate_locked_super(s);
1102 error = -EBUSY;
1103 goto error_bdev;
1104 }
1105
1106 /*
1107 * s_umount nests inside bd_mutex during
1108 * __invalidate_device(). blkdev_put() acquires
1109 * bd_mutex and can't be called under s_umount. Drop
1110 * s_umount temporarily. This is safe as we're
1111 * holding an active reference.
1112 */
1113 up_write(&s->s_umount);
1114 blkdev_put(bdev, mode);
1115 down_write(&s->s_umount);
1116 } else {
1117 s->s_mode = mode;
1118 snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
1119 sb_set_blocksize(s, block_size(bdev));
1120 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1121 if (error) {
1122 deactivate_locked_super(s);
1123 goto error;
1124 }
1125
1126 s->s_flags |= MS_ACTIVE;
1127 bdev->bd_super = s;
1128 }
1129
1130 return dget(s->s_root);
1131
1132error_s:
1133 error = PTR_ERR(s);
1134error_bdev:
1135 blkdev_put(bdev, mode);
1136error:
1137 return ERR_PTR(error);
1138}
1139EXPORT_SYMBOL(mount_bdev);
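
/*
 * Illustrative sketch (editor's example, not part of this file): the usual
 * way a block-device filesystem plugs into mount_bdev() and pairs it with
 * kill_block_super() below. "examplefs" and examplefs_fill_super() are
 * hypothetical stand-ins for a real driver.
 */
static int examplefs_fill_super(struct super_block *sb, void *data, int silent);

static struct dentry *examplefs_mount(struct file_system_type *fs_type,
		int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, examplefs_fill_super);
}

static struct file_system_type examplefs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= examplefs_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};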
1140
1141void kill_block_super(struct super_block *sb)
1142{
1143 struct block_device *bdev = sb->s_bdev;
1144 fmode_t mode = sb->s_mode;
1145
1146 bdev->bd_super = NULL;
1147 generic_shutdown_super(sb);
1148 sync_blockdev(bdev);
1149 WARN_ON_ONCE(!(mode & FMODE_EXCL));
1150 blkdev_put(bdev, mode | FMODE_EXCL);
1151}
1152
1153EXPORT_SYMBOL(kill_block_super);
1154#endif
1155
1156struct dentry *mount_nodev(struct file_system_type *fs_type,
1157 int flags, void *data,
1158 int (*fill_super)(struct super_block *, void *, int))
1159{
1160 int error;
1161 struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
1162
1163 if (IS_ERR(s))
1164 return ERR_CAST(s);
1165
1166 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1167 if (error) {
1168 deactivate_locked_super(s);
1169 return ERR_PTR(error);
1170 }
1171 s->s_flags |= MS_ACTIVE;
1172 return dget(s->s_root);
1173}
1174EXPORT_SYMBOL(mount_nodev);
1175
1176static int compare_single(struct super_block *s, void *p)
1177{
1178 return 1;
1179}
1180
1181struct dentry *mount_single(struct file_system_type *fs_type,
1182 int flags, void *data,
1183 int (*fill_super)(struct super_block *, void *, int))
1184{
1185 struct super_block *s;
1186 int error;
1187
1188 s = sget(fs_type, compare_single, set_anon_super, flags, NULL);
1189 if (IS_ERR(s))
1190 return ERR_CAST(s);
1191 if (!s->s_root) {
1192 error = fill_super(s, data, flags & MS_SILENT ? 1 : 0);
1193 if (error) {
1194 deactivate_locked_super(s);
1195 return ERR_PTR(error);
1196 }
1197 s->s_flags |= MS_ACTIVE;
1198 } else {
1199 do_remount_sb(s, flags, data, 0);
1200 }
1201 return dget(s->s_root);
1202}
1203EXPORT_SYMBOL(mount_single);
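
/*
 * Illustrative sketch (editor's example, not part of this file): the choice
 * between mount_nodev() and mount_single() above. mount_nodev() builds a
 * fresh superblock for every mount, while mount_single() reuses one shared
 * instance (compare_single() matches any existing superblock) and merely
 * remounts it with the new options, as debugfs does. Names are hypothetical.
 */
static int pseudo_fill_super(struct super_block *sb, void *data, int silent);

/* a private instance per mount */
static struct dentry *pseudo_mount_multi(struct file_system_type *t,
		int flags, const char *dev_name, void *data)
{
	return mount_nodev(t, flags, data, pseudo_fill_super);
}

/* one instance system-wide */
static struct dentry *pseudo_mount_shared(struct file_system_type *t,
		int flags, const char *dev_name, void *data)
{
	return mount_single(t, flags, data, pseudo_fill_super);
}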
1204
1205struct dentry *
1206mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
1207{
1208 struct dentry *root;
1209 struct super_block *sb;
1210 char *secdata = NULL;
1211 int error = -ENOMEM;
1212
1213 if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) {
1214 secdata = alloc_secdata();
1215 if (!secdata)
1216 goto out;
1217
1218 error = security_sb_copy_data(data, secdata);
1219 if (error)
1220 goto out_free_secdata;
1221 }
1222
1223 root = type->mount(type, flags, name, data);
1224 if (IS_ERR(root)) {
1225 error = PTR_ERR(root);
1226 goto out_free_secdata;
1227 }
1228 sb = root->d_sb;
1229 BUG_ON(!sb);
1230 WARN_ON(!sb->s_bdi);
1231 sb->s_flags |= MS_BORN;
1232
1233 error = security_sb_kern_mount(sb, flags, secdata);
1234 if (error)
1235 goto out_sb;
1236
1237 /*
1238 * filesystems should never set s_maxbytes larger than MAX_LFS_FILESIZE
1239 * but s_maxbytes was an unsigned long long for many releases. Throw
1240 * this warning for a little while to try and catch filesystems that
1241 * violate this rule.
1242 */
1243 WARN((sb->s_maxbytes < 0), "%s set sb->s_maxbytes to "
1244 "negative value (%lld)\n", type->name, sb->s_maxbytes);
1245
1246 up_write(&sb->s_umount);
1247 free_secdata(secdata);
1248 return root;
1249out_sb:
1250 dput(root);
1251 deactivate_locked_super(sb);
1252out_free_secdata:
1253 free_secdata(secdata);
1254out:
1255 return ERR_PTR(error);
1256}
1257
1258/*
1259 * This is an internal function, please use sb_end_{write,pagefault,intwrite}
1260 * instead.
1261 */
1262void __sb_end_write(struct super_block *sb, int level)
1263{
1264 percpu_up_read(sb->s_writers.rw_sem + level-1);
1265}
1266EXPORT_SYMBOL(__sb_end_write);
1267
1268/*
1269 * This is an internal function, please use sb_start_{write,pagefault,intwrite}
1270 * instead.
1271 */
1272int __sb_start_write(struct super_block *sb, int level, bool wait)
1273{
1274 bool force_trylock = false;
1275 int ret = 1;
1276
1277#ifdef CONFIG_LOCKDEP
1278 /*
1279	 * We want lockdep to tell us about possible deadlocks with freezing,
1280	 * but it's a bit tricky to instrument properly. Getting freeze
1281	 * protection works like taking a read lock, but there are subtle
1282	 * problems. XFS, for example, gets freeze protection on an internal
1283	 * level twice in some cases, which is OK only because we already hold
1284	 * freeze protection on a higher level as well. For those cases we have
1285	 * to use wait == false (trylock mode), which must not fail.
1286 */
1287 if (wait) {
1288 int i;
1289
1290 for (i = 0; i < level - 1; i++)
1291 if (percpu_rwsem_is_held(sb->s_writers.rw_sem + i)) {
1292 force_trylock = true;
1293 break;
1294 }
1295 }
1296#endif
1297 if (wait && !force_trylock)
1298 percpu_down_read(sb->s_writers.rw_sem + level-1);
1299 else
1300 ret = percpu_down_read_trylock(sb->s_writers.rw_sem + level-1);
1301
1302 WARN_ON(force_trylock && !ret);
1303 return ret;
1304}
1305EXPORT_SYMBOL(__sb_start_write);
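
/*
 * Illustrative sketch (editor's example, not part of this file): write paths
 * normally take freeze protection through the sb_start_write()/sb_end_write()
 * wrappers rather than calling __sb_start_write() directly.
 * example_do_write() is a hypothetical helper.
 */
static int example_do_write(struct inode *inode);

static int example_write_path(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int ret;

	sb_start_write(sb);	/* waits here while the fs is frozen */
	ret = example_do_write(inode);
	sb_end_write(sb);
	return ret;
}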
1306
1307/**
1308 * sb_wait_write - wait until all writers to the given file system finish
1309 * @sb: the super for which we wait
1310 * @level: type of writers we wait for (normal vs page fault)
1311 *
1312 * This function waits until there are no writers of the given type to the
1313 * given file system.
1314 */
1315static void sb_wait_write(struct super_block *sb, int level)
1316{
1317 percpu_down_write(sb->s_writers.rw_sem + level-1);
1318}
1319
1320/*
1321 * We are going to return to userspace and forget about these locks; their
1322 * ownership goes to the caller of thaw_super(), which does the unlock().
1323 */
1324static void lockdep_sb_freeze_release(struct super_block *sb)
1325{
1326 int level;
1327
1328 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1329 percpu_rwsem_release(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1330}
1331
1332/*
1333 * Tell lockdep we are holding these locks before we call ->unfreeze_fs(sb).
1334 */
1335static void lockdep_sb_freeze_acquire(struct super_block *sb)
1336{
1337 int level;
1338
1339 for (level = 0; level < SB_FREEZE_LEVELS; ++level)
1340 percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
1341}
1342
1343static void sb_freeze_unlock(struct super_block *sb)
1344{
1345 int level;
1346
1347 for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
1348 percpu_up_write(sb->s_writers.rw_sem + level);
1349}
1350
1351/**
1352 * freeze_super - lock the filesystem and force it into a consistent state
1353 * @sb: the super to lock
1354 *
1355 * Syncs the super to make sure the filesystem is consistent and calls the fs's
1356 * freeze_fs. Subsequent calls to this without first thawing the fs will return
1357 * -EBUSY.
1358 *
1359 * During this function, sb->s_writers.frozen goes through these values:
1360 *
1361 * SB_UNFROZEN: File system is normal, all writes progress as usual.
1362 *
1363 * SB_FREEZE_WRITE: The file system is in the process of being frozen. New
1364 * writes should be blocked, though page faults are still allowed. We wait for
1365 * all writes to complete and then proceed to the next stage.
1366 *
1367 * SB_FREEZE_PAGEFAULT: Freezing continues. Now also page faults are blocked
1368 * but internal fs threads can still modify the filesystem (although they
1369 * should not dirty new pages or inodes), writeback can run etc. After waiting
1370 * for all running page faults we sync the filesystem which will clean all
1371 * dirty pages and inodes (no new dirty pages or inodes can be created when
1372 * sync is running).
1373 *
1374 * SB_FREEZE_FS: The file system is frozen. Now all internal sources of fs
1375 * modification are blocked (e.g. XFS preallocation truncation on inode
1376 * reclaim). This is usually implemented by blocking new transactions for
1377 * filesystems that have them and need this additional guard. After all
1378 * internal writers are finished we call ->freeze_fs() to finish filesystem
1379 * freezing. Then we transition to SB_FREEZE_COMPLETE state. This state is
1380 * mostly auxiliary for filesystems to verify they do not modify frozen fs.
1381 *
1382 * sb->s_writers.frozen is protected by sb->s_umount.
1383 */
1384int freeze_super(struct super_block *sb)
1385{
1386 int ret;
1387
1388 atomic_inc(&sb->s_active);
1389 down_write(&sb->s_umount);
1390 if (sb->s_writers.frozen != SB_UNFROZEN) {
1391 deactivate_locked_super(sb);
1392 return -EBUSY;
1393 }
1394
1395 if (!(sb->s_flags & MS_BORN)) {
1396 up_write(&sb->s_umount);
1397 return 0; /* sic - it's "nothing to do" */
1398 }
1399
1400 if (sb->s_flags & MS_RDONLY) {
1401 /* Nothing to do really... */
1402 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1403 up_write(&sb->s_umount);
1404 return 0;
1405 }
1406
1407 sb->s_writers.frozen = SB_FREEZE_WRITE;
1408 /* Release s_umount to preserve sb_start_write -> s_umount ordering */
1409 up_write(&sb->s_umount);
1410 sb_wait_write(sb, SB_FREEZE_WRITE);
1411 down_write(&sb->s_umount);
1412
1413 /* Now we go and block page faults... */
1414 sb->s_writers.frozen = SB_FREEZE_PAGEFAULT;
1415 sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
1416
1417 /* All writers are done so after syncing there won't be dirty data */
1418 sync_filesystem(sb);
1419
1420 /* Now wait for internal filesystem counter */
1421 sb->s_writers.frozen = SB_FREEZE_FS;
1422 sb_wait_write(sb, SB_FREEZE_FS);
1423
1424 if (sb->s_op->freeze_fs) {
1425 ret = sb->s_op->freeze_fs(sb);
1426 if (ret) {
1427 printk(KERN_ERR
1428				"VFS: Filesystem freeze failed\n");
1429 sb->s_writers.frozen = SB_UNFROZEN;
1430 sb_freeze_unlock(sb);
1431 wake_up(&sb->s_writers.wait_unfrozen);
1432 deactivate_locked_super(sb);
1433 return ret;
1434 }
1435 }
1436 /*
1437	 * frozen is set to SB_FREEZE_COMPLETE below for two reasons: so the fs
1438	 * can warn (for debugging) if it sees write activity, and for thaw_super().
1439 */
1440 sb->s_writers.frozen = SB_FREEZE_COMPLETE;
1441 lockdep_sb_freeze_release(sb);
1442 up_write(&sb->s_umount);
1443 return 0;
1444}
1445EXPORT_SYMBOL(freeze_super);
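
/*
 * Illustrative sketch (editor's example, not part of this file): a management
 * path that wants a stable on-disk image brackets its work with freeze_super()
 * and thaw_super() below, much as the FIFREEZE/FITHAW ioctls do.
 * example_snapshot() is a hypothetical helper.
 */
static int example_snapshot(struct super_block *sb);

static int example_frozen_snapshot(struct super_block *sb)
{
	int err, ret;

	err = freeze_super(sb);		/* drives frozen up to SB_FREEZE_COMPLETE */
	if (err)
		return err;

	err = example_snapshot(sb);	/* no writes, faults or fs-internal updates now */

	ret = thaw_super(sb);		/* also drops the active ref freeze_super() took */
	return err ?: ret;
}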
1446
1447/**
1448 * thaw_super - unlock filesystem
1449 * @sb: the super to thaw
1450 *
1451 * Unlocks the filesystem and marks it writeable again after freeze_super().
1452 */
1453int thaw_super(struct super_block *sb)
1454{
1455 int error;
1456
1457 down_write(&sb->s_umount);
1458 if (sb->s_writers.frozen != SB_FREEZE_COMPLETE) {
1459 up_write(&sb->s_umount);
1460 return -EINVAL;
1461 }
1462
1463 if (sb->s_flags & MS_RDONLY) {
1464 sb->s_writers.frozen = SB_UNFROZEN;
1465 goto out;
1466 }
1467
1468 lockdep_sb_freeze_acquire(sb);
1469
1470 if (sb->s_op->unfreeze_fs) {
1471 error = sb->s_op->unfreeze_fs(sb);
1472 if (error) {
1473 printk(KERN_ERR
1474				"VFS: Filesystem thaw failed\n");
1475 lockdep_sb_freeze_release(sb);
1476 up_write(&sb->s_umount);
1477 return error;
1478 }
1479 }
1480
1481 sb->s_writers.frozen = SB_UNFROZEN;
1482 sb_freeze_unlock(sb);
1483out:
1484 wake_up(&sb->s_writers.wait_unfrozen);
1485 deactivate_locked_super(sb);
1486 return 0;
1487}
1488EXPORT_SYMBOL(thaw_super);