// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_dir2.h"
#include "xfs_extfree_item.h"
#include "xfs_mru_cache.h"
#include "xfs_inode_item.h"
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_icreate_item.h"
#include "xfs_filestream.h"
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
#include "xfs_rmap_item.h"
#include "xfs_refcount_item.h"
#include "xfs_bmap_item.h"
#include "xfs_reflink.h"
#include "xfs_pwork.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "xfs_attr_item.h"
#include "xfs_xattr.h"
#include "xfs_iunlink_item.h"
#include "xfs_dahash_test.h"
#include "xfs_rtbitmap.h"
#include "scrub/stats.h"

#include <linux/magic.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>

static const struct super_operations xfs_super_operations;

static struct dentry *xfs_debugfs;	/* top-level xfs debugfs dir */
static struct kset *xfs_kset;		/* top-level xfs sysfs dir */
#ifdef DEBUG
static struct xfs_kobj xfs_dbg_kobj;	/* global debug sysfs attrs */
#endif

enum xfs_dax_mode {
	XFS_DAX_INODE = 0,
	XFS_DAX_ALWAYS = 1,
	XFS_DAX_NEVER = 2,
};

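/*
 * Translate the dax= mount option into feature bits. "inode" (the default)
 * clears both bits and lets the per-inode DAX flag decide; "always" and
 * "never" override that choice for every inode on the mount.
 */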
static void
xfs_mount_set_dax_mode(
	struct xfs_mount	*mp,
	enum xfs_dax_mode	mode)
{
	switch (mode) {
	case XFS_DAX_INODE:
		mp->m_features &= ~(XFS_FEAT_DAX_ALWAYS | XFS_FEAT_DAX_NEVER);
		break;
	case XFS_DAX_ALWAYS:
		mp->m_features |= XFS_FEAT_DAX_ALWAYS;
		mp->m_features &= ~XFS_FEAT_DAX_NEVER;
		break;
	case XFS_DAX_NEVER:
		mp->m_features |= XFS_FEAT_DAX_NEVER;
		mp->m_features &= ~XFS_FEAT_DAX_ALWAYS;
		break;
	}
}

static const struct constant_table dax_param_enums[] = {
	{"inode",	XFS_DAX_INODE },
	{"always",	XFS_DAX_ALWAYS },
	{"never",	XFS_DAX_NEVER },
	{}
};

/*
 * Table driven mount option parser.
 */
enum {
	Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
	Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
	Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
	Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
	Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
	Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
	Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
	Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
	Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
};

static const struct fs_parameter_spec xfs_fs_parameters[] = {
	fsparam_u32("logbufs",		Opt_logbufs),
	fsparam_string("logbsize",	Opt_logbsize),
	fsparam_string("logdev",	Opt_logdev),
	fsparam_string("rtdev",		Opt_rtdev),
	fsparam_flag("wsync",		Opt_wsync),
	fsparam_flag("noalign",		Opt_noalign),
	fsparam_flag("swalloc",		Opt_swalloc),
	fsparam_u32("sunit",		Opt_sunit),
	fsparam_u32("swidth",		Opt_swidth),
	fsparam_flag("nouuid",		Opt_nouuid),
	fsparam_flag("grpid",		Opt_grpid),
	fsparam_flag("nogrpid",		Opt_nogrpid),
	fsparam_flag("bsdgroups",	Opt_bsdgroups),
	fsparam_flag("sysvgroups",	Opt_sysvgroups),
	fsparam_string("allocsize",	Opt_allocsize),
	fsparam_flag("norecovery",	Opt_norecovery),
	fsparam_flag("inode64",		Opt_inode64),
	fsparam_flag("inode32",		Opt_inode32),
	fsparam_flag("ikeep",		Opt_ikeep),
	fsparam_flag("noikeep",		Opt_noikeep),
	fsparam_flag("largeio",		Opt_largeio),
	fsparam_flag("nolargeio",	Opt_nolargeio),
	fsparam_flag("attr2",		Opt_attr2),
	fsparam_flag("noattr2",		Opt_noattr2),
	fsparam_flag("filestreams",	Opt_filestreams),
	fsparam_flag("quota",		Opt_quota),
	fsparam_flag("noquota",		Opt_noquota),
	fsparam_flag("usrquota",	Opt_usrquota),
	fsparam_flag("grpquota",	Opt_grpquota),
	fsparam_flag("prjquota",	Opt_prjquota),
	fsparam_flag("uquota",		Opt_uquota),
	fsparam_flag("gquota",		Opt_gquota),
	fsparam_flag("pquota",		Opt_pquota),
	fsparam_flag("uqnoenforce",	Opt_uqnoenforce),
	fsparam_flag("gqnoenforce",	Opt_gqnoenforce),
	fsparam_flag("pqnoenforce",	Opt_pqnoenforce),
	fsparam_flag("qnoenforce",	Opt_qnoenforce),
	fsparam_flag("discard",		Opt_discard),
	fsparam_flag("nodiscard",	Opt_nodiscard),
	fsparam_flag("dax",		Opt_dax),
	fsparam_enum("dax",		Opt_dax_enum, dax_param_enums),
	{}
};

struct proc_xfs_info {
	uint64_t	flag;
	char		*str;
};

static int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	static struct proc_xfs_info xfs_info_set[] = {
		/* the few simple ones we can get from the mount struct */
		{ XFS_FEAT_IKEEP,		",ikeep" },
		{ XFS_FEAT_WSYNC,		",wsync" },
		{ XFS_FEAT_NOALIGN,		",noalign" },
		{ XFS_FEAT_SWALLOC,		",swalloc" },
		{ XFS_FEAT_NOUUID,		",nouuid" },
		{ XFS_FEAT_NORECOVERY,		",norecovery" },
		{ XFS_FEAT_ATTR2,		",attr2" },
		{ XFS_FEAT_FILESTREAMS,		",filestreams" },
		{ XFS_FEAT_GRPID,		",grpid" },
		{ XFS_FEAT_DISCARD,		",discard" },
		{ XFS_FEAT_LARGE_IOSIZE,	",largeio" },
		{ XFS_FEAT_DAX_ALWAYS,		",dax=always" },
		{ XFS_FEAT_DAX_NEVER,		",dax=never" },
		{ 0, NULL }
	};
	struct xfs_mount	*mp = XFS_M(root->d_sb);
	struct proc_xfs_info	*xfs_infop;

	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
		if (mp->m_features & xfs_infop->flag)
			seq_puts(m, xfs_infop->str);
	}

	seq_printf(m, ",inode%d", xfs_has_small_inums(mp) ? 32 : 64);

	if (xfs_has_allocsize(mp))
		seq_printf(m, ",allocsize=%dk",
			   (1 << mp->m_allocsize_log) >> 10);

	if (mp->m_logbufs > 0)
		seq_printf(m, ",logbufs=%d", mp->m_logbufs);
	if (mp->m_logbsize > 0)
		seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);

	if (mp->m_logname)
		seq_show_option(m, "logdev", mp->m_logname);
	if (mp->m_rtname)
		seq_show_option(m, "rtdev", mp->m_rtname);

	if (mp->m_dalign > 0)
		seq_printf(m, ",sunit=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
	if (mp->m_swidth > 0)
		seq_printf(m, ",swidth=%d",
			   (int)XFS_FSB_TO_BB(mp, mp->m_swidth));

	if (mp->m_qflags & XFS_UQUOTA_ENFD)
		seq_puts(m, ",usrquota");
	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
		seq_puts(m, ",uqnoenforce");

	if (mp->m_qflags & XFS_PQUOTA_ENFD)
		seq_puts(m, ",prjquota");
	else if (mp->m_qflags & XFS_PQUOTA_ACCT)
		seq_puts(m, ",pqnoenforce");

	if (mp->m_qflags & XFS_GQUOTA_ENFD)
		seq_puts(m, ",grpquota");
	else if (mp->m_qflags & XFS_GQUOTA_ACCT)
		seq_puts(m, ",gqnoenforce");

	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
		seq_puts(m, ",noquota");

	return 0;
}

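/*
 * Apply the inode32 allocation policy to a single AG. If inode32 is not in
 * effect, every AG may hold inodes. Otherwise only AGs whose inode numbers
 * fit in 32 bits are allowed to hold inodes, and the low AGs covered by the
 * metadata reservation are preferred for metadata. Returns true if the AG
 * counts towards the inode32 maxagi limit.
 */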
static bool
xfs_set_inode_alloc_perag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino,
	xfs_agnumber_t		max_metadata)
{
	if (!xfs_is_inode32(pag->pag_mount)) {
		set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	if (ino > XFS_MAXINUMBER_32) {
		clear_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
		return false;
	}

	set_bit(XFS_AGSTATE_ALLOWS_INODES, &pag->pag_opstate);
	if (pag->pag_agno < max_metadata)
		set_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	else
		clear_bit(XFS_AGSTATE_PREFERS_METADATA, &pag->pag_opstate);
	return true;
}

/*
 * Set parameters for inode allocation heuristics, taking into account
 * filesystem size and inode32/inode64 mount options; i.e. specifically
 * whether or not XFS_FEAT_SMALL_INUMS is set.
 *
 * Inode allocation patterns are altered only if inode32 is requested
 * (XFS_FEAT_SMALL_INUMS), and the filesystem is sufficiently large.
 * If altered, XFS_OPSTATE_INODE32 is set as well.
 *
 * An agcount independent of that in the mount structure is provided
 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
 * to the potentially higher ag count.
 *
 * Returns the maximum AG index which may contain inodes.
 */
xfs_agnumber_t
xfs_set_inode_alloc(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agcount)
{
	xfs_agnumber_t		index;
	xfs_agnumber_t		maxagi = 0;
	xfs_sb_t		*sbp = &mp->m_sb;
	xfs_agnumber_t		max_metadata;
	xfs_agino_t		agino;
	xfs_ino_t		ino;

	/*
	 * Calculate how much should be reserved for inodes to meet
	 * the max inode percentage. Used only for inode32.
	 */
	if (M_IGEO(mp)->maxicount) {
		uint64_t	icount;

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		icount += sbp->sb_agblocks - 1;
		do_div(icount, sbp->sb_agblocks);
		max_metadata = icount;
	} else {
		max_metadata = agcount;
	}

	/* Get the last possible inode in the filesystem */
	agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/*
	 * If user asked for no more than 32-bit inodes, and the fs is
	 * sufficiently large, set XFS_OPSTATE_INODE32 if we must alter
	 * the allocator to accommodate the request.
	 */
	if (xfs_has_small_inums(mp) && ino > XFS_MAXINUMBER_32)
		set_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);
	else
		clear_bit(XFS_OPSTATE_INODE32, &mp->m_opstate);

	for (index = 0; index < agcount; index++) {
		struct xfs_perag	*pag;

		ino = XFS_AGINO_TO_INO(mp, index, agino);

		pag = xfs_perag_get(mp, index);
		if (xfs_set_inode_alloc_perag(pag, ino, max_metadata))
			maxagi++;
		xfs_perag_put(pag);
	}

	return xfs_is_inode32(mp) ? maxagi : agcount;
}

static int
xfs_setup_dax_always(
	struct xfs_mount	*mp)
{
	if (!mp->m_ddev_targp->bt_daxdev &&
	    (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
		xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
		goto disable_dax;
	}

	if (mp->m_super->s_blocksize != PAGE_SIZE) {
		xfs_alert(mp,
			"DAX not supported for blocksize. Turning off DAX.");
		goto disable_dax;
	}

	if (xfs_has_reflink(mp) &&
	    bdev_is_partition(mp->m_ddev_targp->bt_bdev)) {
		xfs_alert(mp,
			"DAX and reflink cannot work with multi-partitions!");
		return -EINVAL;
	}

	return 0;

disable_dax:
	xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
	return 0;
}

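/*
 * Open an external log or realtime device by path, with the superblock as
 * the holder. BLK_OPEN_RESTRICT_WRITES keeps other writers away from the
 * device while this filesystem owns it.
 */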
STATIC int
xfs_blkdev_get(
	xfs_mount_t		*mp,
	const char		*name,
	struct bdev_handle	**handlep)
{
	int			error = 0;

	*handlep = bdev_open_by_path(name,
		BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_RESTRICT_WRITES,
		mp->m_super, &fs_holder_ops);
	if (IS_ERR(*handlep)) {
		error = PTR_ERR(*handlep);
		*handlep = NULL;
		xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
	}

	return error;
}

STATIC void
xfs_shutdown_devices(
	struct xfs_mount	*mp)
{
	/*
	 * Udev is triggered whenever anyone closes a block device or unmounts
	 * a file system on a block device.
	 * The default udev rules invoke blkid to read the fs super and create
	 * symlinks to the bdev under /dev/disk. For this, it uses buffered
	 * reads through the page cache.
	 *
	 * xfs_db also uses buffered reads to examine metadata. There is no
	 * coordination between xfs_db and udev, which means that they can run
	 * concurrently. Note there is no coordination between the kernel and
	 * blkid either.
	 *
	 * On a system with 64k pages, the page cache can cache the superblock
	 * and the root inode (and hence the root directory) with the same 64k
	 * page. If udev spawns blkid after the mkfs and the system is busy
	 * enough that it is still running when xfs_db starts up, they'll both
	 * read from the same page in the pagecache.
	 *
	 * The unmount writes updated inode metadata to disk directly. The XFS
	 * buffer cache does not use the bdev pagecache, so it needs to
	 * invalidate that pagecache on unmount. If the above scenario occurs,
	 * the pagecache no longer reflects what's on disk, xfs_db reads the
	 * stale metadata, and fails to find /a. Most of the time this succeeds
	 * because closing a bdev invalidates the page cache, but when processes
	 * race, everyone loses.
	 */
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		blkdev_issue_flush(mp->m_logdev_targp->bt_bdev);
		invalidate_bdev(mp->m_logdev_targp->bt_bdev);
	}
	if (mp->m_rtdev_targp) {
		blkdev_issue_flush(mp->m_rtdev_targp->bt_bdev);
		invalidate_bdev(mp->m_rtdev_targp->bt_bdev);
	}
	blkdev_issue_flush(mp->m_ddev_targp->bt_bdev);
	invalidate_bdev(mp->m_ddev_targp->bt_bdev);
}

/*
 * The file system configurations are:
 *	(1) device (partition) with data and internal log
 *	(2) logical volume with data and log subvolumes.
 *	(3) logical volume with data, log, and realtime subvolumes.
 *
 * We only have to handle opening the log and realtime volumes here if
 * they are present. The data subvolume has already been opened by
 * get_sb_bdev() and is stored in sb->s_bdev.
 */
STATIC int
xfs_open_devices(
	struct xfs_mount	*mp)
{
	struct super_block	*sb = mp->m_super;
	struct block_device	*ddev = sb->s_bdev;
	struct bdev_handle	*logdev_handle = NULL, *rtdev_handle = NULL;
	int			error;

	/*
	 * Open real time and log devices - order is important.
	 */
	if (mp->m_logname) {
		error = xfs_blkdev_get(mp, mp->m_logname, &logdev_handle);
		if (error)
			return error;
	}

	if (mp->m_rtname) {
		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev_handle);
		if (error)
			goto out_close_logdev;

		if (rtdev_handle->bdev == ddev ||
		    (logdev_handle &&
		     rtdev_handle->bdev == logdev_handle->bdev)) {
			xfs_warn(mp,
	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
			error = -EINVAL;
			goto out_close_rtdev;
		}
	}

	/*
	 * Setup xfs_mount buffer target pointers
	 */
	error = -ENOMEM;
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, sb->s_bdev_handle);
	if (!mp->m_ddev_targp)
		goto out_close_rtdev;

	if (rtdev_handle) {
		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev_handle);
		if (!mp->m_rtdev_targp)
			goto out_free_ddev_targ;
	}

	if (logdev_handle && logdev_handle->bdev != ddev) {
		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev_handle);
		if (!mp->m_logdev_targp)
			goto out_free_rtdev_targ;
	} else {
		mp->m_logdev_targp = mp->m_ddev_targp;
		/* Handle won't be used, drop it */
		if (logdev_handle)
			bdev_release(logdev_handle);
	}

	return 0;

 out_free_rtdev_targ:
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
 out_free_ddev_targ:
	xfs_free_buftarg(mp->m_ddev_targp);
 out_close_rtdev:
	if (rtdev_handle)
		bdev_release(rtdev_handle);
 out_close_logdev:
	if (logdev_handle)
		bdev_release(logdev_handle);
	return error;
}

/*
 * Setup xfs_mount buffer target pointers based on superblock
 */
STATIC int
xfs_setup_devices(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
	if (error)
		return error;

	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
		unsigned int	log_sector_size = BBSIZE;

		if (xfs_has_sector(mp))
			log_sector_size = mp->m_sb.sb_logsectsize;
		error = xfs_setsize_buftarg(mp->m_logdev_targp,
					    log_sector_size);
		if (error)
			return error;
	}
	if (mp->m_rtdev_targp) {
		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
					    mp->m_sb.sb_sectsize);
		if (error)
			return error;
	}

	return 0;
}

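/*
 * Create the workqueues used by this mount. The buffer and inodegc
 * workqueues are limited to one concurrent work item to preserve ordering;
 * all but the sync workqueue are flagged WQ_MEM_RECLAIM so that they can
 * make progress while the system is short of memory.
 */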
STATIC int
xfs_init_mount_workqueues(
	struct xfs_mount	*mp)
{
	mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_buf_workqueue)
		goto out;

	mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_unwritten_workqueue)
		goto out_destroy_buf;

	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_reclaim_workqueue)
		goto out_destroy_unwritten;

	mp->m_blockgc_wq = alloc_workqueue("xfs-blockgc/%s",
			XFS_WQFLAGS(WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM),
			0, mp->m_super->s_id);
	if (!mp->m_blockgc_wq)
		goto out_destroy_reclaim;

	mp->m_inodegc_wq = alloc_workqueue("xfs-inodegc/%s",
			XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
			1, mp->m_super->s_id);
	if (!mp->m_inodegc_wq)
		goto out_destroy_blockgc;

	mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
			XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
	if (!mp->m_sync_workqueue)
		goto out_destroy_inodegc;

	return 0;

out_destroy_inodegc:
	destroy_workqueue(mp->m_inodegc_wq);
out_destroy_blockgc:
	destroy_workqueue(mp->m_blockgc_wq);
out_destroy_reclaim:
	destroy_workqueue(mp->m_reclaim_workqueue);
out_destroy_unwritten:
	destroy_workqueue(mp->m_unwritten_workqueue);
out_destroy_buf:
	destroy_workqueue(mp->m_buf_workqueue);
out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_mount_workqueues(
	struct xfs_mount	*mp)
{
	destroy_workqueue(mp->m_sync_workqueue);
	destroy_workqueue(mp->m_blockgc_wq);
	destroy_workqueue(mp->m_inodegc_wq);
	destroy_workqueue(mp->m_reclaim_workqueue);
	destroy_workqueue(mp->m_unwritten_workqueue);
	destroy_workqueue(mp->m_buf_workqueue);
}

static void
xfs_flush_inodes_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(work, struct xfs_mount,
						   m_flush_inodes_work);
	struct super_block	*sb = mp->m_super;

	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}
}

/*
 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
 * for IO to complete so that we effectively throttle multiple callers to the
 * rate at which IO is completing.
 */
void
xfs_flush_inodes(
	struct xfs_mount	*mp)
{
	/*
	 * If flush_work() returns true then that means we waited for a flush
	 * which was already in progress. Don't bother running another scan.
	 */
	if (flush_work(&mp->m_flush_inodes_work))
		return;

	queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
	flush_work(&mp->m_flush_inodes_work);
}

/* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode *
xfs_fs_alloc_inode(
	struct super_block	*sb)
{
	BUG();
	return NULL;
}

/*
 * Now that the generic code is guaranteed not to be accessing
 * the linux inode, we can inactivate and reclaim the inode.
 */
STATIC void
xfs_fs_destroy_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_destroy_inode(ip);

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	XFS_STATS_INC(ip->i_mount, vn_rele);
	XFS_STATS_INC(ip->i_mount, vn_remove);
	xfs_inode_mark_reclaimable(ip);
}

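/*
 * On lazytime mounts the VFS calls this with both I_DIRTY_TIME and
 * I_DIRTY_SYNC set when cached timestamp updates must finally be made
 * durable. Log the timestamps through a transaction here; other inode
 * metadata changes in XFS are already logged transactionally elsewhere.
 */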
static void
xfs_fs_dirty_inode(
	struct inode		*inode,
	int			flags)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;

	if (!(inode->i_sb->s_flags & SB_LAZYTIME))
		return;

	/*
	 * Only do the timestamp update if the inode is dirty (I_DIRTY_SYNC)
	 * and has dirty timestamp (I_DIRTY_TIME). I_DIRTY_TIME can be passed
	 * in flags possibly together with I_DIRTY_SYNC.
	 */
	if ((flags & ~I_DIRTY_TIME) != I_DIRTY_SYNC || !(flags & I_DIRTY_TIME))
		return;

	if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
		return;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
	xfs_trans_commit(tp);
}

/*
 * Slab object creation initialisation for the XFS inode.
 * This covers only the idempotent fields in the XFS inode;
 * all other fields need to be initialised on allocation
 * from the slab. This avoids the need to repeatedly initialise
 * fields in the xfs inode that are left in the initialised state
 * when freeing the inode.
 */
STATIC void
xfs_fs_inode_init_once(
	void			*inode)
{
	struct xfs_inode	*ip = inode;

	memset(ip, 0, sizeof(struct xfs_inode));

	/* vfs inode */
	inode_init_once(VFS_I(ip));

	/* xfs inode */
	atomic_set(&ip->i_pincount, 0);
	spin_lock_init(&ip->i_flags_lock);

	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", ip->i_ino);
}

/*
 * We do an unlocked check for XFS_IDONTCACHE here because we are already
 * serialised against cache hits here via the inode->i_lock and igrab() in
 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
 * racing with us, and it avoids needing to grab a spinlock here for every inode
 * we drop the final reference on.
 */
STATIC int
xfs_fs_drop_inode(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/*
	 * If this unlinked inode is in the middle of recovery, don't
	 * drop the inode just yet; log recovery will take care of
	 * that. See the comment for this inode flag.
	 */
	if (ip->i_flags & XFS_IRECOVERY) {
		ASSERT(xlog_recovery_needed(ip->i_mount->m_log));
		return 0;
	}

	return generic_drop_inode(inode);
}

static void
xfs_mount_free(
	struct xfs_mount	*mp)
{
	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp);
	if (mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_ddev_targp);

	debugfs_remove(mp->m_debugfs);
	kfree(mp->m_rtname);
	kfree(mp->m_logname);
	kmem_free(mp);
}

STATIC int
xfs_fs_sync_fs(
	struct super_block	*sb,
	int			wait)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	trace_xfs_fs_sync_fs(mp, __return_address);

	/*
	 * Doing anything during the async pass would be counterproductive.
	 */
	if (!wait)
		return 0;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;

	if (laptop_mode) {
		/*
		 * The disk must be active because we're syncing.
		 * We schedule log work now (now that the disk is
		 * active) instead of later (when it might not be).
		 */
		flush_delayed_work(&mp->m_log->l_work);
	}

	/*
	 * If we are called with page faults frozen out, it means we are about
	 * to freeze the transaction subsystem. Take the opportunity to shut
	 * down inodegc because once SB_FREEZE_FS is set it's too late to
	 * prevent inactivation races with freeze. The fs doesn't get called
	 * again by the freezing process until after SB_FREEZE_FS has been set,
	 * so it's now or never. Same logic applies to speculative allocation
	 * garbage collection.
	 *
	 * We don't care if this is a normal syncfs call that does this or
	 * freeze that does this - we can run this multiple times without issue
	 * and we won't race with a restart because a restart can only occur
	 * when the state is either SB_FREEZE_FS or SB_FREEZE_COMPLETE.
	 */
	if (sb->s_writers.frozen == SB_FREEZE_PAGEFAULT) {
		xfs_inodegc_stop(mp);
		xfs_blockgc_stop(mp);
	}

	return 0;
}

STATIC int
xfs_fs_statfs(
	struct dentry		*dentry,
	struct kstatfs		*statp)
{
	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
	xfs_sb_t		*sbp = &mp->m_sb;
	struct xfs_inode	*ip = XFS_I(d_inode(dentry));
	uint64_t		fakeinos, id;
	uint64_t		icount;
	uint64_t		ifree;
	uint64_t		fdblocks;
	xfs_extlen_t		lsize;
	int64_t			ffree;

	/*
	 * Expedite background inodegc but don't wait. We do not want to block
	 * here waiting hours for a billion extent file to be truncated.
	 */
	xfs_inodegc_push(mp);

	statp->f_type = XFS_SUPER_MAGIC;
	statp->f_namelen = MAXNAMELEN - 1;

	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
	statp->f_fsid = u64_to_fsid(id);

	icount = percpu_counter_sum(&mp->m_icount);
	ifree = percpu_counter_sum(&mp->m_ifree);
	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_bsize = sbp->sb_blocksize;
	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
	statp->f_blocks = sbp->sb_dblocks - lsize;
	spin_unlock(&mp->m_sb_lock);

	/* make sure statp->f_bfree does not underflow */
	statp->f_bfree = max_t(int64_t, 0,
				fdblocks - xfs_fdblocks_unavailable(mp));
	statp->f_bavail = statp->f_bfree;

	fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
	statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
	if (M_IGEO(mp)->maxicount)
		statp->f_files = min_t(typeof(statp->f_files),
					statp->f_files,
					M_IGEO(mp)->maxicount);

	/* If sb_icount overshot maxicount, report actual allocation */
	statp->f_files = max_t(typeof(statp->f_files),
					statp->f_files,
					sbp->sb_icount);

	/* make sure statp->f_ffree does not underflow */
	ffree = statp->f_files - (icount - ifree);
	statp->f_ffree = max_t(int64_t, ffree, 0);

	if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
			      (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
		xfs_qm_statvfs(ip, statp);

	if (XFS_IS_REALTIME_MOUNT(mp) &&
	    (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
		s64	freertx;

		statp->f_blocks = sbp->sb_rblocks;
		freertx = percpu_counter_sum_positive(&mp->m_frextents);
		statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
	}

	return 0;
}

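/*
 * Empty the in-memory reserve block pool ahead of a freeze or read-only
 * remount so the on-disk counters are correct, remembering its size so that
 * xfs_restore_resvblks() can rebuild it on thaw or a read-write remount.
 */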
STATIC void
xfs_save_resvblks(struct xfs_mount *mp)
{
	mp->m_resblks_save = mp->m_resblks;
	xfs_reserve_blocks(mp, 0);
}

STATIC void
xfs_restore_resvblks(struct xfs_mount *mp)
{
	uint64_t resblks;

	if (mp->m_resblks_save) {
		resblks = mp->m_resblks_save;
		mp->m_resblks_save = 0;
	} else
		resblks = xfs_default_resblks(mp);

	xfs_reserve_blocks(mp, resblks);
}

/*
 * Second stage of a freeze. The data is already frozen so we only
 * need to take care of the metadata. Once that's done sync the superblock
 * to the log to dirty it in case of a crash while frozen. This ensures that we
 * will recover the unlinked inode lists on the next mount.
 */
STATIC int
xfs_fs_freeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	unsigned int		flags;
	int			ret;

	/*
	 * The filesystem is now frozen far enough that memory reclaim
	 * cannot safely operate on the filesystem. Hence we need to
	 * set a GFP_NOFS context here to avoid recursion deadlocks.
	 */
	flags = memalloc_nofs_save();
	xfs_save_resvblks(mp);
	ret = xfs_log_quiesce(mp);
	memalloc_nofs_restore(flags);

	/*
	 * For read-write filesystems, we need to restart the inodegc on error
	 * because we stopped it at SB_FREEZE_PAGEFAULT level and a thaw is not
	 * going to be run to restart it now. We are at SB_FREEZE_FS level
	 * here, so we can restart safely without racing with a stop in
	 * xfs_fs_sync_fs().
	 */
	if (ret && !xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return ret;
}

STATIC int
xfs_fs_unfreeze(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);

	/*
	 * Don't reactivate the inodegc worker on a readonly filesystem because
	 * inodes are sent directly to reclaim. Don't reactivate the blockgc
	 * worker because there are no speculative preallocations on a readonly
	 * filesystem.
	 */
	if (!xfs_is_readonly(mp)) {
		xfs_blockgc_start(mp);
		xfs_inodegc_start(mp);
	}

	return 0;
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_has_logv2(mp)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_has_crc(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !xfs_is_readonly(mp)) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & XFS_GQUOTA_ACCT) &&
	    (mp->m_qflags & XFS_PQUOTA_ACCT) &&
	    !xfs_has_pquotino(mp)) {
		xfs_warn(mp,
			"Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}

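/*
 * Set up the per-cpu counters that back the superblock summary counters:
 * inode count, free inode count, free data blocks and free realtime extents,
 * plus a counter used to sanity check outstanding delalloc blocks.
 */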
static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int			error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
	if (error)
		goto free_fdblocks;

	error = percpu_counter_init(&mp->m_frextents, 0, GFP_KERNEL);
	if (error)
		goto free_delalloc;

	return 0;

free_delalloc:
	percpu_counter_destroy(&mp->m_delalloc_blks);
free_fdblocks:
	percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
	percpu_counter_set(&mp->m_frextents, mp->m_sb.sb_frextents);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
	ASSERT(xfs_is_shutdown(mp) ||
	       percpu_counter_sum(&mp->m_delalloc_blks) == 0);
	percpu_counter_destroy(&mp->m_delalloc_blks);
	percpu_counter_destroy(&mp->m_frextents);
}

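/*
 * Allocate the per-cpu deferred inode inactivation state. Each cpu gets a
 * lockless list of inodes queued for inactivation and a delayed work item
 * that processes them in the background.
 */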
static int
xfs_inodegc_init_percpu(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	mp->m_inodegc = alloc_percpu(struct xfs_inodegc);
	if (!mp->m_inodegc)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		gc->cpu = cpu;
		gc->mp = mp;
		init_llist_head(&gc->list);
		gc->items = 0;
		gc->error = 0;
		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
	}
	return 0;
}

static void
xfs_inodegc_free_percpu(
	struct xfs_mount	*mp)
{
	if (!mp->m_inodegc)
		return;
	free_percpu(mp->m_inodegc);
}

static void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem %pU", &mp->m_sb.sb_uuid);
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	xchk_mount_stats_free(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_inodegc_free_percpu(mp);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_shutdown_devices(mp);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Paranoia: catch incorrect calls during mount setup or teardown */
	if (WARN_ON_ONCE(!sb->s_fs_info))
		return 0;
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}

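/*
 * Called by the VFS when the backing block device is marked dead or is being
 * removed. Shut down the filesystem so that no further I/O is issued.
 */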
1166xfs_fs_shutdown(
1167 struct super_block *sb)
1168{
1169 xfs_force_shutdown(XFS_M(sb), SHUTDOWN_DEVICE_REMOVED);
1170}
1171
1172static const struct super_operations xfs_super_operations = {
1173 .alloc_inode = xfs_fs_alloc_inode,
1174 .destroy_inode = xfs_fs_destroy_inode,
1175 .dirty_inode = xfs_fs_dirty_inode,
1176 .drop_inode = xfs_fs_drop_inode,
1177 .put_super = xfs_fs_put_super,
1178 .sync_fs = xfs_fs_sync_fs,
1179 .freeze_fs = xfs_fs_freeze,
1180 .unfreeze_fs = xfs_fs_unfreeze,
1181 .statfs = xfs_fs_statfs,
1182 .show_options = xfs_fs_show_options,
1183 .nr_cached_objects = xfs_fs_nr_cached_objects,
1184 .free_cached_objects = xfs_fs_free_cached_objects,
1185 .shutdown = xfs_fs_shutdown,
1186};
1187
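/*
 * Parse an integer option value with an optional binary suffix, e.g. "64k"
 * or "1m". The K/M/G suffix is applied as a left shift on the parsed value.
 */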
static int
suffix_kstrtoint(
	const char	*s,
	unsigned int	base,
	int		*res)
{
	int		last, shift_left_factor = 0, _res;
	char		*value;
	int		ret = 0;

	value = kstrdup(s, GFP_KERNEL);
	if (!value)
		return -ENOMEM;

	last = strlen(value) - 1;
	if (value[last] == 'K' || value[last] == 'k') {
		shift_left_factor = 10;
		value[last] = '\0';
	}
	if (value[last] == 'M' || value[last] == 'm') {
		shift_left_factor = 20;
		value[last] = '\0';
	}
	if (value[last] == 'G' || value[last] == 'g') {
		shift_left_factor = 30;
		value[last] = '\0';
	}

	if (kstrtoint(value, base, &_res))
		ret = -EINVAL;
	kfree(value);
	*res = _res << shift_left_factor;
	return ret;
}

static inline void
xfs_fs_warn_deprecated(
	struct fs_context	*fc,
	struct fs_parameter	*param,
	uint64_t		flag,
	bool			value)
{
	/*
	 * Don't print the warning if reconfiguring and current mount point
	 * already had the flag set
	 */
	if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
	    !!(XFS_M(fc->root->d_sb)->m_features & flag) == value)
		return;
	xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
}

/*
 * Set mount state from a mount option.
 *
 * NOTE: mp->m_super is NULL here!
 */
static int
xfs_fs_parse_param(
	struct fs_context	*fc,
	struct fs_parameter	*param)
{
	struct xfs_mount	*parsing_mp = fc->s_fs_info;
	struct fs_parse_result	result;
	int			size = 0;
	int			opt;

	opt = fs_parse(fc, xfs_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_logbufs:
		parsing_mp->m_logbufs = result.uint_32;
		return 0;
	case Opt_logbsize:
		if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
			return -EINVAL;
		return 0;
	case Opt_logdev:
		kfree(parsing_mp->m_logname);
		parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_logname)
			return -ENOMEM;
		return 0;
	case Opt_rtdev:
		kfree(parsing_mp->m_rtname);
		parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
		if (!parsing_mp->m_rtname)
			return -ENOMEM;
		return 0;
	case Opt_allocsize:
		if (suffix_kstrtoint(param->string, 10, &size))
			return -EINVAL;
		parsing_mp->m_allocsize_log = ffs(size) - 1;
		parsing_mp->m_features |= XFS_FEAT_ALLOCSIZE;
		return 0;
	case Opt_grpid:
	case Opt_bsdgroups:
		parsing_mp->m_features |= XFS_FEAT_GRPID;
		return 0;
	case Opt_nogrpid:
	case Opt_sysvgroups:
		parsing_mp->m_features &= ~XFS_FEAT_GRPID;
		return 0;
	case Opt_wsync:
		parsing_mp->m_features |= XFS_FEAT_WSYNC;
		return 0;
	case Opt_norecovery:
		parsing_mp->m_features |= XFS_FEAT_NORECOVERY;
		return 0;
	case Opt_noalign:
		parsing_mp->m_features |= XFS_FEAT_NOALIGN;
		return 0;
	case Opt_swalloc:
		parsing_mp->m_features |= XFS_FEAT_SWALLOC;
		return 0;
	case Opt_sunit:
		parsing_mp->m_dalign = result.uint_32;
		return 0;
	case Opt_swidth:
		parsing_mp->m_swidth = result.uint_32;
		return 0;
	case Opt_inode32:
		parsing_mp->m_features |= XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_inode64:
		parsing_mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		return 0;
	case Opt_nouuid:
		parsing_mp->m_features |= XFS_FEAT_NOUUID;
		return 0;
	case Opt_largeio:
		parsing_mp->m_features |= XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_nolargeio:
		parsing_mp->m_features &= ~XFS_FEAT_LARGE_IOSIZE;
		return 0;
	case Opt_filestreams:
		parsing_mp->m_features |= XFS_FEAT_FILESTREAMS;
		return 0;
	case Opt_noquota:
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
		return 0;
	case Opt_quota:
	case Opt_uquota:
	case Opt_usrquota:
		parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ENFD);
		return 0;
	case Opt_qnoenforce:
	case Opt_uqnoenforce:
		parsing_mp->m_qflags |= XFS_UQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
		return 0;
	case Opt_pquota:
	case Opt_prjquota:
		parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ENFD);
		return 0;
	case Opt_pqnoenforce:
		parsing_mp->m_qflags |= XFS_PQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
		return 0;
	case Opt_gquota:
	case Opt_grpquota:
		parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ENFD);
		return 0;
	case Opt_gqnoenforce:
		parsing_mp->m_qflags |= XFS_GQUOTA_ACCT;
		parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
		return 0;
	case Opt_discard:
		parsing_mp->m_features |= XFS_FEAT_DISCARD;
		return 0;
	case Opt_nodiscard:
		parsing_mp->m_features &= ~XFS_FEAT_DISCARD;
		return 0;
#ifdef CONFIG_FS_DAX
	case Opt_dax:
		xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
		return 0;
	case Opt_dax_enum:
		xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
		return 0;
#endif
	/* Following mount options will be removed in September 2025 */
	case Opt_ikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, true);
		parsing_mp->m_features |= XFS_FEAT_IKEEP;
		return 0;
	case Opt_noikeep:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_IKEEP, false);
		parsing_mp->m_features &= ~XFS_FEAT_IKEEP;
		return 0;
	case Opt_attr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_ATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_ATTR2;
		return 0;
	case Opt_noattr2:
		xfs_fs_warn_deprecated(fc, param, XFS_FEAT_NOATTR2, true);
		parsing_mp->m_features |= XFS_FEAT_NOATTR2;
		return 0;
	default:
		xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
		return -EINVAL;
	}

	return 0;
}

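/*
 * Sanity check the combination of mount options. This runs before the
 * superblock has been read, so only cross-option and range checks are
 * possible here.
 */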
static int
xfs_fs_validate_params(
	struct xfs_mount	*mp)
{
	/* No recovery flag requires a read-only mount */
	if (xfs_has_norecovery(mp) && !xfs_is_readonly(mp)) {
		xfs_warn(mp, "no-recovery mounts must be read-only.");
		return -EINVAL;
	}

	/*
	 * We have not read the superblock at this point, so only the attr2
	 * mount option can set the attr2 feature by this stage.
	 */
	if (xfs_has_attr2(mp) && xfs_has_noattr2(mp)) {
		xfs_warn(mp, "attr2 and noattr2 cannot both be specified.");
		return -EINVAL;
	}

	if (xfs_has_noalign(mp) && (mp->m_dalign || mp->m_swidth)) {
		xfs_warn(mp,
	"sunit and swidth options incompatible with the noalign option");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
		xfs_warn(mp, "quota support not available in this kernel.");
		return -EINVAL;
	}

	if ((mp->m_dalign && !mp->m_swidth) ||
	    (!mp->m_dalign && mp->m_swidth)) {
		xfs_warn(mp, "sunit and swidth must be specified together");
		return -EINVAL;
	}

	if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
		xfs_warn(mp,
	"stripe width (%d) must be a multiple of the stripe unit (%d)",
			mp->m_swidth, mp->m_dalign);
		return -EINVAL;
	}

	if (mp->m_logbufs != -1 &&
	    mp->m_logbufs != 0 &&
	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
		return -EINVAL;
	}

	if (mp->m_logbsize != -1 &&
	    mp->m_logbsize != 0 &&
	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
	     !is_power_of_2(mp->m_logbsize))) {
		xfs_warn(mp,
			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
			mp->m_logbsize);
		return -EINVAL;
	}

	if (xfs_has_allocsize(mp) &&
	    (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
	     mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
		xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
			mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
		return -EINVAL;
	}

	return 0;
}

struct dentry *
xfs_debugfs_mkdir(
	const char	*name,
	struct dentry	*parent)
{
	struct dentry	*child;

	/* Apparently we're expected to ignore error returns?? */
	child = debugfs_create_dir(name, parent);
	if (IS_ERR(child))
		return NULL;

	return child;
}

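/*
 * Fill in the superblock for a mount request: validate the parsed options,
 * open the backing devices, read and check the on-disk superblock, run the
 * full mount sequence and hand the root inode to the VFS.
 */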
static int
xfs_fs_fill_super(
	struct super_block	*sb,
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = sb->s_fs_info;
	struct inode		*root;
	int			flags = 0, error;

	mp->m_super = sb;

	/*
	 * Copy VFS mount flags from the context now that all parameter parsing
	 * is guaranteed to have been completed by either the old mount API or
	 * the newer fsopen/fsconfig API.
	 */
	if (fc->sb_flags & SB_RDONLY)
		set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
	if (fc->sb_flags & SB_DIRSYNC)
		mp->m_features |= XFS_FEAT_DIRSYNC;
	if (fc->sb_flags & SB_SYNCHRONOUS)
		mp->m_features |= XFS_FEAT_WSYNC;

	error = xfs_fs_validate_params(mp);
	if (error)
		return error;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	/*
	 * Delay mount work if the debug hook is set. This is debug
	 * instrumentation to coordinate simulation of xfs mount failures with
	 * VFS superblock operations
	 */
	if (xfs_globals.mount_delay) {
		xfs_notice(mp, "Delaying mount for %d seconds.",
			xfs_globals.mount_delay);
		msleep(xfs_globals.mount_delay * 1000);
	}

	if (fc->sb_flags & SB_SILENT)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		return error;

	if (xfs_debugfs) {
		mp->m_debugfs = xfs_debugfs_mkdir(mp->m_super->s_id,
						  xfs_debugfs);
	} else {
		mp->m_debugfs = NULL;
	}

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_shutdown_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	error = xfs_inodegc_init_percpu(mp);
	if (error)
		goto out_destroy_counters;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_inodegc;
	}

	error = xchk_mount_stats_alloc(mp);
	if (error)
		goto out_free_stats;

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_scrub_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	/* V4 support is undergoing deprecation. */
	if (!xfs_has_crc(mp)) {
#ifdef CONFIG_XFS_SUPPORT_V4
		xfs_warn_once(mp,
	"Deprecated V4 format (crc=0) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated V4 format (crc=0) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* ASCII case insensitivity is undergoing deprecation. */
	if (xfs_has_asciici(mp)) {
#ifdef CONFIG_XFS_SUPPORT_ASCII_CI
		xfs_warn_once(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) will not be supported after September 2030.");
#else
		xfs_warn(mp,
	"Deprecated ASCII case-insensitivity feature (ascii-ci=1) not supported by kernel.");
		error = -EINVAL;
		goto out_free_sb;
#endif
	}

	/* Filesystem claims it needs repair, so refuse the mount. */
	if (xfs_has_needsrepair(mp)) {
		xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Don't touch the filesystem if a user tool thinks it owns the primary
	 * superblock. mkfs doesn't clear the flag from secondary supers, so
	 * we don't check them at all.
	 */
	if (mp->m_sb.sb_inprogress) {
		xfs_warn(mp, "Offline file system operation in progress!");
		error = -EFSCORRUPTED;
		goto out_free_sb;
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
		xfs_warn(mp,
		"File system with blocksize %d bytes. "
		"Only pagesize (%ld) or less will currently work.",
				mp->m_sb.sb_blocksize, PAGE_SIZE);
		error = -ENOSYS;
		goto out_free_sb;
	}

	/* Ensure this filesystem fits in the page cache limits */
	if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
	    xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
		xfs_warn(mp,
		"file system too large to be mounted on this system.");
		error = -EFBIG;
		goto out_free_sb;
	}

	/*
	 * XFS block mappings use 54 bits to store the logical block offset.
	 * This should suffice to handle the maximum file size that the VFS
	 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
	 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
	 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
	 * to check this assertion.
	 *
	 * Avoid integer overflow by comparing the maximum bmbt offset to the
	 * maximum pagecache offset in units of fs blocks.
	 */
	if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
		xfs_warn(mp,
"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
			 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
			 XFS_MAX_FILEOFF);
		error = -EINVAL;
		goto out_free_sb;
	}

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can lookup and cache inodes.
	 */
	sb->s_magic = XFS_SUPER_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	if (xfs_has_bigtime(mp)) {
		sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
		sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
	} else {
		sb->s_time_min = XFS_LEGACY_TIME_MIN;
		sb->s_time_max = XFS_LEGACY_TIME_MAX;
	}
	trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
	sb->s_iflags |= SB_I_CGROUPWB;

	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (xfs_has_crc(mp))
		sb->s_flags |= SB_I_VERSION;

	if (xfs_has_dax_always(mp)) {
		error = xfs_setup_dax_always(mp);
		if (error)
			goto out_filestream_unmount;
	}

	if (xfs_has_discard(mp) && !bdev_max_discard_sectors(sb->s_bdev)) {
		xfs_warn(mp,
	"mounting with \"discard\" option, but the device does not support discard");
		mp->m_features &= ~XFS_FEAT_DISCARD;
	}

	if (xfs_has_reflink(mp)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"reflink not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}

		if (xfs_globals.always_cow) {
			xfs_info(mp, "using DEBUG-only always_cow mode.");
			mp->m_always_cow = true;
		}
	}

	if (xfs_has_rmapbt(mp) && mp->m_sb.sb_rblocks) {
		xfs_alert(mp,
	"reverse mapping btree not compatible with realtime device!");
		error = -EINVAL;
		goto out_filestream_unmount;
	}

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_scrub_stats:
	xchk_mount_stats_free(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_inodegc:
	xfs_inodegc_free_percpu(mp);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_shutdown_devices:
	xfs_shutdown_devices(mp);
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}

static int
xfs_fs_get_tree(
	struct fs_context	*fc)
{
	return get_tree_bdev(fc, xfs_fs_fill_super);
}

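/*
 * Transition a mount from read-only to read-write: push any stashed
 * superblock updates to disk, refill the reserve block pool and restart the
 * background workers that were quiesced while read-only.
 */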
static int
xfs_remount_rw(
	struct xfs_mount	*mp)
{
	struct xfs_sb		*sbp = &mp->m_sb;
	int			error;

	if (xfs_has_norecovery(mp)) {
		xfs_warn(mp,
			"ro->rw transition prohibited on norecovery mount");
		return -EINVAL;
	}

	if (xfs_sb_is_v5(sbp) &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
		xfs_warn(mp,
	"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
			(sbp->sb_features_ro_compat &
				XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
		return -EINVAL;
	}

	clear_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	/*
	 * If this is the first remount to writeable state we might have some
	 * superblock changes to update.
	 */
	if (mp->m_update_sb) {
		error = xfs_sync_sb(mp, false);
		if (error) {
			xfs_warn(mp, "failed to write sb changes");
			return error;
		}
		mp->m_update_sb = false;
	}

	/*
	 * Fill out the reserve pool if it is empty. Use the stashed value if
	 * it is non-zero, otherwise go with the default.
	 */
	xfs_restore_resvblks(mp);
	xfs_log_work_queue(mp);
	xfs_blockgc_start(mp);

	/* Create the per-AG metadata reservation pool. */
	error = xfs_fs_reserve_ag_blocks(mp);
	if (error && error != -ENOSPC)
		return error;

	/* Re-enable the background inode inactivation worker. */
	xfs_inodegc_start(mp);

	return 0;
}

static int
xfs_remount_ro(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SYNC,
	};
	int			error;

	/* Flush all the dirty data to disk. */
	error = sync_filesystem(mp->m_super);
	if (error)
		return error;

	/*
	 * Cancel background eofb scanning so it cannot race with the final
	 * log force+buftarg wait and deadlock the remount.
	 */
	xfs_blockgc_stop(mp);

	/*
	 * Clear out all remaining COW staging extents and speculative post-EOF
	 * preallocations so that we don't leave inodes requiring inactivation
	 * cleanups during reclaim on a read-only mount. We must process every
	 * cached inode, so this requires a synchronous cache scan.
	 */
	error = xfs_blockgc_free_space(mp, &icw);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Stop the inodegc background worker. xfs_fs_reconfigure already
	 * flushed all pending inodegc work when it sync'd the filesystem.
	 * The VFS holds s_umount, so we know that inodes cannot enter
	 * xfs_fs_destroy_inode during a remount operation. In readonly mode
	 * we send inodes straight to reclaim, so no inodes will be queued.
	 */
	xfs_inodegc_stop(mp);

	/* Free the per-AG metadata reservation pool. */
	error = xfs_fs_unreserve_ag_blocks(mp);
	if (error) {
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return error;
	}

	/*
	 * Before we sync the metadata, we need to free up the reserve block
	 * pool so that the used block count in the superblock on disk is
	 * correct at the end of the remount. Stash the current reserve pool
	 * size so that if we get remounted rw, we can return it to the same
	 * size.
	 */
1890 xfs_save_resvblks(mp);
1891
	xfs_log_clean(mp);
	set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);

	return 0;
}

/*
 * Logically we would return an error here to prevent users from believing
 * they might have changed mount options using remount which can't be changed.
 *
 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
 * arguments in some cases so we can't blindly reject options, but have to
 * check for each specified option if it actually differs from the currently
 * set option and only reject it if that's the case.
 *
 * Until that is implemented we return success for every remount request, and
 * silently ignore all options that we can't actually change.
 */
static int
xfs_fs_reconfigure(
	struct fs_context *fc)
{
	struct xfs_mount	*mp = XFS_M(fc->root->d_sb);
	struct xfs_mount	*new_mp = fc->s_fs_info;
	int			flags = fc->sb_flags;
	int			error;

	/* version 5 superblocks always support version counters. */
	if (xfs_has_crc(mp))
		fc->sb_flags |= SB_I_VERSION;

	error = xfs_fs_validate_params(new_mp);
	if (error)
		return error;

	/* inode32 -> inode64 */
	if (xfs_has_small_inums(mp) && !xfs_has_small_inums(new_mp)) {
		mp->m_features &= ~XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* inode64 -> inode32 */
	if (!xfs_has_small_inums(mp) && xfs_has_small_inums(new_mp)) {
		mp->m_features |= XFS_FEAT_SMALL_INUMS;
		mp->m_maxagi = xfs_set_inode_alloc(mp, mp->m_sb.sb_agcount);
	}

	/* ro -> rw */
	if (xfs_is_readonly(mp) && !(flags & SB_RDONLY)) {
		error = xfs_remount_rw(mp);
		if (error)
			return error;
	}

	/* rw -> ro */
	if (!xfs_is_readonly(mp) && (flags & SB_RDONLY)) {
		error = xfs_remount_ro(mp);
		if (error)
			return error;
	}

	return 0;
}

static void
xfs_fs_free(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp = fc->s_fs_info;

	/*
	 * mp is stored in the fs_context when it is initialized.
	 * mp is transferred to the superblock on a successful mount,
	 * but if an error occurs before the transfer we have to free
	 * it here.
	 */
	if (mp)
		xfs_mount_free(mp);
}

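/*
 * Operations vector wired into each XFS fs_context by xfs_init_fs_context()
 * below; the new mount API dispatches option parsing, mount, remount and
 * context teardown through this table.
 */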
static const struct fs_context_operations xfs_context_ops = {
	.parse_param = xfs_fs_parse_param,
	.get_tree    = xfs_fs_get_tree,
	.reconfigure = xfs_fs_reconfigure,
	.free        = xfs_fs_free,
};

/*
 * WARNING: do not initialise any parameters in this function that depend on
 * mount option parsing having already been performed as this can be called
 * from fsopen() before any parameters have been set.
 */
static int xfs_init_fs_context(
	struct fs_context	*fc)
{
	struct xfs_mount	*mp;

	mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
	if (!mp)
		return -ENOMEM;

	spin_lock_init(&mp->m_sb_lock);
	INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
	spin_lock_init(&mp->m_perag_lock);
	mutex_init(&mp->m_growlock);
	INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	mp->m_kobj.kobject.kset = xfs_kset;
	/*
	 * We don't create the finobt per-ag space reservation until after log
	 * recovery, so we must set this to true so that an ifree transaction
	 * started during log recovery will not depend on space reservations
	 * for finobt expansion.
	 */
	mp->m_finobt_nores = true;

	/*
	 * These can be overridden by the mount option parsing.
	 */
	mp->m_logbufs = -1;
	mp->m_logbsize = -1;
	mp->m_allocsize_log = 16; /* 64k */

	fc->s_fs_info = mp;
	fc->ops = &xfs_context_ops;

	return 0;
}

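/*
 * Tear the superblock down via the generic block-device super path, which
 * unmounts the filesystem through ->put_super, then free the xfs_mount that
 * was handed over to sb->s_fs_info at mount time.
 */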
static void
xfs_kill_sb(
	struct super_block	*sb)
{
	kill_block_super(sb);
	xfs_mount_free(XFS_M(sb));
}

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.init_fs_context	= xfs_init_fs_context,
	.parameters		= xfs_fs_parameters,
	.kill_sb		= xfs_kill_sb,
	.fs_flags		= FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("xfs");

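/*
 * Create all of the global slab caches used by XFS.  The creation order only
 * matters for the error path: each failure label below unwinds exactly the
 * caches created before the failing allocation, in reverse order.
 */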
STATIC int __init
xfs_init_caches(void)
{
	int		error;

	xfs_buf_cache = kmem_cache_create("xfs_buf", sizeof(struct xfs_buf), 0,
					  SLAB_HWCACHE_ALIGN |
					  SLAB_RECLAIM_ACCOUNT |
					  SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_buf_cache)
		goto out;

	xfs_log_ticket_cache = kmem_cache_create("xfs_log_ticket",
						 sizeof(struct xlog_ticket),
						 0, 0, NULL);
	if (!xfs_log_ticket_cache)
		goto out_destroy_buf_cache;

	error = xfs_btree_init_cur_caches();
	if (error)
		goto out_destroy_log_ticket_cache;

	error = xfs_defer_init_item_caches();
	if (error)
		goto out_destroy_btree_cur_cache;

	xfs_da_state_cache = kmem_cache_create("xfs_da_state",
					       sizeof(struct xfs_da_state),
					       0, 0, NULL);
	if (!xfs_da_state_cache)
		goto out_destroy_defer_item_cache;

	xfs_ifork_cache = kmem_cache_create("xfs_ifork",
					    sizeof(struct xfs_ifork),
					    0, 0, NULL);
	if (!xfs_ifork_cache)
		goto out_destroy_da_state_cache;

	xfs_trans_cache = kmem_cache_create("xfs_trans",
					    sizeof(struct xfs_trans),
					    0, 0, NULL);
	if (!xfs_trans_cache)
		goto out_destroy_ifork_cache;

	/*
	 * The size of the cache-allocated buf log item is the maximum
	 * size possible under XFS.  This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_cache = kmem_cache_create("xfs_buf_item",
					       sizeof(struct xfs_buf_log_item),
					       0, 0, NULL);
	if (!xfs_buf_item_cache)
		goto out_destroy_trans_cache;

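	/*
	 * Several of the intent/done item caches below are sized with the
	 * *_log_item_sizeof() helpers so that the common "fast" number of
	 * extents fits inline in a single allocation.
	 */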
	xfs_efd_cache = kmem_cache_create("xfs_efd_item",
			xfs_efd_log_item_sizeof(XFS_EFD_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efd_cache)
		goto out_destroy_buf_item_cache;

	xfs_efi_cache = kmem_cache_create("xfs_efi_item",
			xfs_efi_log_item_sizeof(XFS_EFI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_efi_cache)
		goto out_destroy_efd_cache;

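	/*
	 * The inode cache is reclaim accounted and charged to memory cgroups
	 * (SLAB_ACCOUNT); objects get their one-time initialisation from
	 * xfs_fs_inode_init_once().
	 */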
	xfs_inode_cache = kmem_cache_create("xfs_inode",
					    sizeof(struct xfs_inode), 0,
					    (SLAB_HWCACHE_ALIGN |
					     SLAB_RECLAIM_ACCOUNT |
					     SLAB_MEM_SPREAD | SLAB_ACCOUNT),
					    xfs_fs_inode_init_once);
	if (!xfs_inode_cache)
		goto out_destroy_efi_cache;

	xfs_ili_cache = kmem_cache_create("xfs_ili",
					  sizeof(struct xfs_inode_log_item), 0,
					  SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					  NULL);
	if (!xfs_ili_cache)
		goto out_destroy_inode_cache;

	xfs_icreate_cache = kmem_cache_create("xfs_icr",
					      sizeof(struct xfs_icreate_item),
					      0, 0, NULL);
	if (!xfs_icreate_cache)
		goto out_destroy_ili_cache;

	xfs_rud_cache = kmem_cache_create("xfs_rud_item",
					  sizeof(struct xfs_rud_log_item),
					  0, 0, NULL);
	if (!xfs_rud_cache)
		goto out_destroy_icreate_cache;

	xfs_rui_cache = kmem_cache_create("xfs_rui_item",
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_rui_cache)
		goto out_destroy_rud_cache;

	xfs_cud_cache = kmem_cache_create("xfs_cud_item",
					  sizeof(struct xfs_cud_log_item),
					  0, 0, NULL);
	if (!xfs_cud_cache)
		goto out_destroy_rui_cache;

	xfs_cui_cache = kmem_cache_create("xfs_cui_item",
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_cui_cache)
		goto out_destroy_cud_cache;

	xfs_bud_cache = kmem_cache_create("xfs_bud_item",
					  sizeof(struct xfs_bud_log_item),
					  0, 0, NULL);
	if (!xfs_bud_cache)
		goto out_destroy_cui_cache;

	xfs_bui_cache = kmem_cache_create("xfs_bui_item",
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			0, 0, NULL);
	if (!xfs_bui_cache)
		goto out_destroy_bud_cache;

	xfs_attrd_cache = kmem_cache_create("xfs_attrd_item",
					    sizeof(struct xfs_attrd_log_item),
					    0, 0, NULL);
	if (!xfs_attrd_cache)
		goto out_destroy_bui_cache;

	xfs_attri_cache = kmem_cache_create("xfs_attri_item",
					    sizeof(struct xfs_attri_log_item),
					    0, 0, NULL);
	if (!xfs_attri_cache)
		goto out_destroy_attrd_cache;

	xfs_iunlink_cache = kmem_cache_create("xfs_iul_item",
					      sizeof(struct xfs_iunlink_item),
					      0, 0, NULL);
	if (!xfs_iunlink_cache)
		goto out_destroy_attri_cache;

	return 0;

 out_destroy_attri_cache:
	kmem_cache_destroy(xfs_attri_cache);
 out_destroy_attrd_cache:
	kmem_cache_destroy(xfs_attrd_cache);
 out_destroy_bui_cache:
	kmem_cache_destroy(xfs_bui_cache);
 out_destroy_bud_cache:
	kmem_cache_destroy(xfs_bud_cache);
 out_destroy_cui_cache:
	kmem_cache_destroy(xfs_cui_cache);
 out_destroy_cud_cache:
	kmem_cache_destroy(xfs_cud_cache);
 out_destroy_rui_cache:
	kmem_cache_destroy(xfs_rui_cache);
 out_destroy_rud_cache:
	kmem_cache_destroy(xfs_rud_cache);
 out_destroy_icreate_cache:
	kmem_cache_destroy(xfs_icreate_cache);
 out_destroy_ili_cache:
	kmem_cache_destroy(xfs_ili_cache);
 out_destroy_inode_cache:
	kmem_cache_destroy(xfs_inode_cache);
 out_destroy_efi_cache:
	kmem_cache_destroy(xfs_efi_cache);
 out_destroy_efd_cache:
	kmem_cache_destroy(xfs_efd_cache);
 out_destroy_buf_item_cache:
	kmem_cache_destroy(xfs_buf_item_cache);
 out_destroy_trans_cache:
	kmem_cache_destroy(xfs_trans_cache);
 out_destroy_ifork_cache:
	kmem_cache_destroy(xfs_ifork_cache);
 out_destroy_da_state_cache:
	kmem_cache_destroy(xfs_da_state_cache);
 out_destroy_defer_item_cache:
	xfs_defer_destroy_item_caches();
 out_destroy_btree_cur_cache:
	xfs_btree_destroy_cur_caches();
 out_destroy_log_ticket_cache:
	kmem_cache_destroy(xfs_log_ticket_cache);
 out_destroy_buf_cache:
	kmem_cache_destroy(xfs_buf_cache);
 out:
	return -ENOMEM;
}

STATIC void
xfs_destroy_caches(void)
{
	/*
	 * Make sure all delayed RCU frees have been flushed before we
	 * destroy the caches.
	 */
	rcu_barrier();
	kmem_cache_destroy(xfs_iunlink_cache);
	kmem_cache_destroy(xfs_attri_cache);
	kmem_cache_destroy(xfs_attrd_cache);
	kmem_cache_destroy(xfs_bui_cache);
	kmem_cache_destroy(xfs_bud_cache);
	kmem_cache_destroy(xfs_cui_cache);
	kmem_cache_destroy(xfs_cud_cache);
	kmem_cache_destroy(xfs_rui_cache);
	kmem_cache_destroy(xfs_rud_cache);
	kmem_cache_destroy(xfs_icreate_cache);
	kmem_cache_destroy(xfs_ili_cache);
	kmem_cache_destroy(xfs_inode_cache);
	kmem_cache_destroy(xfs_efi_cache);
	kmem_cache_destroy(xfs_efd_cache);
	kmem_cache_destroy(xfs_buf_item_cache);
	kmem_cache_destroy(xfs_trans_cache);
	kmem_cache_destroy(xfs_ifork_cache);
	kmem_cache_destroy(xfs_da_state_cache);
	xfs_defer_destroy_item_caches();
	xfs_btree_destroy_cur_caches();
	kmem_cache_destroy(xfs_log_ticket_cache);
	kmem_cache_destroy(xfs_buf_cache);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

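	/* Separate unbound workqueue for the asynchronous discard work. */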
	xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
			0);
	if (!xfs_discard_wq)
		goto out_free_alloc_wq;

	return 0;
out_free_alloc_wq:
	destroy_workqueue(xfs_alloc_wq);
	return -ENOMEM;
}

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_discard_wq);
	destroy_workqueue(xfs_alloc_wq);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

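	/*
	 * Sanity-check the sizes and offsets of the on-disk structures
	 * before doing anything else.
	 */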
	xfs_check_ondisk_structs();

	error = xfs_dahash_test();
	if (error)
		return error;

	printk(KERN_INFO XFS_VERSION_STRING " with "
			XFS_BUILD_OPTIONS " enabled\n");

	xfs_dir_startup();

	error = xfs_init_caches();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_caches;

	error = xfs_mru_cache_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

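	/*
	 * debugfs is best-effort here: there is deliberately no error check,
	 * and the debugfs API tolerates a missing or failed parent directory.
	 */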
	xfs_debugfs = xfs_debugfs_mkdir("xfs", NULL);

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_debugfs_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

	error = xchk_global_stats_setup(xfs_debugfs);
	if (error)
		goto out_remove_stats_kobj;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_scrub_stats;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_scrub_stats:
#endif
	xchk_global_stats_teardown();
 out_remove_stats_kobj:
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_debugfs_unregister:
	debugfs_remove(xfs_debugfs);
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_caches:
	xfs_destroy_caches();
 out:
	return error;
}

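/*
 * Module teardown runs the init_xfs_fs() sequence in reverse: shut down quota
 * support and unregister the filesystem type so no new mounts can start, then
 * dismantle the sysfs/debugfs/procfs plumbing, workqueues and slab caches.
 */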
STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xchk_global_stats_teardown();
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	debugfs_remove(xfs_debugfs);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_caches();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");
1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19#include "xfs.h"
20#include "xfs_shared.h"
21#include "xfs_format.h"
22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_mount.h"
26#include "xfs_da_format.h"
27#include "xfs_inode.h"
28#include "xfs_btree.h"
29#include "xfs_bmap.h"
30#include "xfs_alloc.h"
31#include "xfs_error.h"
32#include "xfs_fsops.h"
33#include "xfs_trans.h"
34#include "xfs_buf_item.h"
35#include "xfs_log.h"
36#include "xfs_log_priv.h"
37#include "xfs_da_btree.h"
38#include "xfs_dir2.h"
39#include "xfs_extfree_item.h"
40#include "xfs_mru_cache.h"
41#include "xfs_inode_item.h"
42#include "xfs_icache.h"
43#include "xfs_trace.h"
44#include "xfs_icreate_item.h"
45#include "xfs_filestream.h"
46#include "xfs_quota.h"
47#include "xfs_sysfs.h"
48#include "xfs_ondisk.h"
49#include "xfs_rmap_item.h"
50#include "xfs_refcount_item.h"
51#include "xfs_bmap_item.h"
52#include "xfs_reflink.h"
53
54#include <linux/namei.h>
55#include <linux/init.h>
56#include <linux/slab.h>
57#include <linux/mount.h>
58#include <linux/mempool.h>
59#include <linux/writeback.h>
60#include <linux/kthread.h>
61#include <linux/freezer.h>
62#include <linux/parser.h>
63
64static const struct super_operations xfs_super_operations;
65struct bio_set *xfs_ioend_bioset;
66
67static struct kset *xfs_kset; /* top-level xfs sysfs dir */
68#ifdef DEBUG
69static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
70#endif
71
72/*
73 * Table driven mount option parser.
74 */
75enum {
76 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
77 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
78 Opt_mtpt, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
79 Opt_allocsize, Opt_norecovery, Opt_barrier, Opt_nobarrier,
80 Opt_inode64, Opt_inode32, Opt_ikeep, Opt_noikeep,
81 Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2, Opt_filestreams,
82 Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota, Opt_prjquota,
83 Opt_uquota, Opt_gquota, Opt_pquota,
84 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
85 Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
86};
87
88static const match_table_t tokens = {
89 {Opt_logbufs, "logbufs=%u"}, /* number of XFS log buffers */
90 {Opt_logbsize, "logbsize=%s"}, /* size of XFS log buffers */
91 {Opt_logdev, "logdev=%s"}, /* log device */
92 {Opt_rtdev, "rtdev=%s"}, /* realtime I/O device */
93 {Opt_biosize, "biosize=%u"}, /* log2 of preferred buffered io size */
94 {Opt_wsync, "wsync"}, /* safe-mode nfs compatible mount */
95 {Opt_noalign, "noalign"}, /* turn off stripe alignment */
96 {Opt_swalloc, "swalloc"}, /* turn on stripe width allocation */
97 {Opt_sunit, "sunit=%u"}, /* data volume stripe unit */
98 {Opt_swidth, "swidth=%u"}, /* data volume stripe width */
99 {Opt_nouuid, "nouuid"}, /* ignore filesystem UUID */
100 {Opt_mtpt, "mtpt"}, /* filesystem mount point */
101 {Opt_grpid, "grpid"}, /* group-ID from parent directory */
102 {Opt_nogrpid, "nogrpid"}, /* group-ID from current process */
103 {Opt_bsdgroups, "bsdgroups"}, /* group-ID from parent directory */
104 {Opt_sysvgroups,"sysvgroups"}, /* group-ID from current process */
105 {Opt_allocsize, "allocsize=%s"},/* preferred allocation size */
106 {Opt_norecovery,"norecovery"}, /* don't run XFS recovery */
107 {Opt_inode64, "inode64"}, /* inodes can be allocated anywhere */
108 {Opt_inode32, "inode32"}, /* inode allocation limited to
109 * XFS_MAXINUMBER_32 */
110 {Opt_ikeep, "ikeep"}, /* do not free empty inode clusters */
111 {Opt_noikeep, "noikeep"}, /* free empty inode clusters */
112 {Opt_largeio, "largeio"}, /* report large I/O sizes in stat() */
113 {Opt_nolargeio, "nolargeio"}, /* do not report large I/O sizes
114 * in stat(). */
115 {Opt_attr2, "attr2"}, /* do use attr2 attribute format */
116 {Opt_noattr2, "noattr2"}, /* do not use attr2 attribute format */
117 {Opt_filestreams,"filestreams"},/* use filestreams allocator */
118 {Opt_quota, "quota"}, /* disk quotas (user) */
119 {Opt_noquota, "noquota"}, /* no quotas */
120 {Opt_usrquota, "usrquota"}, /* user quota enabled */
121 {Opt_grpquota, "grpquota"}, /* group quota enabled */
122 {Opt_prjquota, "prjquota"}, /* project quota enabled */
123 {Opt_uquota, "uquota"}, /* user quota (IRIX variant) */
124 {Opt_gquota, "gquota"}, /* group quota (IRIX variant) */
125 {Opt_pquota, "pquota"}, /* project quota (IRIX variant) */
126 {Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
127 {Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
128 {Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
129 {Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
130 {Opt_discard, "discard"}, /* Discard unused blocks */
131 {Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
132
133 {Opt_dax, "dax"}, /* Enable direct access to bdev pages */
134
135 /* Deprecated mount options scheduled for removal */
136 {Opt_barrier, "barrier"}, /* use writer barriers for log write and
137 * unwritten extent conversion */
138 {Opt_nobarrier, "nobarrier"}, /* .. disable */
139
140 {Opt_err, NULL},
141};
142
143
144STATIC int
145suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
146{
147 int last, shift_left_factor = 0, _res;
148 char *value;
149 int ret = 0;
150
151 value = match_strdup(s);
152 if (!value)
153 return -ENOMEM;
154
155 last = strlen(value) - 1;
156 if (value[last] == 'K' || value[last] == 'k') {
157 shift_left_factor = 10;
158 value[last] = '\0';
159 }
160 if (value[last] == 'M' || value[last] == 'm') {
161 shift_left_factor = 20;
162 value[last] = '\0';
163 }
164 if (value[last] == 'G' || value[last] == 'g') {
165 shift_left_factor = 30;
166 value[last] = '\0';
167 }
168
169 if (kstrtoint(value, base, &_res))
170 ret = -EINVAL;
171 kfree(value);
172 *res = _res << shift_left_factor;
173 return ret;
174}
175
176/*
177 * This function fills in xfs_mount_t fields based on mount args.
178 * Note: the superblock has _not_ yet been read in.
179 *
180 * Note that this function leaks the various device name allocations on
181 * failure. The caller takes care of them.
182 *
183 * *sb is const because this is also used to test options on the remount
184 * path, and we don't want this to have any side effects at remount time.
185 * Today this function does not change *sb, but just to future-proof...
186 */
187STATIC int
188xfs_parseargs(
189 struct xfs_mount *mp,
190 char *options)
191{
192 const struct super_block *sb = mp->m_super;
193 char *p;
194 substring_t args[MAX_OPT_ARGS];
195 int dsunit = 0;
196 int dswidth = 0;
197 int iosize = 0;
198 __uint8_t iosizelog = 0;
199
200 /*
201 * set up the mount name first so all the errors will refer to the
202 * correct device.
203 */
204 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
205 if (!mp->m_fsname)
206 return -ENOMEM;
207 mp->m_fsname_len = strlen(mp->m_fsname) + 1;
208
209 /*
210 * Copy binary VFS mount flags we are interested in.
211 */
212 if (sb->s_flags & MS_RDONLY)
213 mp->m_flags |= XFS_MOUNT_RDONLY;
214 if (sb->s_flags & MS_DIRSYNC)
215 mp->m_flags |= XFS_MOUNT_DIRSYNC;
216 if (sb->s_flags & MS_SYNCHRONOUS)
217 mp->m_flags |= XFS_MOUNT_WSYNC;
218
219 /*
220 * Set some default flags that could be cleared by the mount option
221 * parsing.
222 */
223 mp->m_flags |= XFS_MOUNT_BARRIER;
224 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
225
226 /*
227 * These can be overridden by the mount option parsing.
228 */
229 mp->m_logbufs = -1;
230 mp->m_logbsize = -1;
231
232 if (!options)
233 goto done;
234
235 while ((p = strsep(&options, ",")) != NULL) {
236 int token;
237
238 if (!*p)
239 continue;
240
241 token = match_token(p, tokens, args);
242 switch (token) {
243 case Opt_logbufs:
244 if (match_int(args, &mp->m_logbufs))
245 return -EINVAL;
246 break;
247 case Opt_logbsize:
248 if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
249 return -EINVAL;
250 break;
251 case Opt_logdev:
252 mp->m_logname = match_strdup(args);
253 if (!mp->m_logname)
254 return -ENOMEM;
255 break;
256 case Opt_mtpt:
257 xfs_warn(mp, "%s option not allowed on this system", p);
258 return -EINVAL;
259 case Opt_rtdev:
260 mp->m_rtname = match_strdup(args);
261 if (!mp->m_rtname)
262 return -ENOMEM;
263 break;
264 case Opt_allocsize:
265 case Opt_biosize:
266 if (suffix_kstrtoint(args, 10, &iosize))
267 return -EINVAL;
268 iosizelog = ffs(iosize) - 1;
269 break;
270 case Opt_grpid:
271 case Opt_bsdgroups:
272 mp->m_flags |= XFS_MOUNT_GRPID;
273 break;
274 case Opt_nogrpid:
275 case Opt_sysvgroups:
276 mp->m_flags &= ~XFS_MOUNT_GRPID;
277 break;
278 case Opt_wsync:
279 mp->m_flags |= XFS_MOUNT_WSYNC;
280 break;
281 case Opt_norecovery:
282 mp->m_flags |= XFS_MOUNT_NORECOVERY;
283 break;
284 case Opt_noalign:
285 mp->m_flags |= XFS_MOUNT_NOALIGN;
286 break;
287 case Opt_swalloc:
288 mp->m_flags |= XFS_MOUNT_SWALLOC;
289 break;
290 case Opt_sunit:
291 if (match_int(args, &dsunit))
292 return -EINVAL;
293 break;
294 case Opt_swidth:
295 if (match_int(args, &dswidth))
296 return -EINVAL;
297 break;
298 case Opt_inode32:
299 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
300 break;
301 case Opt_inode64:
302 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
303 break;
304 case Opt_nouuid:
305 mp->m_flags |= XFS_MOUNT_NOUUID;
306 break;
307 case Opt_ikeep:
308 mp->m_flags |= XFS_MOUNT_IKEEP;
309 break;
310 case Opt_noikeep:
311 mp->m_flags &= ~XFS_MOUNT_IKEEP;
312 break;
313 case Opt_largeio:
314 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
315 break;
316 case Opt_nolargeio:
317 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
318 break;
319 case Opt_attr2:
320 mp->m_flags |= XFS_MOUNT_ATTR2;
321 break;
322 case Opt_noattr2:
323 mp->m_flags &= ~XFS_MOUNT_ATTR2;
324 mp->m_flags |= XFS_MOUNT_NOATTR2;
325 break;
326 case Opt_filestreams:
327 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
328 break;
329 case Opt_noquota:
330 mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
331 mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
332 mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
333 break;
334 case Opt_quota:
335 case Opt_uquota:
336 case Opt_usrquota:
337 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
338 XFS_UQUOTA_ENFD);
339 break;
340 case Opt_qnoenforce:
341 case Opt_uqnoenforce:
342 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
343 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
344 break;
345 case Opt_pquota:
346 case Opt_prjquota:
347 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
348 XFS_PQUOTA_ENFD);
349 break;
350 case Opt_pqnoenforce:
351 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
352 mp->m_qflags &= ~XFS_PQUOTA_ENFD;
353 break;
354 case Opt_gquota:
355 case Opt_grpquota:
356 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
357 XFS_GQUOTA_ENFD);
358 break;
359 case Opt_gqnoenforce:
360 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
361 mp->m_qflags &= ~XFS_GQUOTA_ENFD;
362 break;
363 case Opt_discard:
364 mp->m_flags |= XFS_MOUNT_DISCARD;
365 break;
366 case Opt_nodiscard:
367 mp->m_flags &= ~XFS_MOUNT_DISCARD;
368 break;
369#ifdef CONFIG_FS_DAX
370 case Opt_dax:
371 mp->m_flags |= XFS_MOUNT_DAX;
372 break;
373#endif
374 case Opt_barrier:
375 xfs_warn(mp, "%s option is deprecated, ignoring.", p);
376 mp->m_flags |= XFS_MOUNT_BARRIER;
377 break;
378 case Opt_nobarrier:
379 xfs_warn(mp, "%s option is deprecated, ignoring.", p);
380 mp->m_flags &= ~XFS_MOUNT_BARRIER;
381 break;
382 default:
383 xfs_warn(mp, "unknown mount option [%s].", p);
384 return -EINVAL;
385 }
386 }
387
388 /*
389 * no recovery flag requires a read-only mount
390 */
391 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
392 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
393 xfs_warn(mp, "no-recovery mounts must be read-only.");
394 return -EINVAL;
395 }
396
397 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
398 xfs_warn(mp,
399 "sunit and swidth options incompatible with the noalign option");
400 return -EINVAL;
401 }
402
403#ifndef CONFIG_XFS_QUOTA
404 if (XFS_IS_QUOTA_RUNNING(mp)) {
405 xfs_warn(mp, "quota support not available in this kernel.");
406 return -EINVAL;
407 }
408#endif
409
410 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
411 xfs_warn(mp, "sunit and swidth must be specified together");
412 return -EINVAL;
413 }
414
415 if (dsunit && (dswidth % dsunit != 0)) {
416 xfs_warn(mp,
417 "stripe width (%d) must be a multiple of the stripe unit (%d)",
418 dswidth, dsunit);
419 return -EINVAL;
420 }
421
422done:
423 if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
424 /*
425 * At this point the superblock has not been read
426 * in, therefore we do not know the block size.
427 * Before the mount call ends we will convert
428 * these to FSBs.
429 */
430 mp->m_dalign = dsunit;
431 mp->m_swidth = dswidth;
432 }
433
434 if (mp->m_logbufs != -1 &&
435 mp->m_logbufs != 0 &&
436 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
437 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
438 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
439 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
440 return -EINVAL;
441 }
442 if (mp->m_logbsize != -1 &&
443 mp->m_logbsize != 0 &&
444 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
445 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
446 !is_power_of_2(mp->m_logbsize))) {
447 xfs_warn(mp,
448 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
449 mp->m_logbsize);
450 return -EINVAL;
451 }
452
453 if (iosizelog) {
454 if (iosizelog > XFS_MAX_IO_LOG ||
455 iosizelog < XFS_MIN_IO_LOG) {
456 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
457 iosizelog, XFS_MIN_IO_LOG,
458 XFS_MAX_IO_LOG);
459 return -EINVAL;
460 }
461
462 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
463 mp->m_readio_log = iosizelog;
464 mp->m_writeio_log = iosizelog;
465 }
466
467 return 0;
468}
469
470struct proc_xfs_info {
471 uint64_t flag;
472 char *str;
473};
474
475STATIC int
476xfs_showargs(
477 struct xfs_mount *mp,
478 struct seq_file *m)
479{
480 static struct proc_xfs_info xfs_info_set[] = {
481 /* the few simple ones we can get from the mount struct */
482 { XFS_MOUNT_IKEEP, ",ikeep" },
483 { XFS_MOUNT_WSYNC, ",wsync" },
484 { XFS_MOUNT_NOALIGN, ",noalign" },
485 { XFS_MOUNT_SWALLOC, ",swalloc" },
486 { XFS_MOUNT_NOUUID, ",nouuid" },
487 { XFS_MOUNT_NORECOVERY, ",norecovery" },
488 { XFS_MOUNT_ATTR2, ",attr2" },
489 { XFS_MOUNT_FILESTREAMS, ",filestreams" },
490 { XFS_MOUNT_GRPID, ",grpid" },
491 { XFS_MOUNT_DISCARD, ",discard" },
492 { XFS_MOUNT_SMALL_INUMS, ",inode32" },
493 { XFS_MOUNT_DAX, ",dax" },
494 { 0, NULL }
495 };
496 static struct proc_xfs_info xfs_info_unset[] = {
497 /* the few simple ones we can get from the mount struct */
498 { XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
499 { XFS_MOUNT_BARRIER, ",nobarrier" },
500 { XFS_MOUNT_SMALL_INUMS, ",inode64" },
501 { 0, NULL }
502 };
503 struct proc_xfs_info *xfs_infop;
504
505 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
506 if (mp->m_flags & xfs_infop->flag)
507 seq_puts(m, xfs_infop->str);
508 }
509 for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
510 if (!(mp->m_flags & xfs_infop->flag))
511 seq_puts(m, xfs_infop->str);
512 }
513
514 if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
515 seq_printf(m, ",allocsize=%dk",
516 (int)(1 << mp->m_writeio_log) >> 10);
517
518 if (mp->m_logbufs > 0)
519 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
520 if (mp->m_logbsize > 0)
521 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
522
523 if (mp->m_logname)
524 seq_show_option(m, "logdev", mp->m_logname);
525 if (mp->m_rtname)
526 seq_show_option(m, "rtdev", mp->m_rtname);
527
528 if (mp->m_dalign > 0)
529 seq_printf(m, ",sunit=%d",
530 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
531 if (mp->m_swidth > 0)
532 seq_printf(m, ",swidth=%d",
533 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
534
535 if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
536 seq_puts(m, ",usrquota");
537 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
538 seq_puts(m, ",uqnoenforce");
539
540 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
541 if (mp->m_qflags & XFS_PQUOTA_ENFD)
542 seq_puts(m, ",prjquota");
543 else
544 seq_puts(m, ",pqnoenforce");
545 }
546 if (mp->m_qflags & XFS_GQUOTA_ACCT) {
547 if (mp->m_qflags & XFS_GQUOTA_ENFD)
548 seq_puts(m, ",grpquota");
549 else
550 seq_puts(m, ",gqnoenforce");
551 }
552
553 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
554 seq_puts(m, ",noquota");
555
556 return 0;
557}
558static __uint64_t
559xfs_max_file_offset(
560 unsigned int blockshift)
561{
562 unsigned int pagefactor = 1;
563 unsigned int bitshift = BITS_PER_LONG - 1;
564
565 /* Figure out maximum filesize, on Linux this can depend on
566 * the filesystem blocksize (on 32 bit platforms).
567 * __block_write_begin does this in an [unsigned] long...
568 * page->index << (PAGE_SHIFT - bbits)
569 * So, for page sized blocks (4K on 32 bit platforms),
570 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
571 * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
572 * but for smaller blocksizes it is less (bbits = log2 bsize).
573 * Note1: get_block_t takes a long (implicit cast from above)
574 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
575 * can optionally convert the [unsigned] long from above into
576 * an [unsigned] long long.
577 */
578
579#if BITS_PER_LONG == 32
580# if defined(CONFIG_LBDAF)
581 ASSERT(sizeof(sector_t) == 8);
582 pagefactor = PAGE_SIZE;
583 bitshift = BITS_PER_LONG;
584# else
585 pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift);
586# endif
587#endif
588
589 return (((__uint64_t)pagefactor) << bitshift) - 1;
590}
591
592/*
593 * Set parameters for inode allocation heuristics, taking into account
594 * filesystem size and inode32/inode64 mount options; i.e. specifically
595 * whether or not XFS_MOUNT_SMALL_INUMS is set.
596 *
597 * Inode allocation patterns are altered only if inode32 is requested
598 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
599 * If altered, XFS_MOUNT_32BITINODES is set as well.
600 *
601 * An agcount independent of that in the mount structure is provided
602 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
603 * to the potentially higher ag count.
604 *
605 * Returns the maximum AG index which may contain inodes.
606 */
607xfs_agnumber_t
608xfs_set_inode_alloc(
609 struct xfs_mount *mp,
610 xfs_agnumber_t agcount)
611{
612 xfs_agnumber_t index;
613 xfs_agnumber_t maxagi = 0;
614 xfs_sb_t *sbp = &mp->m_sb;
615 xfs_agnumber_t max_metadata;
616 xfs_agino_t agino;
617 xfs_ino_t ino;
618
619 /*
620 * Calculate how much should be reserved for inodes to meet
621 * the max inode percentage. Used only for inode32.
622 */
623 if (mp->m_maxicount) {
624 __uint64_t icount;
625
626 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
627 do_div(icount, 100);
628 icount += sbp->sb_agblocks - 1;
629 do_div(icount, sbp->sb_agblocks);
630 max_metadata = icount;
631 } else {
632 max_metadata = agcount;
633 }
634
635 /* Get the last possible inode in the filesystem */
636 agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
637 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
638
639 /*
640 * If user asked for no more than 32-bit inodes, and the fs is
641 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
642 * the allocator to accommodate the request.
643 */
644 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
645 mp->m_flags |= XFS_MOUNT_32BITINODES;
646 else
647 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
648
649 for (index = 0; index < agcount; index++) {
650 struct xfs_perag *pag;
651
652 ino = XFS_AGINO_TO_INO(mp, index, agino);
653
654 pag = xfs_perag_get(mp, index);
655
656 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
657 if (ino > XFS_MAXINUMBER_32) {
658 pag->pagi_inodeok = 0;
659 pag->pagf_metadata = 0;
660 } else {
661 pag->pagi_inodeok = 1;
662 maxagi++;
663 if (index < max_metadata)
664 pag->pagf_metadata = 1;
665 else
666 pag->pagf_metadata = 0;
667 }
668 } else {
669 pag->pagi_inodeok = 1;
670 pag->pagf_metadata = 0;
671 }
672
673 xfs_perag_put(pag);
674 }
675
676 return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
677}
678
679STATIC int
680xfs_blkdev_get(
681 xfs_mount_t *mp,
682 const char *name,
683 struct block_device **bdevp)
684{
685 int error = 0;
686
687 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
688 mp);
689 if (IS_ERR(*bdevp)) {
690 error = PTR_ERR(*bdevp);
691 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
692 }
693
694 return error;
695}
696
697STATIC void
698xfs_blkdev_put(
699 struct block_device *bdev)
700{
701 if (bdev)
702 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
703}
704
705void
706xfs_blkdev_issue_flush(
707 xfs_buftarg_t *buftarg)
708{
709 blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
710}
711
712STATIC void
713xfs_close_devices(
714 struct xfs_mount *mp)
715{
716 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
717 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
718 xfs_free_buftarg(mp, mp->m_logdev_targp);
719 xfs_blkdev_put(logdev);
720 }
721 if (mp->m_rtdev_targp) {
722 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
723 xfs_free_buftarg(mp, mp->m_rtdev_targp);
724 xfs_blkdev_put(rtdev);
725 }
726 xfs_free_buftarg(mp, mp->m_ddev_targp);
727}
728
729/*
730 * The file system configurations are:
731 * (1) device (partition) with data and internal log
732 * (2) logical volume with data and log subvolumes.
733 * (3) logical volume with data, log, and realtime subvolumes.
734 *
735 * We only have to handle opening the log and realtime volumes here if
736 * they are present. The data subvolume has already been opened by
737 * get_sb_bdev() and is stored in sb->s_bdev.
738 */
739STATIC int
740xfs_open_devices(
741 struct xfs_mount *mp)
742{
743 struct block_device *ddev = mp->m_super->s_bdev;
744 struct block_device *logdev = NULL, *rtdev = NULL;
745 int error;
746
747 /*
748 * Open real time and log devices - order is important.
749 */
750 if (mp->m_logname) {
751 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
752 if (error)
753 goto out;
754 }
755
756 if (mp->m_rtname) {
757 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
758 if (error)
759 goto out_close_logdev;
760
761 if (rtdev == ddev || rtdev == logdev) {
762 xfs_warn(mp,
763 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
764 error = -EINVAL;
765 goto out_close_rtdev;
766 }
767 }
768
769 /*
770 * Setup xfs_mount buffer target pointers
771 */
772 error = -ENOMEM;
773 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
774 if (!mp->m_ddev_targp)
775 goto out_close_rtdev;
776
777 if (rtdev) {
778 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
779 if (!mp->m_rtdev_targp)
780 goto out_free_ddev_targ;
781 }
782
783 if (logdev && logdev != ddev) {
784 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
785 if (!mp->m_logdev_targp)
786 goto out_free_rtdev_targ;
787 } else {
788 mp->m_logdev_targp = mp->m_ddev_targp;
789 }
790
791 return 0;
792
793 out_free_rtdev_targ:
794 if (mp->m_rtdev_targp)
795 xfs_free_buftarg(mp, mp->m_rtdev_targp);
796 out_free_ddev_targ:
797 xfs_free_buftarg(mp, mp->m_ddev_targp);
798 out_close_rtdev:
799 xfs_blkdev_put(rtdev);
800 out_close_logdev:
801 if (logdev && logdev != ddev)
802 xfs_blkdev_put(logdev);
803 out:
804 return error;
805}
806
807/*
808 * Setup xfs_mount buffer target pointers based on superblock
809 */
810STATIC int
811xfs_setup_devices(
812 struct xfs_mount *mp)
813{
814 int error;
815
816 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
817 if (error)
818 return error;
819
820 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
821 unsigned int log_sector_size = BBSIZE;
822
823 if (xfs_sb_version_hassector(&mp->m_sb))
824 log_sector_size = mp->m_sb.sb_logsectsize;
825 error = xfs_setsize_buftarg(mp->m_logdev_targp,
826 log_sector_size);
827 if (error)
828 return error;
829 }
830 if (mp->m_rtdev_targp) {
831 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
832 mp->m_sb.sb_sectsize);
833 if (error)
834 return error;
835 }
836
837 return 0;
838}
839
840STATIC int
841xfs_init_mount_workqueues(
842 struct xfs_mount *mp)
843{
844 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
845 WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
846 if (!mp->m_buf_workqueue)
847 goto out;
848
849 mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
850 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
851 if (!mp->m_data_workqueue)
852 goto out_destroy_buf;
853
854 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
855 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
856 if (!mp->m_unwritten_workqueue)
857 goto out_destroy_data_iodone_queue;
858
859 mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
860 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
861 if (!mp->m_cil_workqueue)
862 goto out_destroy_unwritten;
863
864 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
865 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
866 if (!mp->m_reclaim_workqueue)
867 goto out_destroy_cil;
868
869 mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
870 WQ_MEM_RECLAIM|WQ_FREEZABLE|WQ_HIGHPRI, 0,
871 mp->m_fsname);
872 if (!mp->m_log_workqueue)
873 goto out_destroy_reclaim;
874
875 mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
876 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
877 if (!mp->m_eofblocks_workqueue)
878 goto out_destroy_log;
879
880 return 0;
881
882out_destroy_log:
883 destroy_workqueue(mp->m_log_workqueue);
884out_destroy_reclaim:
885 destroy_workqueue(mp->m_reclaim_workqueue);
886out_destroy_cil:
887 destroy_workqueue(mp->m_cil_workqueue);
888out_destroy_unwritten:
889 destroy_workqueue(mp->m_unwritten_workqueue);
890out_destroy_data_iodone_queue:
891 destroy_workqueue(mp->m_data_workqueue);
892out_destroy_buf:
893 destroy_workqueue(mp->m_buf_workqueue);
894out:
895 return -ENOMEM;
896}
897
898STATIC void
899xfs_destroy_mount_workqueues(
900 struct xfs_mount *mp)
901{
902 destroy_workqueue(mp->m_eofblocks_workqueue);
903 destroy_workqueue(mp->m_log_workqueue);
904 destroy_workqueue(mp->m_reclaim_workqueue);
905 destroy_workqueue(mp->m_cil_workqueue);
906 destroy_workqueue(mp->m_data_workqueue);
907 destroy_workqueue(mp->m_unwritten_workqueue);
908 destroy_workqueue(mp->m_buf_workqueue);
909}
910
911/*
912 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
913 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
914 * for IO to complete so that we effectively throttle multiple callers to the
915 * rate at which IO is completing.
916 */
917void
918xfs_flush_inodes(
919 struct xfs_mount *mp)
920{
921 struct super_block *sb = mp->m_super;
922
923 if (down_read_trylock(&sb->s_umount)) {
924 sync_inodes_sb(sb);
925 up_read(&sb->s_umount);
926 }
927}
928
929/* Catch misguided souls that try to use this interface on XFS */
930STATIC struct inode *
931xfs_fs_alloc_inode(
932 struct super_block *sb)
933{
934 BUG();
935 return NULL;
936}
937
938/*
939 * Now that the generic code is guaranteed not to be accessing
940 * the linux inode, we can inactivate and reclaim the inode.
941 */
942STATIC void
943xfs_fs_destroy_inode(
944 struct inode *inode)
945{
946 struct xfs_inode *ip = XFS_I(inode);
947 int error;
948
949 trace_xfs_destroy_inode(ip);
950
951 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
952 XFS_STATS_INC(ip->i_mount, vn_rele);
953 XFS_STATS_INC(ip->i_mount, vn_remove);
954
955 if (xfs_is_reflink_inode(ip)) {
956 error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
957 if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
958 xfs_warn(ip->i_mount,
959"Error %d while evicting CoW blocks for inode %llu.",
960 error, ip->i_ino);
961 }
962
963 xfs_inactive(ip);
964
965 ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
966 XFS_STATS_INC(ip->i_mount, vn_reclaim);
967
968 /*
969 * We should never get here with one of the reclaim flags already set.
970 */
971 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
972 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
973
974 /*
975 * We always use background reclaim here because even if the
976 * inode is clean, it still may be under IO and hence we have
977 * to take the flush lock. The background reclaim path handles
978 * this more efficiently than we can here, so simply let background
979 * reclaim tear down all inodes.
980 */
981 xfs_inode_set_reclaim_tag(ip);
982}
983
984/*
985 * Slab object creation initialisation for the XFS inode.
986 * This covers only the idempotent fields in the XFS inode;
987 * all other fields need to be initialised on allocation
988 * from the slab. This avoids the need to repeatedly initialise
989 * fields in the xfs inode that left in the initialise state
990 * when freeing the inode.
991 */
992STATIC void
993xfs_fs_inode_init_once(
994 void *inode)
995{
996 struct xfs_inode *ip = inode;
997
998 memset(ip, 0, sizeof(struct xfs_inode));
999
1000 /* vfs inode */
1001 inode_init_once(VFS_I(ip));
1002
1003 /* xfs inode */
1004 atomic_set(&ip->i_pincount, 0);
1005 spin_lock_init(&ip->i_flags_lock);
1006
1007 mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1008 "xfsino", ip->i_ino);
1009 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1010 "xfsino", ip->i_ino);
1011}
1012
1013/*
1014 * We do an unlocked check for XFS_IDONTCACHE here because we are already
1015 * serialised against cache hits here via the inode->i_lock and igrab() in
1016 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
1017 * racing with us, and it avoids needing to grab a spinlock here for every inode
1018 * we drop the final reference on.
1019 */
1020STATIC int
1021xfs_fs_drop_inode(
1022 struct inode *inode)
1023{
1024 struct xfs_inode *ip = XFS_I(inode);
1025
1026 /*
1027 * If this unlinked inode is in the middle of recovery, don't
1028 * drop the inode just yet; log recovery will take care of
1029 * that. See the comment for this inode flag.
1030 */
1031 if (ip->i_flags & XFS_IRECOVERY) {
1032 ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
1033 return 0;
1034 }
1035
1036 return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
1037}
1038
1039STATIC void
1040xfs_free_fsname(
1041 struct xfs_mount *mp)
1042{
1043 kfree(mp->m_fsname);
1044 kfree(mp->m_rtname);
1045 kfree(mp->m_logname);
1046}
1047
1048STATIC int
1049xfs_fs_sync_fs(
1050 struct super_block *sb,
1051 int wait)
1052{
1053 struct xfs_mount *mp = XFS_M(sb);
1054
1055 /*
1056 * Doing anything during the async pass would be counterproductive.
1057 */
1058 if (!wait)
1059 return 0;
1060
1061 xfs_log_force(mp, XFS_LOG_SYNC);
1062 if (laptop_mode) {
1063 /*
1064 * The disk must be active because we're syncing.
1065 * We schedule log work now (now that the disk is
1066 * active) instead of later (when it might not be).
1067 */
1068 flush_delayed_work(&mp->m_log->l_work);
1069 }
1070
1071 return 0;
1072}
1073
1074STATIC int
1075xfs_fs_statfs(
1076 struct dentry *dentry,
1077 struct kstatfs *statp)
1078{
1079 struct xfs_mount *mp = XFS_M(dentry->d_sb);
1080 xfs_sb_t *sbp = &mp->m_sb;
1081 struct xfs_inode *ip = XFS_I(d_inode(dentry));
1082 __uint64_t fakeinos, id;
1083 __uint64_t icount;
1084 __uint64_t ifree;
1085 __uint64_t fdblocks;
1086 xfs_extlen_t lsize;
1087 __int64_t ffree;
1088
1089 statp->f_type = XFS_SB_MAGIC;
1090 statp->f_namelen = MAXNAMELEN - 1;
1091
1092 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1093 statp->f_fsid.val[0] = (u32)id;
1094 statp->f_fsid.val[1] = (u32)(id >> 32);
1095
1096 icount = percpu_counter_sum(&mp->m_icount);
1097 ifree = percpu_counter_sum(&mp->m_ifree);
1098 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
1099
1100 spin_lock(&mp->m_sb_lock);
1101 statp->f_bsize = sbp->sb_blocksize;
1102 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1103 statp->f_blocks = sbp->sb_dblocks - lsize;
1104 spin_unlock(&mp->m_sb_lock);
1105
1106 statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
1107 statp->f_bavail = statp->f_bfree;
1108
1109 fakeinos = statp->f_bfree << sbp->sb_inopblog;
1110 statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1111 if (mp->m_maxicount)
1112 statp->f_files = min_t(typeof(statp->f_files),
1113 statp->f_files,
1114 mp->m_maxicount);
1115
1116 /* If sb_icount overshot maxicount, report actual allocation */
1117 statp->f_files = max_t(typeof(statp->f_files),
1118 statp->f_files,
1119 sbp->sb_icount);
1120
1121 /* make sure statp->f_ffree does not underflow */
1122 ffree = statp->f_files - (icount - ifree);
1123 statp->f_ffree = max_t(__int64_t, ffree, 0);
1124
1125
1126 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1127 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
1128 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
1129 xfs_qm_statvfs(ip, statp);
1130 return 0;
1131}
1132
1133STATIC void
1134xfs_save_resvblks(struct xfs_mount *mp)
1135{
1136 __uint64_t resblks = 0;
1137
1138 mp->m_resblks_save = mp->m_resblks;
1139 xfs_reserve_blocks(mp, &resblks, NULL);
1140}
1141
1142STATIC void
1143xfs_restore_resvblks(struct xfs_mount *mp)
1144{
1145 __uint64_t resblks;
1146
1147 if (mp->m_resblks_save) {
1148 resblks = mp->m_resblks_save;
1149 mp->m_resblks_save = 0;
1150 } else
1151 resblks = xfs_default_resblks(mp);
1152
1153 xfs_reserve_blocks(mp, &resblks, NULL);
1154}
1155
1156/*
1157 * Trigger writeback of all the dirty metadata in the file system.
1158 *
1159 * This ensures that the metadata is written to their location on disk rather
1160 * than just existing in transactions in the log. This means after a quiesce
1161 * there is no log replay required to write the inodes to disk - this is the
1162 * primary difference between a sync and a quiesce.
1163 *
1164 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
1165 * it is started again when appropriate.
1166 */
1167void
1168xfs_quiesce_attr(
1169 struct xfs_mount *mp)
1170{
1171 int error = 0;
1172
1173 /* wait for all modifications to complete */
1174 while (atomic_read(&mp->m_active_trans) > 0)
1175 delay(100);
1176
1177 /* force the log to unpin objects from the now complete transactions */
1178 xfs_log_force(mp, XFS_LOG_SYNC);
1179
1180 /* reclaim inodes to do any IO before the freeze completes */
1181 xfs_reclaim_inodes(mp, 0);
1182 xfs_reclaim_inodes(mp, SYNC_WAIT);
1183
1184 /* Push the superblock and write an unmount record */
1185 error = xfs_log_sbcount(mp);
1186 if (error)
1187 xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
1188 "Frozen image may not be consistent.");
1189 /*
1190 * Just warn here till VFS can correctly support
1191 * read-only remount without racing.
1192 */
1193 WARN_ON(atomic_read(&mp->m_active_trans) != 0);
1194
1195 xfs_log_quiesce(mp);
1196}
1197
1198STATIC int
1199xfs_test_remount_options(
1200 struct super_block *sb,
1201 struct xfs_mount *mp,
1202 char *options)
1203{
1204 int error = 0;
1205 struct xfs_mount *tmp_mp;
1206
1207 tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
1208 if (!tmp_mp)
1209 return -ENOMEM;
1210
1211 tmp_mp->m_super = sb;
1212 error = xfs_parseargs(tmp_mp, options);
1213 xfs_free_fsname(tmp_mp);
1214 kfree(tmp_mp);
1215
1216 return error;
1217}
1218
1219STATIC int
1220xfs_fs_remount(
1221 struct super_block *sb,
1222 int *flags,
1223 char *options)
1224{
1225 struct xfs_mount *mp = XFS_M(sb);
1226 xfs_sb_t *sbp = &mp->m_sb;
1227 substring_t args[MAX_OPT_ARGS];
1228 char *p;
1229 int error;
1230
1231 /* First, check for complete junk; i.e. invalid options */
1232 error = xfs_test_remount_options(sb, mp, options);
1233 if (error)
1234 return error;
1235
1236 sync_filesystem(sb);
1237 while ((p = strsep(&options, ",")) != NULL) {
1238 int token;
1239
1240 if (!*p)
1241 continue;
1242
1243 token = match_token(p, tokens, args);
1244 switch (token) {
1245 case Opt_barrier:
1246 xfs_warn(mp, "%s option is deprecated, ignoring.", p);
1247 mp->m_flags |= XFS_MOUNT_BARRIER;
1248 break;
1249 case Opt_nobarrier:
1250 xfs_warn(mp, "%s option is deprecated, ignoring.", p);
1251 mp->m_flags &= ~XFS_MOUNT_BARRIER;
1252 break;
1253 case Opt_inode64:
1254 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1255 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1256 break;
1257 case Opt_inode32:
1258 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1259 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1260 break;
1261 default:
1262 /*
1263 * Logically we would return an error here to prevent
1264 * users from believing they might have changed
1265 * mount options using remount which can't be changed.
1266 *
1267 * But unfortunately mount(8) adds all options from
1268 * mtab and fstab to the mount arguments in some cases
1269 * so we can't blindly reject options, but have to
1270 * check for each specified option if it actually
1271 * differs from the currently set option and only
1272 * reject it if that's the case.
1273 *
1274 * Until that is implemented we return success for
1275 * every remount request, and silently ignore all
1276 * options that we can't actually change.
1277 */
1278#if 0
1279 xfs_info(mp,
1280 "mount option \"%s\" not supported for remount", p);
1281 return -EINVAL;
1282#else
1283 break;
1284#endif
1285 }
1286 }
1287
1288 /* ro -> rw */
1289 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1290 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1291 xfs_warn(mp,
1292 "ro->rw transition prohibited on norecovery mount");
1293 return -EINVAL;
1294 }
1295
1296 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1297 xfs_sb_has_ro_compat_feature(sbp,
1298 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1299 xfs_warn(mp,
1300"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1301 (sbp->sb_features_ro_compat &
1302 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1303 return -EINVAL;
1304 }
1305
1306 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1307
1308 /*
1309 * If this is the first remount to writeable state we
1310 * might have some superblock changes to update.
1311 */
1312 if (mp->m_update_sb) {
1313 error = xfs_sync_sb(mp, false);
1314 if (error) {
1315 xfs_warn(mp, "failed to write sb changes");
1316 return error;
1317 }
1318 mp->m_update_sb = false;
1319 }
1320
1321 /*
1322 * Fill out the reserve pool if it is empty. Use the stashed
1323 * value if it is non-zero, otherwise go with the default.
1324 */
1325 xfs_restore_resvblks(mp);
1326 xfs_log_work_queue(mp);
1327 xfs_queue_eofblocks(mp);
1328
1329 /* Recover any CoW blocks that never got remapped. */
1330 error = xfs_reflink_recover_cow(mp);
1331 if (error) {
1332 xfs_err(mp,
1333 "Error %d recovering leftover CoW allocations.", error);
1334 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1335 return error;
1336 }
1337
1338 /* Create the per-AG metadata reservation pool .*/
1339 error = xfs_fs_reserve_ag_blocks(mp);
1340 if (error && error != -ENOSPC)
1341 return error;
1342 }
1343
1344 /* rw -> ro */
1345 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
1346 /* Free the per-AG metadata reservation pool. */
1347 error = xfs_fs_unreserve_ag_blocks(mp);
1348 if (error) {
1349 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1350 return error;
1351 }
1352
1353 /*
1354 * Before we sync the metadata, we need to free up the reserve
1355 * block pool so that the used block count in the superblock on
1356 * disk is correct at the end of the remount. Stash the current
1357 * reserve pool size so that if we get remounted rw, we can
1358 * return it to the same size.
1359 */
1360 xfs_save_resvblks(mp);
1361
1362 /*
1363 * Cancel background eofb scanning so it cannot race with the
1364 * final log force+buftarg wait and deadlock the remount.
1365 */
1366 cancel_delayed_work_sync(&mp->m_eofblocks_work);
1367
1368 xfs_quiesce_attr(mp);
1369 mp->m_flags |= XFS_MOUNT_RDONLY;
1370 }
1371
1372 return 0;
1373}
1374
1375/*
1376 * Second stage of a freeze. The data is already frozen so we only
1377 * need to take care of the metadata. Once that's done sync the superblock
1378 * to the log to dirty it in case of a crash while frozen. This ensures that we
1379 * will recover the unlinked inode lists on the next mount.
1380 */
1381STATIC int
1382xfs_fs_freeze(
1383 struct super_block *sb)
1384{
1385 struct xfs_mount *mp = XFS_M(sb);
1386
1387 xfs_save_resvblks(mp);
1388 xfs_quiesce_attr(mp);
1389 return xfs_sync_sb(mp, true);
1390}
1391
1392STATIC int
1393xfs_fs_unfreeze(
1394 struct super_block *sb)
1395{
1396 struct xfs_mount *mp = XFS_M(sb);
1397
1398 xfs_restore_resvblks(mp);
1399 xfs_log_work_queue(mp);
1400 return 0;
1401}

STATIC int
xfs_fs_show_options(
	struct seq_file		*m,
	struct dentry		*root)
{
	return xfs_showargs(XFS_M(root->d_sb), m);
}

/*
 * This function fills in xfs_mount_t fields based on mount args.
 * Note: the superblock _has_ now been read in.
 */
STATIC int
xfs_finish_flags(
	struct xfs_mount	*mp)
{
	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);

	/* Fail a mount where the logbuf is smaller than the log stripe */
	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
		if (mp->m_logbsize <= 0 &&
		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
			mp->m_logbsize = mp->m_sb.sb_logsunit;
		} else if (mp->m_logbsize > 0 &&
			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
			xfs_warn(mp,
		"logbuf size must be greater than or equal to log stripe size");
			return -EINVAL;
		}
	} else {
		/* Fail a mount if the logbuf is larger than 32K */
		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
			xfs_warn(mp,
		"logbuf size for version 1 logs must be 16K or 32K");
			return -EINVAL;
		}
	}

	/*
	 * V5 filesystems always use attr2 format for attributes.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    (mp->m_flags & XFS_MOUNT_NOATTR2)) {
		xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
			     "attr2 is always enabled for V5 filesystems.");
		return -EINVAL;
	}

	/*
	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
	 * told by noattr2 to turn it off
	 */
	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
		mp->m_flags |= XFS_MOUNT_ATTR2;

	/*
	 * prohibit r/w mounts of read-only filesystems
	 */
	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
		xfs_warn(mp,
			"cannot mount a read-only filesystem as read-write");
		return -EROFS;
	}

	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
	    !xfs_sb_version_has_pquotino(&mp->m_sb)) {
		xfs_warn(mp,
		  "Super block does not support project and group quota together");
		return -EINVAL;
	}

	return 0;
}
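
/*
 * Worked example for the log stripe check above (values illustrative): on a
 * v2 log with a 64k stripe unit (sb_logsunit = 65536), mounting with no
 * explicit logbsize promotes m_logbsize to 65536 because 65536 is larger
 * than XLOG_BIG_RECORD_BSIZE (32k), while an explicit "-o logbsize=32768"
 * is rejected with -EINVAL because 32768 < 65536. On a v1 log, any logbsize
 * above 32k is rejected outright.
 */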

static int
xfs_init_percpu_counters(
	struct xfs_mount	*mp)
{
	int		error;

	error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
	if (error)
		return -ENOMEM;

	error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
	if (error)
		goto free_icount;

	error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
	if (error)
		goto free_ifree;

	return 0;

free_ifree:
	percpu_counter_destroy(&mp->m_ifree);
free_icount:
	percpu_counter_destroy(&mp->m_icount);
	return -ENOMEM;
}
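
/*
 * Minimal percpu_counter usage sketch (illustrative only, not XFS code):
 * updates are cheap per-CPU deltas, while an exact value requires summing
 * across all CPUs, which is why XFS keeps hot counts such as m_fdblocks in
 * this form.
 *
 *	struct percpu_counter c;
 *	s64 total;
 *
 *	if (percpu_counter_init(&c, 0, GFP_KERNEL))
 *		return -ENOMEM;
 *	percpu_counter_add(&c, 42);		// fast, per-CPU delta
 *	total = percpu_counter_sum(&c);		// slow, exact global sum
 *	percpu_counter_destroy(&c);
 */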

void
xfs_reinit_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
	percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
	percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
}

static void
xfs_destroy_percpu_counters(
	struct xfs_mount	*mp)
{
	percpu_counter_destroy(&mp->m_icount);
	percpu_counter_destroy(&mp->m_ifree);
	percpu_counter_destroy(&mp->m_fdblocks);
}

STATIC int
xfs_fs_fill_super(
	struct super_block	*sb,
	void			*data,
	int			silent)
{
	struct inode		*root;
	struct xfs_mount	*mp = NULL;
	int			flags = 0, error = -ENOMEM;

	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
	if (!mp)
		goto out;

	spin_lock_init(&mp->m_sb_lock);
	mutex_init(&mp->m_growlock);
	atomic_set(&mp->m_active_trans, 0);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
	INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
	INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
	mp->m_kobj.kobject.kset = xfs_kset;

	mp->m_super = sb;
	sb->s_fs_info = mp;

	error = xfs_parseargs(mp, (char *)data);
	if (error)
		goto out_free_fsname;

	sb_min_blocksize(sb, BBSIZE);
	sb->s_xattr = xfs_xattr_handlers;
	sb->s_export_op = &xfs_export_operations;
#ifdef CONFIG_XFS_QUOTA
	sb->s_qcop = &xfs_quotactl_operations;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
#endif
	sb->s_op = &xfs_super_operations;

	if (silent)
		flags |= XFS_MFSI_QUIET;

	error = xfs_open_devices(mp);
	if (error)
		goto out_free_fsname;

	error = xfs_init_mount_workqueues(mp);
	if (error)
		goto out_close_devices;

	error = xfs_init_percpu_counters(mp);
	if (error)
		goto out_destroy_workqueues;

	/* Allocate stats memory before we do operations that might use it */
	mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
	if (!mp->m_stats.xs_stats) {
		error = -ENOMEM;
		goto out_destroy_counters;
	}

	error = xfs_readsb(mp, flags);
	if (error)
		goto out_free_stats;

	error = xfs_finish_flags(mp);
	if (error)
		goto out_free_sb;

	error = xfs_setup_devices(mp);
	if (error)
		goto out_free_sb;

	error = xfs_filestream_mount(mp);
	if (error)
		goto out_free_sb;

	/*
	 * we must configure the block size in the superblock before we run the
	 * full mount process as the mount process can look up and cache inodes.
	 */
	sb->s_magic = XFS_SB_MAGIC;
	sb->s_blocksize = mp->m_sb.sb_blocksize;
	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
	sb->s_max_links = XFS_MAXLINK;
	sb->s_time_gran = 1;
	set_posix_acl_flag(sb);

	/* version 5 superblocks support inode version counters. */
	if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
		sb->s_flags |= MS_I_VERSION;

	if (mp->m_flags & XFS_MOUNT_DAX) {
		xfs_warn(mp,
		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");

		error = bdev_dax_supported(sb, sb->s_blocksize);
		if (error) {
			xfs_alert(mp,
			"DAX unsupported by block device. Turning off DAX.");
			mp->m_flags &= ~XFS_MOUNT_DAX;
		}
		if (xfs_sb_version_hasreflink(&mp->m_sb))
			xfs_alert(mp,
		"DAX and reflink have not been tested together!");
	}

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		if (mp->m_sb.sb_rblocks) {
			xfs_alert(mp,
	"EXPERIMENTAL reverse mapping btree not compatible with realtime device!");
			error = -EINVAL;
			goto out_filestream_unmount;
		}
		xfs_alert(mp,
	"EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb))
		xfs_alert(mp,
	"EXPERIMENTAL reflink feature enabled. Use at your own risk!");

	error = xfs_mountfs(mp);
	if (error)
		goto out_filestream_unmount;

	root = igrab(VFS_I(mp->m_rootip));
	if (!root) {
		error = -ENOENT;
		goto out_unmount;
	}
	sb->s_root = d_make_root(root);
	if (!sb->s_root) {
		error = -ENOMEM;
		goto out_unmount;
	}

	return 0;

 out_filestream_unmount:
	xfs_filestream_unmount(mp);
 out_free_sb:
	xfs_freesb(mp);
 out_free_stats:
	free_percpu(mp->m_stats.xs_stats);
 out_destroy_counters:
	xfs_destroy_percpu_counters(mp);
 out_destroy_workqueues:
	xfs_destroy_mount_workqueues(mp);
 out_close_devices:
	xfs_close_devices(mp);
 out_free_fsname:
	xfs_free_fsname(mp);
	kfree(mp);
 out:
	return error;

 out_unmount:
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);
	goto out_free_sb;
}
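
/*
 * Worked example for the block size setup above: for the common 4k
 * filesystem block, sb_blocksize = 4096 = 1 << 12, so ffs(4096) returns 13
 * and s_blocksize_bits becomes 12. Note also that the error labels unwind
 * in strict reverse order of setup; out_unmount sits after the shared
 * labels because teardown after a successful xfs_mountfs() must run
 * xfs_unmountfs() before rejoining the common path at out_free_sb.
 */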

STATIC void
xfs_fs_put_super(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);

	xfs_notice(mp, "Unmounting Filesystem");
	xfs_filestream_unmount(mp);
	xfs_unmountfs(mp);

	xfs_freesb(mp);
	free_percpu(mp->m_stats.xs_stats);
	xfs_destroy_percpu_counters(mp);
	xfs_destroy_mount_workqueues(mp);
	xfs_close_devices(mp);
	xfs_free_fsname(mp);
	kfree(mp);
}

STATIC struct dentry *
xfs_fs_mount(
	struct file_system_type	*fs_type,
	int			flags,
	const char		*dev_name,
	void			*data)
{
	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
}

static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
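
/*
 * Illustrative note: these two hooks plug XFS inode reclaim into the
 * generic per-superblock shrinker. Under memory pressure the VFS asks
 * ->nr_cached_objects() how much is reclaimable and then calls
 * ->free_cached_objects() with sc->nr_to_scan set to the batch it wants
 * back, roughly:
 *
 *	freeable = sb->s_op->nr_cached_objects(sb, sc);
 *	sc->nr_to_scan = batch;		// chosen by the shrinker core
 *	freed = sb->s_op->free_cached_objects(sb, sc);
 *
 * (simplified; the real super_cache_scan() also shrinks the dentry and
 * inode LRUs before the filesystem-private objects).
 */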

static const struct super_operations xfs_super_operations = {
	.alloc_inode		= xfs_fs_alloc_inode,
	.destroy_inode		= xfs_fs_destroy_inode,
	.drop_inode		= xfs_fs_drop_inode,
	.put_super		= xfs_fs_put_super,
	.sync_fs		= xfs_fs_sync_fs,
	.freeze_fs		= xfs_fs_freeze,
	.unfreeze_fs		= xfs_fs_unfreeze,
	.statfs			= xfs_fs_statfs,
	.remount_fs		= xfs_fs_remount,
	.show_options		= xfs_fs_show_options,
	.nr_cached_objects	= xfs_fs_nr_cached_objects,
	.free_cached_objects	= xfs_fs_free_cached_objects,
};

static struct file_system_type xfs_fs_type = {
	.owner			= THIS_MODULE,
	.name			= "xfs",
	.mount			= xfs_fs_mount,
	.kill_sb		= kill_block_super,
	.fs_flags		= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("xfs");
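
/*
 * Once register_filesystem(&xfs_fs_type) succeeds (see init_xfs_fs below),
 * "xfs" appears in /proc/filesystems and an ordinary mount exercises the
 * ->mount hook above, e.g.:
 *
 *	# mount -t xfs /dev/sdb1 /mnt
 *
 * MODULE_ALIAS_FS("xfs") additionally lets mount(2) trigger autoloading of
 * this module when the filesystem type has not been registered yet.
 */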

STATIC int __init
xfs_init_zones(void)
{
	xfs_ioend_bioset = bioset_create(4 * MAX_BUF_PER_PAGE,
			offsetof(struct xfs_ioend, io_inline_bio));
	if (!xfs_ioend_bioset)
		goto out;

	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
						"xfs_log_ticket");
	if (!xfs_log_ticket_zone)
		goto out_free_ioend_bioset;

	xfs_bmap_free_item_zone = kmem_zone_init(
			sizeof(struct xfs_extent_free_item),
			"xfs_bmap_free_item");
	if (!xfs_bmap_free_item_zone)
		goto out_destroy_log_ticket_zone;

	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
						"xfs_btree_cur");
	if (!xfs_btree_cur_zone)
		goto out_destroy_bmap_free_item_zone;

	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
						"xfs_da_state");
	if (!xfs_da_state_zone)
		goto out_destroy_btree_cur_zone;

	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
	if (!xfs_ifork_zone)
		goto out_destroy_da_state_zone;

	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
	if (!xfs_trans_zone)
		goto out_destroy_ifork_zone;

	xfs_log_item_desc_zone =
		kmem_zone_init(sizeof(struct xfs_log_item_desc),
			       "xfs_log_item_desc");
	if (!xfs_log_item_desc_zone)
		goto out_destroy_trans_zone;

	/*
	 * The size of the zone allocated buf log item is the maximum
	 * size possible under XFS. This wastes a little bit of memory,
	 * but it is much faster.
	 */
	xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
					   "xfs_buf_item");
	if (!xfs_buf_item_zone)
		goto out_destroy_log_item_desc_zone;

	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
				 sizeof(xfs_extent_t))), "xfs_efd_item");
	if (!xfs_efd_zone)
		goto out_destroy_buf_item_zone;

	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
				sizeof(xfs_extent_t))), "xfs_efi_item");
	if (!xfs_efi_zone)
		goto out_destroy_efd_zone;

	xfs_inode_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
			KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
	if (!xfs_inode_zone)
		goto out_destroy_efi_zone;

	xfs_ili_zone =
		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
					KM_ZONE_SPREAD, NULL);
	if (!xfs_ili_zone)
		goto out_destroy_inode_zone;
	xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
					"xfs_icr");
	if (!xfs_icreate_zone)
		goto out_destroy_ili_zone;

	xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
			"xfs_rud_item");
	if (!xfs_rud_zone)
		goto out_destroy_icreate_zone;

	xfs_rui_zone = kmem_zone_init(
			xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
			"xfs_rui_item");
	if (!xfs_rui_zone)
		goto out_destroy_rud_zone;

	xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
			"xfs_cud_item");
	if (!xfs_cud_zone)
		goto out_destroy_rui_zone;

	xfs_cui_zone = kmem_zone_init(
			xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
			"xfs_cui_item");
	if (!xfs_cui_zone)
		goto out_destroy_cud_zone;

	xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
			"xfs_bud_item");
	if (!xfs_bud_zone)
		goto out_destroy_cui_zone;

	xfs_bui_zone = kmem_zone_init(
			xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
			"xfs_bui_item");
	if (!xfs_bui_zone)
		goto out_destroy_bud_zone;

	return 0;

 out_destroy_bud_zone:
	kmem_zone_destroy(xfs_bud_zone);
 out_destroy_cui_zone:
	kmem_zone_destroy(xfs_cui_zone);
 out_destroy_cud_zone:
	kmem_zone_destroy(xfs_cud_zone);
 out_destroy_rui_zone:
	kmem_zone_destroy(xfs_rui_zone);
 out_destroy_rud_zone:
	kmem_zone_destroy(xfs_rud_zone);
 out_destroy_icreate_zone:
	kmem_zone_destroy(xfs_icreate_zone);
 out_destroy_ili_zone:
	kmem_zone_destroy(xfs_ili_zone);
 out_destroy_inode_zone:
	kmem_zone_destroy(xfs_inode_zone);
 out_destroy_efi_zone:
	kmem_zone_destroy(xfs_efi_zone);
 out_destroy_efd_zone:
	kmem_zone_destroy(xfs_efd_zone);
 out_destroy_buf_item_zone:
	kmem_zone_destroy(xfs_buf_item_zone);
 out_destroy_log_item_desc_zone:
	kmem_zone_destroy(xfs_log_item_desc_zone);
 out_destroy_trans_zone:
	kmem_zone_destroy(xfs_trans_zone);
 out_destroy_ifork_zone:
	kmem_zone_destroy(xfs_ifork_zone);
 out_destroy_da_state_zone:
	kmem_zone_destroy(xfs_da_state_zone);
 out_destroy_btree_cur_zone:
	kmem_zone_destroy(xfs_btree_cur_zone);
 out_destroy_bmap_free_item_zone:
	kmem_zone_destroy(xfs_bmap_free_item_zone);
 out_destroy_log_ticket_zone:
	kmem_zone_destroy(xfs_log_ticket_zone);
 out_free_ioend_bioset:
	bioset_free(xfs_ioend_bioset);
 out:
	return -ENOMEM;
}
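
/*
 * Illustrative sketch of the zone pattern used above (not new code): in
 * this era of the codebase kmem_zone_init() is a thin wrapper around
 * kmem_cache_create(), so each zone is a slab cache sized for exactly one
 * object type. A typical allocate/free cycle elsewhere in XFS looks
 * roughly like:
 *
 *	xfs_trans_t *tp = kmem_zone_zalloc(xfs_trans_zone, KM_SLEEP);
 *	...use tp...
 *	kmem_zone_free(xfs_trans_zone, tp);
 *
 * Sizing the buf log item zone to the maximum possible object trades a
 * little memory for never needing a reallocation on the logging fast path.
 */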

STATIC void
xfs_destroy_zones(void)
{
	/*
	 * Make sure all delayed RCU frees are flushed before we
	 * destroy caches.
	 */
	rcu_barrier();
	kmem_zone_destroy(xfs_bui_zone);
	kmem_zone_destroy(xfs_bud_zone);
	kmem_zone_destroy(xfs_cui_zone);
	kmem_zone_destroy(xfs_cud_zone);
	kmem_zone_destroy(xfs_rui_zone);
	kmem_zone_destroy(xfs_rud_zone);
	kmem_zone_destroy(xfs_icreate_zone);
	kmem_zone_destroy(xfs_ili_zone);
	kmem_zone_destroy(xfs_inode_zone);
	kmem_zone_destroy(xfs_efi_zone);
	kmem_zone_destroy(xfs_efd_zone);
	kmem_zone_destroy(xfs_buf_item_zone);
	kmem_zone_destroy(xfs_log_item_desc_zone);
	kmem_zone_destroy(xfs_trans_zone);
	kmem_zone_destroy(xfs_ifork_zone);
	kmem_zone_destroy(xfs_da_state_zone);
	kmem_zone_destroy(xfs_btree_cur_zone);
	kmem_zone_destroy(xfs_bmap_free_item_zone);
	kmem_zone_destroy(xfs_log_ticket_zone);
	bioset_free(xfs_ioend_bioset);
}

STATIC int __init
xfs_init_workqueues(void)
{
	/*
	 * The allocation workqueue can be used in memory reclaim situations
	 * (writepage path), and parallelism is only limited by the number of
	 * AGs in all the filesystems mounted. Hence use the default large
	 * max_active value for this workqueue.
	 */
	xfs_alloc_wq = alloc_workqueue("xfsalloc",
			WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
	if (!xfs_alloc_wq)
		return -ENOMEM;

	return 0;
}
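
/*
 * Illustrative sketch of how a caller uses this workqueue (simplified from
 * the btree split offload elsewhere in XFS; the names xfs_example_work and
 * xfs_example_worker are hypothetical): wrap the arguments around a
 * work_struct, queue it, and wait for the worker to signal completion.
 *
 *	struct xfs_example_work {		// hypothetical container
 *		struct work_struct	work;
 *		struct completion	done;
 *	} args;
 *
 *	init_completion(&args.done);
 *	INIT_WORK_ONSTACK(&args.work, xfs_example_worker);
 *	queue_work(xfs_alloc_wq, &args.work);
 *	wait_for_completion(&args.done);	// worker calls complete()
 *	destroy_work_on_stack(&args.work);
 *
 * WQ_MEM_RECLAIM guarantees a rescuer thread, so queued work can make
 * forward progress even when submitted from direct reclaim.
 */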

STATIC void
xfs_destroy_workqueues(void)
{
	destroy_workqueue(xfs_alloc_wq);
}

STATIC int __init
init_xfs_fs(void)
{
	int			error;

	xfs_check_ondisk_structs();

	printk(KERN_INFO XFS_VERSION_STRING " with "
			 XFS_BUILD_OPTIONS " enabled\n");

	xfs_extent_free_init_defer_op();
	xfs_rmap_update_init_defer_op();
	xfs_refcount_update_init_defer_op();
	xfs_bmap_update_init_defer_op();

	xfs_dir_startup();

	error = xfs_init_zones();
	if (error)
		goto out;

	error = xfs_init_workqueues();
	if (error)
		goto out_destroy_zones;

	error = xfs_mru_cache_init();
	if (error)
		goto out_destroy_wq;

	error = xfs_buf_init();
	if (error)
		goto out_mru_cache_uninit;

	error = xfs_init_procfs();
	if (error)
		goto out_buf_terminate;

	error = xfs_sysctl_register();
	if (error)
		goto out_cleanup_procfs;

	xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
	if (!xfs_kset) {
		error = -ENOMEM;
		goto out_sysctl_unregister;
	}

	xfsstats.xs_kobj.kobject.kset = xfs_kset;

	xfsstats.xs_stats = alloc_percpu(struct xfsstats);
	if (!xfsstats.xs_stats) {
		error = -ENOMEM;
		goto out_kset_unregister;
	}

	error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
			       "stats");
	if (error)
		goto out_free_stats;

#ifdef DEBUG
	xfs_dbg_kobj.kobject.kset = xfs_kset;
	error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
	if (error)
		goto out_remove_stats_kobj;
#endif

	error = xfs_qm_init();
	if (error)
		goto out_remove_dbg_kobj;

	error = register_filesystem(&xfs_fs_type);
	if (error)
		goto out_qm_exit;
	return 0;

 out_qm_exit:
	xfs_qm_exit();
 out_remove_dbg_kobj:
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
 out_remove_stats_kobj:
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
 out_free_stats:
	free_percpu(xfsstats.xs_stats);
 out_kset_unregister:
	kset_unregister(xfs_kset);
 out_sysctl_unregister:
	xfs_sysctl_unregister();
 out_cleanup_procfs:
	xfs_cleanup_procfs();
 out_buf_terminate:
	xfs_buf_terminate();
 out_mru_cache_uninit:
	xfs_mru_cache_uninit();
 out_destroy_wq:
	xfs_destroy_workqueues();
 out_destroy_zones:
	xfs_destroy_zones();
 out:
	return error;
}
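
/*
 * Resulting sysfs layout (illustrative): the kset created above appears as
 * /sys/fs/xfs, with the per-object kobjects below it, e.g. /sys/fs/xfs/stats
 * and, on DEBUG kernels, /sys/fs/xfs/debug. exit_xfs_fs() below tears the
 * module state down in essentially the reverse order of this initialisation,
 * which is also the order the error labels above unwind in.
 */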

STATIC void __exit
exit_xfs_fs(void)
{
	xfs_qm_exit();
	unregister_filesystem(&xfs_fs_type);
#ifdef DEBUG
	xfs_sysfs_del(&xfs_dbg_kobj);
#endif
	xfs_sysfs_del(&xfsstats.xs_kobj);
	free_percpu(xfsstats.xs_stats);
	kset_unregister(xfs_kset);
	xfs_sysctl_unregister();
	xfs_cleanup_procfs();
	xfs_buf_terminate();
	xfs_mru_cache_uninit();
	xfs_destroy_workqueues();
	xfs_destroy_zones();
	xfs_uuid_table_free();
}

module_init(init_xfs_fs);
module_exit(exit_xfs_fs);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
MODULE_LICENSE("GPL");