1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7#include "xfs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_inode.h"
15#include "xfs_btree.h"
16#include "xfs_bmap.h"
17#include "xfs_alloc.h"
18#include "xfs_fsops.h"
19#include "xfs_trans.h"
20#include "xfs_buf_item.h"
21#include "xfs_log.h"
22#include "xfs_log_priv.h"
23#include "xfs_dir2.h"
24#include "xfs_extfree_item.h"
25#include "xfs_mru_cache.h"
26#include "xfs_inode_item.h"
27#include "xfs_icache.h"
28#include "xfs_trace.h"
29#include "xfs_icreate_item.h"
30#include "xfs_filestream.h"
31#include "xfs_quota.h"
32#include "xfs_sysfs.h"
33#include "xfs_ondisk.h"
34#include "xfs_rmap_item.h"
35#include "xfs_refcount_item.h"
36#include "xfs_bmap_item.h"
37#include "xfs_reflink.h"
38
39#include <linux/magic.h>
40#include <linux/parser.h>
41
42static const struct super_operations xfs_super_operations;
43struct bio_set xfs_ioend_bioset;
44
45static struct kset *xfs_kset; /* top-level xfs sysfs dir */
46#ifdef DEBUG
47static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
48#endif
49
50/*
51 * Table driven mount option parser.
52 */
53enum {
54 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_biosize,
55 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
56 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
57 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
58 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
59 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
60 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
61 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
62 Opt_discard, Opt_nodiscard, Opt_dax, Opt_err,
63};
64
65static const match_table_t tokens = {
66 {Opt_logbufs, "logbufs=%u"}, /* number of XFS log buffers */
67 {Opt_logbsize, "logbsize=%s"}, /* size of XFS log buffers */
68 {Opt_logdev, "logdev=%s"}, /* log device */
69 {Opt_rtdev, "rtdev=%s"}, /* realtime I/O device */
70 {Opt_biosize, "biosize=%u"}, /* log2 of preferred buffered io size */
71 {Opt_wsync, "wsync"}, /* safe-mode nfs compatible mount */
72 {Opt_noalign, "noalign"}, /* turn off stripe alignment */
73 {Opt_swalloc, "swalloc"}, /* turn on stripe width allocation */
74 {Opt_sunit, "sunit=%u"}, /* data volume stripe unit */
75 {Opt_swidth, "swidth=%u"}, /* data volume stripe width */
76 {Opt_nouuid, "nouuid"}, /* ignore filesystem UUID */
77 {Opt_grpid, "grpid"}, /* group-ID from parent directory */
78 {Opt_nogrpid, "nogrpid"}, /* group-ID from current process */
79 {Opt_bsdgroups, "bsdgroups"}, /* group-ID from parent directory */
80 {Opt_sysvgroups,"sysvgroups"}, /* group-ID from current process */
81 {Opt_allocsize, "allocsize=%s"},/* preferred allocation size */
82 {Opt_norecovery,"norecovery"}, /* don't run XFS recovery */
83 {Opt_inode64, "inode64"}, /* inodes can be allocated anywhere */
84 {Opt_inode32, "inode32"}, /* inode allocation limited to
85 * XFS_MAXINUMBER_32 */
86 {Opt_ikeep, "ikeep"}, /* do not free empty inode clusters */
87 {Opt_noikeep, "noikeep"}, /* free empty inode clusters */
88 {Opt_largeio, "largeio"}, /* report large I/O sizes in stat() */
89 {Opt_nolargeio, "nolargeio"}, /* do not report large I/O sizes
90 * in stat(). */
91 {Opt_attr2, "attr2"}, /* do use attr2 attribute format */
92 {Opt_noattr2, "noattr2"}, /* do not use attr2 attribute format */
93 {Opt_filestreams,"filestreams"},/* use filestreams allocator */
94 {Opt_quota, "quota"}, /* disk quotas (user) */
95 {Opt_noquota, "noquota"}, /* no quotas */
96 {Opt_usrquota, "usrquota"}, /* user quota enabled */
97 {Opt_grpquota, "grpquota"}, /* group quota enabled */
98 {Opt_prjquota, "prjquota"}, /* project quota enabled */
99 {Opt_uquota, "uquota"}, /* user quota (IRIX variant) */
100 {Opt_gquota, "gquota"}, /* group quota (IRIX variant) */
101 {Opt_pquota, "pquota"}, /* project quota (IRIX variant) */
102 {Opt_uqnoenforce,"uqnoenforce"},/* user quota limit enforcement */
103 {Opt_gqnoenforce,"gqnoenforce"},/* group quota limit enforcement */
104 {Opt_pqnoenforce,"pqnoenforce"},/* project quota limit enforcement */
105 {Opt_qnoenforce, "qnoenforce"}, /* same as uqnoenforce */
106 {Opt_discard, "discard"}, /* Discard unused blocks */
107 {Opt_nodiscard, "nodiscard"}, /* Do not discard unused blocks */
108 {Opt_dax, "dax"}, /* Enable direct access to bdev pages */
109 {Opt_err, NULL},
110};
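/*
 * Illustrative use of a few of the options above (the device path and
 * mount point are examples only, not taken from this code):
 *
 * mount -t xfs -o logbufs=8,logbsize=256k,inode64 /dev/sdb1 /mnt/data
 */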
111
112
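/*
 * Parse an integer option value with an optional k/m/g size suffix, e.g.
 * "logbsize=32k" yields 32768. Returns 0 on success, -ENOMEM if the
 * argument cannot be duplicated, or -EINVAL if the number is malformed.
 */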
113STATIC int
114suffix_kstrtoint(const substring_t *s, unsigned int base, int *res)
115{
116 int last, shift_left_factor = 0, _res;
117 char *value;
118 int ret = 0;
119
120 value = match_strdup(s);
121 if (!value)
122 return -ENOMEM;
123
124 last = strlen(value) - 1;
125 if (value[last] == 'K' || value[last] == 'k') {
126 shift_left_factor = 10;
127 value[last] = '\0';
128 }
129 if (value[last] == 'M' || value[last] == 'm') {
130 shift_left_factor = 20;
131 value[last] = '\0';
132 }
133 if (value[last] == 'G' || value[last] == 'g') {
134 shift_left_factor = 30;
135 value[last] = '\0';
136 }
137
138 if (kstrtoint(value, base, &_res))
139 ret = -EINVAL;
140 kfree(value);
141 *res = _res << shift_left_factor;
142 return ret;
143}
144
145/*
146 * This function fills in xfs_mount_t fields based on mount args.
147 * Note: the superblock has _not_ yet been read in.
148 *
149 * Note that this function leaks the various device name allocations on
150 * failure. The caller takes care of them.
151 *
152 * *sb is const because this is also used to test options on the remount
153 * path, and we don't want this to have any side effects at remount time.
154 * Today this function does not change *sb, but just to future-proof...
155 */
156STATIC int
157xfs_parseargs(
158 struct xfs_mount *mp,
159 char *options)
160{
161 const struct super_block *sb = mp->m_super;
162 char *p;
163 substring_t args[MAX_OPT_ARGS];
164 int dsunit = 0;
165 int dswidth = 0;
166 int iosize = 0;
167 uint8_t iosizelog = 0;
168
169 /*
170 * set up the mount name first so all the errors will refer to the
171 * correct device.
172 */
173 mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
174 if (!mp->m_fsname)
175 return -ENOMEM;
176 mp->m_fsname_len = strlen(mp->m_fsname) + 1;
177
178 /*
179 * Copy binary VFS mount flags we are interested in.
180 */
181 if (sb_rdonly(sb))
182 mp->m_flags |= XFS_MOUNT_RDONLY;
183 if (sb->s_flags & SB_DIRSYNC)
184 mp->m_flags |= XFS_MOUNT_DIRSYNC;
185 if (sb->s_flags & SB_SYNCHRONOUS)
186 mp->m_flags |= XFS_MOUNT_WSYNC;
187
188 /*
189 * Set some default flags that could be cleared by the mount option
190 * parsing.
191 */
192 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
193
194 /*
195 * These can be overridden by the mount option parsing.
196 */
197 mp->m_logbufs = -1;
198 mp->m_logbsize = -1;
199
200 if (!options)
201 goto done;
202
203 while ((p = strsep(&options, ",")) != NULL) {
204 int token;
205
206 if (!*p)
207 continue;
208
209 token = match_token(p, tokens, args);
210 switch (token) {
211 case Opt_logbufs:
212 if (match_int(args, &mp->m_logbufs))
213 return -EINVAL;
214 break;
215 case Opt_logbsize:
216 if (suffix_kstrtoint(args, 10, &mp->m_logbsize))
217 return -EINVAL;
218 break;
219 case Opt_logdev:
220 kfree(mp->m_logname);
221 mp->m_logname = match_strdup(args);
222 if (!mp->m_logname)
223 return -ENOMEM;
224 break;
225 case Opt_rtdev:
226 kfree(mp->m_rtname);
227 mp->m_rtname = match_strdup(args);
228 if (!mp->m_rtname)
229 return -ENOMEM;
230 break;
231 case Opt_allocsize:
232 case Opt_biosize:
233 if (suffix_kstrtoint(args, 10, &iosize))
234 return -EINVAL;
235 iosizelog = ffs(iosize) - 1;
236 break;
237 case Opt_grpid:
238 case Opt_bsdgroups:
239 mp->m_flags |= XFS_MOUNT_GRPID;
240 break;
241 case Opt_nogrpid:
242 case Opt_sysvgroups:
243 mp->m_flags &= ~XFS_MOUNT_GRPID;
244 break;
245 case Opt_wsync:
246 mp->m_flags |= XFS_MOUNT_WSYNC;
247 break;
248 case Opt_norecovery:
249 mp->m_flags |= XFS_MOUNT_NORECOVERY;
250 break;
251 case Opt_noalign:
252 mp->m_flags |= XFS_MOUNT_NOALIGN;
253 break;
254 case Opt_swalloc:
255 mp->m_flags |= XFS_MOUNT_SWALLOC;
256 break;
257 case Opt_sunit:
258 if (match_int(args, &dsunit))
259 return -EINVAL;
260 break;
261 case Opt_swidth:
262 if (match_int(args, &dswidth))
263 return -EINVAL;
264 break;
265 case Opt_inode32:
266 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
267 break;
268 case Opt_inode64:
269 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
270 break;
271 case Opt_nouuid:
272 mp->m_flags |= XFS_MOUNT_NOUUID;
273 break;
274 case Opt_ikeep:
275 mp->m_flags |= XFS_MOUNT_IKEEP;
276 break;
277 case Opt_noikeep:
278 mp->m_flags &= ~XFS_MOUNT_IKEEP;
279 break;
280 case Opt_largeio:
281 mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
282 break;
283 case Opt_nolargeio:
284 mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
285 break;
286 case Opt_attr2:
287 mp->m_flags |= XFS_MOUNT_ATTR2;
288 break;
289 case Opt_noattr2:
290 mp->m_flags &= ~XFS_MOUNT_ATTR2;
291 mp->m_flags |= XFS_MOUNT_NOATTR2;
292 break;
293 case Opt_filestreams:
294 mp->m_flags |= XFS_MOUNT_FILESTREAMS;
295 break;
296 case Opt_noquota:
297 mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
298 mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
299 mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
300 break;
301 case Opt_quota:
302 case Opt_uquota:
303 case Opt_usrquota:
304 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
305 XFS_UQUOTA_ENFD);
306 break;
307 case Opt_qnoenforce:
308 case Opt_uqnoenforce:
309 mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
310 mp->m_qflags &= ~XFS_UQUOTA_ENFD;
311 break;
312 case Opt_pquota:
313 case Opt_prjquota:
314 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
315 XFS_PQUOTA_ENFD);
316 break;
317 case Opt_pqnoenforce:
318 mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
319 mp->m_qflags &= ~XFS_PQUOTA_ENFD;
320 break;
321 case Opt_gquota:
322 case Opt_grpquota:
323 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
324 XFS_GQUOTA_ENFD);
325 break;
326 case Opt_gqnoenforce:
327 mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
328 mp->m_qflags &= ~XFS_GQUOTA_ENFD;
329 break;
330 case Opt_discard:
331 mp->m_flags |= XFS_MOUNT_DISCARD;
332 break;
333 case Opt_nodiscard:
334 mp->m_flags &= ~XFS_MOUNT_DISCARD;
335 break;
336#ifdef CONFIG_FS_DAX
337 case Opt_dax:
338 mp->m_flags |= XFS_MOUNT_DAX;
339 break;
340#endif
341 default:
342 xfs_warn(mp, "unknown mount option [%s].", p);
343 return -EINVAL;
344 }
345 }
346
347 /*
348 * no recovery flag requires a read-only mount
349 */
350 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
351 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
352 xfs_warn(mp, "no-recovery mounts must be read-only.");
353 return -EINVAL;
354 }
355
356 if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
357 xfs_warn(mp,
358 "sunit and swidth options incompatible with the noalign option");
359 return -EINVAL;
360 }
361
362#ifndef CONFIG_XFS_QUOTA
363 if (XFS_IS_QUOTA_RUNNING(mp)) {
364 xfs_warn(mp, "quota support not available in this kernel.");
365 return -EINVAL;
366 }
367#endif
368
369 if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
370 xfs_warn(mp, "sunit and swidth must be specified together");
371 return -EINVAL;
372 }
373
374 if (dsunit && (dswidth % dsunit != 0)) {
375 xfs_warn(mp,
376 "stripe width (%d) must be a multiple of the stripe unit (%d)",
377 dswidth, dsunit);
378 return -EINVAL;
379 }
380
381done:
382 if (dsunit && !(mp->m_flags & XFS_MOUNT_NOALIGN)) {
383 /*
384 * At this point the superblock has not been read
385 * in, therefore we do not know the block size.
386 * Before the mount call ends we will convert
387 * these to FSBs.
388 */
389 mp->m_dalign = dsunit;
390 mp->m_swidth = dswidth;
391 }
392
393 if (mp->m_logbufs != -1 &&
394 mp->m_logbufs != 0 &&
395 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
396 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
397 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
398 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
399 return -EINVAL;
400 }
401 if (mp->m_logbsize != -1 &&
402 mp->m_logbsize != 0 &&
403 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
404 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
405 !is_power_of_2(mp->m_logbsize))) {
406 xfs_warn(mp,
407 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
408 mp->m_logbsize);
409 return -EINVAL;
410 }
411
412 if (iosizelog) {
413 if (iosizelog > XFS_MAX_IO_LOG ||
414 iosizelog < XFS_MIN_IO_LOG) {
415 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
416 iosizelog, XFS_MIN_IO_LOG,
417 XFS_MAX_IO_LOG);
418 return -EINVAL;
419 }
420
421 mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
422 mp->m_readio_log = iosizelog;
423 mp->m_writeio_log = iosizelog;
424 }
425
426 return 0;
427}
428
429struct proc_xfs_info {
430 uint64_t flag;
431 char *str;
432};
433
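/*
 * Reconstruct the option string shown in /proc/mounts from the current
 * mount flags and quota flags.
 */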
434STATIC void
435xfs_showargs(
436 struct xfs_mount *mp,
437 struct seq_file *m)
438{
439 static struct proc_xfs_info xfs_info_set[] = {
440 /* the few simple ones we can get from the mount struct */
441 { XFS_MOUNT_IKEEP, ",ikeep" },
442 { XFS_MOUNT_WSYNC, ",wsync" },
443 { XFS_MOUNT_NOALIGN, ",noalign" },
444 { XFS_MOUNT_SWALLOC, ",swalloc" },
445 { XFS_MOUNT_NOUUID, ",nouuid" },
446 { XFS_MOUNT_NORECOVERY, ",norecovery" },
447 { XFS_MOUNT_ATTR2, ",attr2" },
448 { XFS_MOUNT_FILESTREAMS, ",filestreams" },
449 { XFS_MOUNT_GRPID, ",grpid" },
450 { XFS_MOUNT_DISCARD, ",discard" },
451 { XFS_MOUNT_SMALL_INUMS, ",inode32" },
452 { XFS_MOUNT_DAX, ",dax" },
453 { 0, NULL }
454 };
455 static struct proc_xfs_info xfs_info_unset[] = {
456 /* the few simple ones we can get from the mount struct */
457 { XFS_MOUNT_COMPAT_IOSIZE, ",largeio" },
458 { XFS_MOUNT_SMALL_INUMS, ",inode64" },
459 { 0, NULL }
460 };
461 struct proc_xfs_info *xfs_infop;
462
463 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
464 if (mp->m_flags & xfs_infop->flag)
465 seq_puts(m, xfs_infop->str);
466 }
467 for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
468 if (!(mp->m_flags & xfs_infop->flag))
469 seq_puts(m, xfs_infop->str);
470 }
471
472 if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
473 seq_printf(m, ",allocsize=%dk",
474 (int)(1 << mp->m_writeio_log) >> 10);
475
476 if (mp->m_logbufs > 0)
477 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
478 if (mp->m_logbsize > 0)
479 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
480
481 if (mp->m_logname)
482 seq_show_option(m, "logdev", mp->m_logname);
483 if (mp->m_rtname)
484 seq_show_option(m, "rtdev", mp->m_rtname);
485
486 if (mp->m_dalign > 0)
487 seq_printf(m, ",sunit=%d",
488 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
489 if (mp->m_swidth > 0)
490 seq_printf(m, ",swidth=%d",
491 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
492
493 if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
494 seq_puts(m, ",usrquota");
495 else if (mp->m_qflags & XFS_UQUOTA_ACCT)
496 seq_puts(m, ",uqnoenforce");
497
498 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
499 if (mp->m_qflags & XFS_PQUOTA_ENFD)
500 seq_puts(m, ",prjquota");
501 else
502 seq_puts(m, ",pqnoenforce");
503 }
504 if (mp->m_qflags & XFS_GQUOTA_ACCT) {
505 if (mp->m_qflags & XFS_GQUOTA_ENFD)
506 seq_puts(m, ",grpquota");
507 else
508 seq_puts(m, ",gqnoenforce");
509 }
510
511 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
512 seq_puts(m, ",noquota");
513}
514
515static uint64_t
516xfs_max_file_offset(
517 unsigned int blockshift)
518{
519 unsigned int pagefactor = 1;
520 unsigned int bitshift = BITS_PER_LONG - 1;
521
522 /* Figure out the maximum file size. On Linux this can depend on
523 * the filesystem blocksize (on 32 bit platforms).
524 * __block_write_begin does this in an [unsigned] long long...
525 * page->index << (PAGE_SHIFT - bbits)
526 * So, for page sized blocks (4K on 32 bit platforms),
527 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
528 * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1)
529 * but for smaller blocksizes it is less (bbits = log2 bsize).
530 */
531
532#if BITS_PER_LONG == 32
533 ASSERT(sizeof(sector_t) == 8);
534 pagefactor = PAGE_SIZE;
535 bitshift = BITS_PER_LONG;
536#endif
537
538 return (((uint64_t)pagefactor) << bitshift) - 1;
539}
540
541/*
542 * Set parameters for inode allocation heuristics, taking into account
543 * filesystem size and inode32/inode64 mount options; i.e. specifically
544 * whether or not XFS_MOUNT_SMALL_INUMS is set.
545 *
546 * Inode allocation patterns are altered only if inode32 is requested
547 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
548 * If altered, XFS_MOUNT_32BITINODES is set as well.
549 *
550 * An agcount independent of that in the mount structure is provided
551 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
552 * to the potentially higher ag count.
553 *
554 * Returns the maximum AG index which may contain inodes.
555 */
556xfs_agnumber_t
557xfs_set_inode_alloc(
558 struct xfs_mount *mp,
559 xfs_agnumber_t agcount)
560{
561 xfs_agnumber_t index;
562 xfs_agnumber_t maxagi = 0;
563 xfs_sb_t *sbp = &mp->m_sb;
564 xfs_agnumber_t max_metadata;
565 xfs_agino_t agino;
566 xfs_ino_t ino;
567
568 /*
569 * Calculate how much should be reserved for inodes to meet
570 * the max inode percentage. Used only for inode32.
571 */
572 if (M_IGEO(mp)->maxicount) {
573 uint64_t icount;
574
575 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
576 do_div(icount, 100);
577 icount += sbp->sb_agblocks - 1;
578 do_div(icount, sbp->sb_agblocks);
579 max_metadata = icount;
580 } else {
581 max_metadata = agcount;
582 }
583
584 /* Get the last possible inode in the filesystem */
585 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
586 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
587
588 /*
589 * If user asked for no more than 32-bit inodes, and the fs is
590 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
591 * the allocator to accommodate the request.
592 */
593 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
594 mp->m_flags |= XFS_MOUNT_32BITINODES;
595 else
596 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
597
598 for (index = 0; index < agcount; index++) {
599 struct xfs_perag *pag;
600
601 ino = XFS_AGINO_TO_INO(mp, index, agino);
602
603 pag = xfs_perag_get(mp, index);
604
605 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
606 if (ino > XFS_MAXINUMBER_32) {
607 pag->pagi_inodeok = 0;
608 pag->pagf_metadata = 0;
609 } else {
610 pag->pagi_inodeok = 1;
611 maxagi++;
612 if (index < max_metadata)
613 pag->pagf_metadata = 1;
614 else
615 pag->pagf_metadata = 0;
616 }
617 } else {
618 pag->pagi_inodeok = 1;
619 pag->pagf_metadata = 0;
620 }
621
622 xfs_perag_put(pag);
623 }
624
625 return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
626}
627
628STATIC int
629xfs_blkdev_get(
630 xfs_mount_t *mp,
631 const char *name,
632 struct block_device **bdevp)
633{
634 int error = 0;
635
636 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
637 mp);
638 if (IS_ERR(*bdevp)) {
639 error = PTR_ERR(*bdevp);
640 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
641 }
642
643 return error;
644}
645
646STATIC void
647xfs_blkdev_put(
648 struct block_device *bdev)
649{
650 if (bdev)
651 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
652}
653
654void
655xfs_blkdev_issue_flush(
656 xfs_buftarg_t *buftarg)
657{
658 blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS, NULL);
659}
660
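/*
 * Tear down the buffer targets and drop the block and DAX device
 * references for the data, log and realtime volumes. An internal log
 * shares the data device target, so it is only freed when distinct.
 */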
661STATIC void
662xfs_close_devices(
663 struct xfs_mount *mp)
664{
665 struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
666
667 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
668 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
669 struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
670
671 xfs_free_buftarg(mp->m_logdev_targp);
672 xfs_blkdev_put(logdev);
673 fs_put_dax(dax_logdev);
674 }
675 if (mp->m_rtdev_targp) {
676 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
677 struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
678
679 xfs_free_buftarg(mp->m_rtdev_targp);
680 xfs_blkdev_put(rtdev);
681 fs_put_dax(dax_rtdev);
682 }
683 xfs_free_buftarg(mp->m_ddev_targp);
684 fs_put_dax(dax_ddev);
685}
686
687/*
688 * The file system configurations are:
689 * (1) device (partition) with data and internal log
690 * (2) logical volume with data and log subvolumes.
691 * (3) logical volume with data, log, and realtime subvolumes.
692 *
693 * We only have to handle opening the log and realtime volumes here if
694 * they are present. The data subvolume has already been opened by
695 * get_sb_bdev() and is stored in sb->s_bdev.
696 */
697STATIC int
698xfs_open_devices(
699 struct xfs_mount *mp)
700{
701 struct block_device *ddev = mp->m_super->s_bdev;
702 struct dax_device *dax_ddev = fs_dax_get_by_bdev(ddev);
703 struct dax_device *dax_logdev = NULL, *dax_rtdev = NULL;
704 struct block_device *logdev = NULL, *rtdev = NULL;
705 int error;
706
707 /*
708 * Open real time and log devices - order is important.
709 */
710 if (mp->m_logname) {
711 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
712 if (error)
713 goto out;
714 dax_logdev = fs_dax_get_by_bdev(logdev);
715 }
716
717 if (mp->m_rtname) {
718 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
719 if (error)
720 goto out_close_logdev;
721
722 if (rtdev == ddev || rtdev == logdev) {
723 xfs_warn(mp,
724 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
725 error = -EINVAL;
726 goto out_close_rtdev;
727 }
728 dax_rtdev = fs_dax_get_by_bdev(rtdev);
729 }
730
731 /*
732 * Setup xfs_mount buffer target pointers
733 */
734 error = -ENOMEM;
735 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
736 if (!mp->m_ddev_targp)
737 goto out_close_rtdev;
738
739 if (rtdev) {
740 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
741 if (!mp->m_rtdev_targp)
742 goto out_free_ddev_targ;
743 }
744
745 if (logdev && logdev != ddev) {
746 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
747 if (!mp->m_logdev_targp)
748 goto out_free_rtdev_targ;
749 } else {
750 mp->m_logdev_targp = mp->m_ddev_targp;
751 }
752
753 return 0;
754
755 out_free_rtdev_targ:
756 if (mp->m_rtdev_targp)
757 xfs_free_buftarg(mp->m_rtdev_targp);
758 out_free_ddev_targ:
759 xfs_free_buftarg(mp->m_ddev_targp);
760 out_close_rtdev:
761 xfs_blkdev_put(rtdev);
762 fs_put_dax(dax_rtdev);
763 out_close_logdev:
764 if (logdev && logdev != ddev) {
765 xfs_blkdev_put(logdev);
766 fs_put_dax(dax_logdev);
767 }
768 out:
769 fs_put_dax(dax_ddev);
770 return error;
771}
772
773/*
774 * Setup xfs_mount buffer target pointers based on superblock
775 */
776STATIC int
777xfs_setup_devices(
778 struct xfs_mount *mp)
779{
780 int error;
781
782 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
783 if (error)
784 return error;
785
786 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
787 unsigned int log_sector_size = BBSIZE;
788
789 if (xfs_sb_version_hassector(&mp->m_sb))
790 log_sector_size = mp->m_sb.sb_logsectsize;
791 error = xfs_setsize_buftarg(mp->m_logdev_targp,
792 log_sector_size);
793 if (error)
794 return error;
795 }
796 if (mp->m_rtdev_targp) {
797 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
798 mp->m_sb.sb_sectsize);
799 if (error)
800 return error;
801 }
802
803 return 0;
804}
805
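/*
 * Create the per-mount workqueues used for buffer completion, unwritten
 * extent conversion, CIL pushes, inode reclaim, eofblocks trimming and
 * periodic sync work. They are released by xfs_destroy_mount_workqueues().
 */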
806STATIC int
807xfs_init_mount_workqueues(
808 struct xfs_mount *mp)
809{
810 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
811 WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
812 if (!mp->m_buf_workqueue)
813 goto out;
814
815 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
816 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
817 if (!mp->m_unwritten_workqueue)
818 goto out_destroy_buf;
819
820 mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
821 WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND,
822 0, mp->m_fsname);
823 if (!mp->m_cil_workqueue)
824 goto out_destroy_unwritten;
825
826 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
827 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
828 if (!mp->m_reclaim_workqueue)
829 goto out_destroy_cil;
830
831 mp->m_eofblocks_workqueue = alloc_workqueue("xfs-eofblocks/%s",
832 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
833 if (!mp->m_eofblocks_workqueue)
834 goto out_destroy_reclaim;
835
836 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s", WQ_FREEZABLE, 0,
837 mp->m_fsname);
838 if (!mp->m_sync_workqueue)
839 goto out_destroy_eofb;
840
841 return 0;
842
843out_destroy_eofb:
844 destroy_workqueue(mp->m_eofblocks_workqueue);
845out_destroy_reclaim:
846 destroy_workqueue(mp->m_reclaim_workqueue);
847out_destroy_cil:
848 destroy_workqueue(mp->m_cil_workqueue);
849out_destroy_unwritten:
850 destroy_workqueue(mp->m_unwritten_workqueue);
851out_destroy_buf:
852 destroy_workqueue(mp->m_buf_workqueue);
853out:
854 return -ENOMEM;
855}
856
857STATIC void
858xfs_destroy_mount_workqueues(
859 struct xfs_mount *mp)
860{
861 destroy_workqueue(mp->m_sync_workqueue);
862 destroy_workqueue(mp->m_eofblocks_workqueue);
863 destroy_workqueue(mp->m_reclaim_workqueue);
864 destroy_workqueue(mp->m_cil_workqueue);
865 destroy_workqueue(mp->m_unwritten_workqueue);
866 destroy_workqueue(mp->m_buf_workqueue);
867}
868
869/*
870 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
871 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
872 * for IO to complete so that we effectively throttle multiple callers to the
873 * rate at which IO is completing.
874 */
875void
876xfs_flush_inodes(
877 struct xfs_mount *mp)
878{
879 struct super_block *sb = mp->m_super;
880
881 if (down_read_trylock(&sb->s_umount)) {
882 sync_inodes_sb(sb);
883 up_read(&sb->s_umount);
884 }
885}
886
887/* Catch misguided souls that try to use this interface on XFS */
888STATIC struct inode *
889xfs_fs_alloc_inode(
890 struct super_block *sb)
891{
892 BUG();
893 return NULL;
894}
895
896#ifdef DEBUG
897static void
898xfs_check_delalloc(
899 struct xfs_inode *ip,
900 int whichfork)
901{
902 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
903 struct xfs_bmbt_irec got;
904 struct xfs_iext_cursor icur;
905
906 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
907 return;
908 do {
909 if (isnullstartblock(got.br_startblock)) {
910 xfs_warn(ip->i_mount,
911 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
912 ip->i_ino,
913 whichfork == XFS_DATA_FORK ? "data" : "cow",
914 got.br_startoff, got.br_blockcount);
915 }
916 } while (xfs_iext_next_extent(ifp, &icur, &got));
917}
918#else
919#define xfs_check_delalloc(ip, whichfork) do { } while (0)
920#endif
921
922/*
923 * Now that the generic code is guaranteed not to be accessing
924 * the linux inode, we can inactivate and reclaim the inode.
925 */
926STATIC void
927xfs_fs_destroy_inode(
928 struct inode *inode)
929{
930 struct xfs_inode *ip = XFS_I(inode);
931
932 trace_xfs_destroy_inode(ip);
933
934 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
935 XFS_STATS_INC(ip->i_mount, vn_rele);
936 XFS_STATS_INC(ip->i_mount, vn_remove);
937
938 xfs_inactive(ip);
939
940 if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
941 xfs_check_delalloc(ip, XFS_DATA_FORK);
942 xfs_check_delalloc(ip, XFS_COW_FORK);
943 ASSERT(0);
944 }
945
946 XFS_STATS_INC(ip->i_mount, vn_reclaim);
947
948 /*
949 * We should never get here with one of the reclaim flags already set.
950 */
951 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
952 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
953
954 /*
955 * We always use background reclaim here because even if the
956 * inode is clean, it still may be under IO and hence we have
957 * to take the flush lock. The background reclaim path handles
958 * this more efficiently than we can here, so simply let background
959 * reclaim tear down all inodes.
960 */
961 xfs_inode_set_reclaim_tag(ip);
962}
963
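/*
 * On lazytime mounts the VFS calls ->dirty_inode with I_DIRTY_SYNC when a
 * deferred timestamp update must finally be made persistent. Log the inode
 * timestamps in a transaction so they reach the on-disk inode.
 */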
964static void
965xfs_fs_dirty_inode(
966 struct inode *inode,
967 int flag)
968{
969 struct xfs_inode *ip = XFS_I(inode);
970 struct xfs_mount *mp = ip->i_mount;
971 struct xfs_trans *tp;
972
973 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
974 return;
975 if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
976 return;
977
978 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
979 return;
980 xfs_ilock(ip, XFS_ILOCK_EXCL);
981 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
982 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
983 xfs_trans_commit(tp);
984}
985
986/*
987 * Slab object creation initialisation for the XFS inode.
988 * This covers only the idempotent fields in the XFS inode;
989 * all other fields need to be initialised on allocation
990 * from the slab. This avoids the need to repeatedly initialise
991 * fields in the xfs inode that are left in the initialised state
992 * when the inode is freed.
993 */
994STATIC void
995xfs_fs_inode_init_once(
996 void *inode)
997{
998 struct xfs_inode *ip = inode;
999
1000 memset(ip, 0, sizeof(struct xfs_inode));
1001
1002 /* vfs inode */
1003 inode_init_once(VFS_I(ip));
1004
1005 /* xfs inode */
1006 atomic_set(&ip->i_pincount, 0);
1007 spin_lock_init(&ip->i_flags_lock);
1008
1009 mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1010 "xfsino", ip->i_ino);
1011 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
1012 "xfsino", ip->i_ino);
1013}
1014
1015/*
1016 * We do an unlocked check for XFS_IDONTCACHE here because we are already
1017 * serialised against cache hits here via the inode->i_lock and igrab() in
1018 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
1019 * racing with us, and it avoids needing to grab a spinlock here for every inode
1020 * we drop the final reference on.
1021 */
1022STATIC int
1023xfs_fs_drop_inode(
1024 struct inode *inode)
1025{
1026 struct xfs_inode *ip = XFS_I(inode);
1027
1028 /*
1029 * If this unlinked inode is in the middle of recovery, don't
1030 * drop the inode just yet; log recovery will take care of
1031 * that. See the comment for this inode flag.
1032 */
1033 if (ip->i_flags & XFS_IRECOVERY) {
1034 ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
1035 return 0;
1036 }
1037
1038 return generic_drop_inode(inode) || (ip->i_flags & XFS_IDONTCACHE);
1039}
1040
1041STATIC void
1042xfs_free_fsname(
1043 struct xfs_mount *mp)
1044{
1045 kfree(mp->m_fsname);
1046 kfree(mp->m_rtname);
1047 kfree(mp->m_logname);
1048}
1049
1050STATIC int
1051xfs_fs_sync_fs(
1052 struct super_block *sb,
1053 int wait)
1054{
1055 struct xfs_mount *mp = XFS_M(sb);
1056
1057 /*
1058 * Doing anything during the async pass would be counterproductive.
1059 */
1060 if (!wait)
1061 return 0;
1062
1063 xfs_log_force(mp, XFS_LOG_SYNC);
1064 if (laptop_mode) {
1065 /*
1066 * The disk must be active because we're syncing.
1067 * We schedule log work now (now that the disk is
1068 * active) instead of later (when it might not be).
1069 */
1070 flush_delayed_work(&mp->m_log->l_work);
1071 }
1072
1073 return 0;
1074}
1075
1076STATIC int
1077xfs_fs_statfs(
1078 struct dentry *dentry,
1079 struct kstatfs *statp)
1080{
1081 struct xfs_mount *mp = XFS_M(dentry->d_sb);
1082 xfs_sb_t *sbp = &mp->m_sb;
1083 struct xfs_inode *ip = XFS_I(d_inode(dentry));
1084 uint64_t fakeinos, id;
1085 uint64_t icount;
1086 uint64_t ifree;
1087 uint64_t fdblocks;
1088 xfs_extlen_t lsize;
1089 int64_t ffree;
1090
1091 statp->f_type = XFS_SUPER_MAGIC;
1092 statp->f_namelen = MAXNAMELEN - 1;
1093
1094 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1095 statp->f_fsid.val[0] = (u32)id;
1096 statp->f_fsid.val[1] = (u32)(id >> 32);
1097
1098 icount = percpu_counter_sum(&mp->m_icount);
1099 ifree = percpu_counter_sum(&mp->m_ifree);
1100 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
1101
1102 spin_lock(&mp->m_sb_lock);
1103 statp->f_bsize = sbp->sb_blocksize;
1104 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1105 statp->f_blocks = sbp->sb_dblocks - lsize;
1106 spin_unlock(&mp->m_sb_lock);
1107
1108 statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
1109 statp->f_bavail = statp->f_bfree;
1110
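 /*
  * Estimate how many more inodes could still be created: every free
  * block could in principle be used for new inodes, capped by the
  * filesystem-wide limits applied below.
  */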
1111 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
1112 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
1113 if (M_IGEO(mp)->maxicount)
1114 statp->f_files = min_t(typeof(statp->f_files),
1115 statp->f_files,
1116 M_IGEO(mp)->maxicount);
1117
1118 /* If sb_icount overshot maxicount, report actual allocation */
1119 statp->f_files = max_t(typeof(statp->f_files),
1120 statp->f_files,
1121 sbp->sb_icount);
1122
1123 /* make sure statp->f_ffree does not underflow */
1124 ffree = statp->f_files - (icount - ifree);
1125 statp->f_ffree = max_t(int64_t, ffree, 0);
1126
1127
1128 if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1129 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
1130 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
1131 xfs_qm_statvfs(ip, statp);
1132
1133 if (XFS_IS_REALTIME_MOUNT(mp) &&
1134 (ip->i_d.di_flags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
1135 statp->f_blocks = sbp->sb_rblocks;
1136 statp->f_bavail = statp->f_bfree =
1137 sbp->sb_frextents * sbp->sb_rextsize;
1138 }
1139
1140 return 0;
1141}
1142
1143STATIC void
1144xfs_save_resvblks(struct xfs_mount *mp)
1145{
1146 uint64_t resblks = 0;
1147
1148 mp->m_resblks_save = mp->m_resblks;
1149 xfs_reserve_blocks(mp, &resblks, NULL);
1150}
1151
1152STATIC void
1153xfs_restore_resvblks(struct xfs_mount *mp)
1154{
1155 uint64_t resblks;
1156
1157 if (mp->m_resblks_save) {
1158 resblks = mp->m_resblks_save;
1159 mp->m_resblks_save = 0;
1160 } else
1161 resblks = xfs_default_resblks(mp);
1162
1163 xfs_reserve_blocks(mp, &resblks, NULL);
1164}
1165
1166/*
1167 * Trigger writeback of all the dirty metadata in the file system.
1168 *
1169 * This ensures that the metadata is written to their location on disk rather
1170 * than just existing in transactions in the log. This means after a quiesce
1171 * there is no log replay required to write the inodes to disk - this is the
1172 * primary difference between a sync and a quiesce.
1173 *
1174 * Note: xfs_log_quiesce() stops background log work - the callers must ensure
1175 * it is started again when appropriate.
1176 */
1177void
1178xfs_quiesce_attr(
1179 struct xfs_mount *mp)
1180{
1181 int error = 0;
1182
1183 /* wait for all modifications to complete */
1184 while (atomic_read(&mp->m_active_trans) > 0)
1185 delay(100);
1186
1187 /* force the log to unpin objects from the now complete transactions */
1188 xfs_log_force(mp, XFS_LOG_SYNC);
1189
1190 /* reclaim inodes to do any IO before the freeze completes */
1191 xfs_reclaim_inodes(mp, 0);
1192 xfs_reclaim_inodes(mp, SYNC_WAIT);
1193
1194 /* Push the superblock and write an unmount record */
1195 error = xfs_log_sbcount(mp);
1196 if (error)
1197 xfs_warn(mp, "xfs_quiesce_attr: failed to log sb changes. "
1198 "Frozen image may not be consistent.");
1199 /*
1200 * Just warn here until the VFS can correctly support
1201 * read-only remount without racing.
1202 */
1203 WARN_ON(atomic_read(&mp->m_active_trans) != 0);
1204
1205 xfs_log_quiesce(mp);
1206}
1207
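/*
 * Parse the proposed remount options into a throwaway xfs_mount so that
 * invalid options are rejected before anything on the live mount changes.
 */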
1208STATIC int
1209xfs_test_remount_options(
1210 struct super_block *sb,
1211 char *options)
1212{
1213 int error = 0;
1214 struct xfs_mount *tmp_mp;
1215
1216 tmp_mp = kmem_zalloc(sizeof(*tmp_mp), KM_MAYFAIL);
1217 if (!tmp_mp)
1218 return -ENOMEM;
1219
1220 tmp_mp->m_super = sb;
1221 error = xfs_parseargs(tmp_mp, options);
1222 xfs_free_fsname(tmp_mp);
1223 kmem_free(tmp_mp);
1224
1225 return error;
1226}
1227
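/*
 * Handle a remount request. Only the inode32/inode64 allocation policy is
 * changed directly from the option string; ro->rw and rw->ro transitions
 * are driven by SB_RDONLY in *flags, and all other options are silently
 * ignored (see the comment in the default case below). Example invocation
 * (mount point is illustrative only):
 *
 * mount -o remount,inode64 /mnt/data
 */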
1228STATIC int
1229xfs_fs_remount(
1230 struct super_block *sb,
1231 int *flags,
1232 char *options)
1233{
1234 struct xfs_mount *mp = XFS_M(sb);
1235 xfs_sb_t *sbp = &mp->m_sb;
1236 substring_t args[MAX_OPT_ARGS];
1237 char *p;
1238 int error;
1239
1240 /* First, check for complete junk; i.e. invalid options */
1241 error = xfs_test_remount_options(sb, options);
1242 if (error)
1243 return error;
1244
1245 sync_filesystem(sb);
1246 while ((p = strsep(&options, ",")) != NULL) {
1247 int token;
1248
1249 if (!*p)
1250 continue;
1251
1252 token = match_token(p, tokens, args);
1253 switch (token) {
1254 case Opt_inode64:
1255 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1256 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1257 break;
1258 case Opt_inode32:
1259 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1260 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1261 break;
1262 default:
1263 /*
1264 * Logically we would return an error here to prevent
1265 * users from believing they might have changed
1266 * mount options using remount which can't be changed.
1267 *
1268 * But unfortunately mount(8) adds all options from
1269 * mtab and fstab to the mount arguments in some cases
1270 * so we can't blindly reject options, but have to
1271 * check for each specified option if it actually
1272 * differs from the currently set option and only
1273 * reject it if that's the case.
1274 *
1275 * Until that is implemented we return success for
1276 * every remount request, and silently ignore all
1277 * options that we can't actually change.
1278 */
1279#if 0
1280 xfs_info(mp,
1281 "mount option \"%s\" not supported for remount", p);
1282 return -EINVAL;
1283#else
1284 break;
1285#endif
1286 }
1287 }
1288
1289 /* ro -> rw */
1290 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
1291 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1292 xfs_warn(mp,
1293 "ro->rw transition prohibited on norecovery mount");
1294 return -EINVAL;
1295 }
1296
1297 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1298 xfs_sb_has_ro_compat_feature(sbp,
1299 XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1300 xfs_warn(mp,
1301"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1302 (sbp->sb_features_ro_compat &
1303 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1304 return -EINVAL;
1305 }
1306
1307 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1308
1309 /*
1310 * If this is the first remount to writeable state we
1311 * might have some superblock changes to update.
1312 */
1313 if (mp->m_update_sb) {
1314 error = xfs_sync_sb(mp, false);
1315 if (error) {
1316 xfs_warn(mp, "failed to write sb changes");
1317 return error;
1318 }
1319 mp->m_update_sb = false;
1320 }
1321
1322 /*
1323 * Fill out the reserve pool if it is empty. Use the stashed
1324 * value if it is non-zero, otherwise go with the default.
1325 */
1326 xfs_restore_resvblks(mp);
1327 xfs_log_work_queue(mp);
1328
1329 /* Recover any CoW blocks that never got remapped. */
1330 error = xfs_reflink_recover_cow(mp);
1331 if (error) {
1332 xfs_err(mp,
1333 "Error %d recovering leftover CoW allocations.", error);
1334 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1335 return error;
1336 }
1337 xfs_start_block_reaping(mp);
1338
1339 /* Create the per-AG metadata reservation pool. */
1340 error = xfs_fs_reserve_ag_blocks(mp);
1341 if (error && error != -ENOSPC)
1342 return error;
1343 }
1344
1345 /* rw -> ro */
1346 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
1347 /*
1348 * Cancel background eofb scanning so it cannot race with the
1349 * final log force+buftarg wait and deadlock the remount.
1350 */
1351 xfs_stop_block_reaping(mp);
1352
1353 /* Get rid of any leftover CoW reservations... */
1354 error = xfs_icache_free_cowblocks(mp, NULL);
1355 if (error) {
1356 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1357 return error;
1358 }
1359
1360 /* Free the per-AG metadata reservation pool. */
1361 error = xfs_fs_unreserve_ag_blocks(mp);
1362 if (error) {
1363 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1364 return error;
1365 }
1366
1367 /*
1368 * Before we sync the metadata, we need to free up the reserve
1369 * block pool so that the used block count in the superblock on
1370 * disk is correct at the end of the remount. Stash the current
1371 * reserve pool size so that if we get remounted rw, we can
1372 * return it to the same size.
1373 */
1374 xfs_save_resvblks(mp);
1375
1376 xfs_quiesce_attr(mp);
1377 mp->m_flags |= XFS_MOUNT_RDONLY;
1378 }
1379
1380 return 0;
1381}
1382
1383/*
1384 * Second stage of a freeze. The data is already frozen so we only
1385 * need to take care of the metadata. Once that's done sync the superblock
1386 * to the log to dirty it in case of a crash while frozen. This ensures that we
1387 * will recover the unlinked inode lists on the next mount.
1388 */
1389STATIC int
1390xfs_fs_freeze(
1391 struct super_block *sb)
1392{
1393 struct xfs_mount *mp = XFS_M(sb);
1394
1395 xfs_stop_block_reaping(mp);
1396 xfs_save_resvblks(mp);
1397 xfs_quiesce_attr(mp);
1398 return xfs_sync_sb(mp, true);
1399}
1400
1401STATIC int
1402xfs_fs_unfreeze(
1403 struct super_block *sb)
1404{
1405 struct xfs_mount *mp = XFS_M(sb);
1406
1407 xfs_restore_resvblks(mp);
1408 xfs_log_work_queue(mp);
1409 xfs_start_block_reaping(mp);
1410 return 0;
1411}
1412
1413STATIC int
1414xfs_fs_show_options(
1415 struct seq_file *m,
1416 struct dentry *root)
1417{
1418 xfs_showargs(XFS_M(root->d_sb), m);
1419 return 0;
1420}
1421
1422/*
1423 * This function fills in xfs_mount_t fields based on mount args.
1424 * Note: the superblock _has_ now been read in.
1425 */
1426STATIC int
1427xfs_finish_flags(
1428 struct xfs_mount *mp)
1429{
1430 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1431
1432 /* Fail a mount where the logbuf is smaller than the log stripe */
1433 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1434 if (mp->m_logbsize <= 0 &&
1435 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1436 mp->m_logbsize = mp->m_sb.sb_logsunit;
1437 } else if (mp->m_logbsize > 0 &&
1438 mp->m_logbsize < mp->m_sb.sb_logsunit) {
1439 xfs_warn(mp,
1440 "logbuf size must be greater than or equal to log stripe size");
1441 return -EINVAL;
1442 }
1443 } else {
1444 /* Fail a mount if the logbuf is larger than 32K */
1445 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1446 xfs_warn(mp,
1447 "logbuf size for version 1 logs must be 16K or 32K");
1448 return -EINVAL;
1449 }
1450 }
1451
1452 /*
1453 * V5 filesystems always use attr2 format for attributes.
1454 */
1455 if (xfs_sb_version_hascrc(&mp->m_sb) &&
1456 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
1457 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
1458 "attr2 is always enabled for V5 filesystems.");
1459 return -EINVAL;
1460 }
1461
1462 /*
1463 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1464 * told by noattr2 to turn it off
1465 */
1466 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1467 !(mp->m_flags & XFS_MOUNT_NOATTR2))
1468 mp->m_flags |= XFS_MOUNT_ATTR2;
1469
1470 /*
1471 * prohibit r/w mounts of read-only filesystems
1472 */
1473 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1474 xfs_warn(mp,
1475 "cannot mount a read-only filesystem as read-write");
1476 return -EROFS;
1477 }
1478
1479 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
1480 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
1481 !xfs_sb_version_has_pquotino(&mp->m_sb)) {
1482 xfs_warn(mp,
1483 "Super block does not support project and group quota together");
1484 return -EINVAL;
1485 }
1486
1487 return 0;
1488}
1489
1490static int
1491xfs_init_percpu_counters(
1492 struct xfs_mount *mp)
1493{
1494 int error;
1495
1496 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
1497 if (error)
1498 return -ENOMEM;
1499
1500 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
1501 if (error)
1502 goto free_icount;
1503
1504 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
1505 if (error)
1506 goto free_ifree;
1507
1508 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
1509 if (error)
1510 goto free_fdblocks;
1511
1512 return 0;
1513
1514free_fdblocks:
1515 percpu_counter_destroy(&mp->m_fdblocks);
1516free_ifree:
1517 percpu_counter_destroy(&mp->m_ifree);
1518free_icount:
1519 percpu_counter_destroy(&mp->m_icount);
1520 return -ENOMEM;
1521}
1522
1523void
1524xfs_reinit_percpu_counters(
1525 struct xfs_mount *mp)
1526{
1527 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1528 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1529 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1530}
1531
1532static void
1533xfs_destroy_percpu_counters(
1534 struct xfs_mount *mp)
1535{
1536 percpu_counter_destroy(&mp->m_icount);
1537 percpu_counter_destroy(&mp->m_ifree);
1538 percpu_counter_destroy(&mp->m_fdblocks);
1539 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1540 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1541 percpu_counter_destroy(&mp->m_delalloc_blks);
1542}
1543
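/*
 * Allocate an xfs_mount and do the low-level initialisation (locks, delayed
 * work items, per-AG radix tree) that must happen before mount options are
 * parsed and before the superblock is read.
 */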
1544static struct xfs_mount *
1545xfs_mount_alloc(
1546 struct super_block *sb)
1547{
1548 struct xfs_mount *mp;
1549
1550 mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1551 if (!mp)
1552 return NULL;
1553
1554 mp->m_super = sb;
1555 spin_lock_init(&mp->m_sb_lock);
1556 spin_lock_init(&mp->m_agirotor_lock);
1557 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1558 spin_lock_init(&mp->m_perag_lock);
1559 mutex_init(&mp->m_growlock);
1560 atomic_set(&mp->m_active_trans, 0);
1561 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1562 INIT_DELAYED_WORK(&mp->m_eofblocks_work, xfs_eofblocks_worker);
1563 INIT_DELAYED_WORK(&mp->m_cowblocks_work, xfs_cowblocks_worker);
1564 mp->m_kobj.kobject.kset = xfs_kset;
1565 /*
1566 * We don't create the finobt per-ag space reservation until after log
1567 * recovery, so we must set this to true so that an ifree transaction
1568 * started during log recovery will not depend on space reservations
1569 * for finobt expansion.
1570 */
1571 mp->m_finobt_nores = true;
1572 return mp;
1573}
1574
1575
1576STATIC int
1577xfs_fs_fill_super(
1578 struct super_block *sb,
1579 void *data,
1580 int silent)
1581{
1582 struct inode *root;
1583 struct xfs_mount *mp = NULL;
1584 int flags = 0, error = -ENOMEM;
1585
1586 /*
1587 * allocate mp and do all low-level struct initializations before we
1588 * attach it to the super
1589 */
1590 mp = xfs_mount_alloc(sb);
1591 if (!mp)
1592 goto out;
1593 sb->s_fs_info = mp;
1594
1595 error = xfs_parseargs(mp, (char *)data);
1596 if (error)
1597 goto out_free_fsname;
1598
1599 sb_min_blocksize(sb, BBSIZE);
1600 sb->s_xattr = xfs_xattr_handlers;
1601 sb->s_export_op = &xfs_export_operations;
1602#ifdef CONFIG_XFS_QUOTA
1603 sb->s_qcop = &xfs_quotactl_operations;
1604 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1605#endif
1606 sb->s_op = &xfs_super_operations;
1607
1608 /*
1609 * Delay mount work if the debug hook is set. This is debug
1610 * instrumentation to coordinate simulation of xfs mount failures with
1611 * VFS superblock operations.
1612 */
1613 if (xfs_globals.mount_delay) {
1614 xfs_notice(mp, "Delaying mount for %d seconds.",
1615 xfs_globals.mount_delay);
1616 msleep(xfs_globals.mount_delay * 1000);
1617 }
1618
1619 if (silent)
1620 flags |= XFS_MFSI_QUIET;
1621
1622 error = xfs_open_devices(mp);
1623 if (error)
1624 goto out_free_fsname;
1625
1626 error = xfs_init_mount_workqueues(mp);
1627 if (error)
1628 goto out_close_devices;
1629
1630 error = xfs_init_percpu_counters(mp);
1631 if (error)
1632 goto out_destroy_workqueues;
1633
1634 /* Allocate stats memory before we do operations that might use it */
1635 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1636 if (!mp->m_stats.xs_stats) {
1637 error = -ENOMEM;
1638 goto out_destroy_counters;
1639 }
1640
1641 error = xfs_readsb(mp, flags);
1642 if (error)
1643 goto out_free_stats;
1644
1645 error = xfs_finish_flags(mp);
1646 if (error)
1647 goto out_free_sb;
1648
1649 error = xfs_setup_devices(mp);
1650 if (error)
1651 goto out_free_sb;
1652
1653 error = xfs_filestream_mount(mp);
1654 if (error)
1655 goto out_free_sb;
1656
1657 /*
1658 * We must configure the block size in the superblock before we run the
1659 * full mount process, as the mount process can look up and cache inodes.
1660 */
1661 sb->s_magic = XFS_SUPER_MAGIC;
1662 sb->s_blocksize = mp->m_sb.sb_blocksize;
1663 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1664 sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1665 sb->s_max_links = XFS_MAXLINK;
1666 sb->s_time_gran = 1;
1667 sb->s_time_min = S32_MIN;
1668 sb->s_time_max = S32_MAX;
1669 sb->s_iflags |= SB_I_CGROUPWB;
1670
1671 set_posix_acl_flag(sb);
1672
1673 /* version 5 superblocks support inode version counters. */
1674 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1675 sb->s_flags |= SB_I_VERSION;
1676
1677 if (mp->m_flags & XFS_MOUNT_DAX) {
1678 bool rtdev_is_dax = false, datadev_is_dax;
1679
1680 xfs_warn(mp,
1681 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1682
1683 datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1684 sb->s_blocksize);
1685 if (mp->m_rtdev_targp)
1686 rtdev_is_dax = bdev_dax_supported(
1687 mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1688 if (!rtdev_is_dax && !datadev_is_dax) {
1689 xfs_alert(mp,
1690 "DAX unsupported by block device. Turning off DAX.");
1691 mp->m_flags &= ~XFS_MOUNT_DAX;
1692 }
1693 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1694 xfs_alert(mp,
1695 "DAX and reflink cannot be used together!");
1696 error = -EINVAL;
1697 goto out_filestream_unmount;
1698 }
1699 }
1700
1701 if (mp->m_flags & XFS_MOUNT_DISCARD) {
1702 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1703
1704 if (!blk_queue_discard(q)) {
1705 xfs_warn(mp, "mounting with \"discard\" option, but "
1706 "the device does not support discard");
1707 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1708 }
1709 }
1710
1711 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1712 if (mp->m_sb.sb_rblocks) {
1713 xfs_alert(mp,
1714 "reflink not compatible with realtime device!");
1715 error = -EINVAL;
1716 goto out_filestream_unmount;
1717 }
1718
1719 if (xfs_globals.always_cow) {
1720 xfs_info(mp, "using DEBUG-only always_cow mode.");
1721 mp->m_always_cow = true;
1722 }
1723 }
1724
1725 if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1726 xfs_alert(mp,
1727 "reverse mapping btree not compatible with realtime device!");
1728 error = -EINVAL;
1729 goto out_filestream_unmount;
1730 }
1731
1732 error = xfs_mountfs(mp);
1733 if (error)
1734 goto out_filestream_unmount;
1735
1736 root = igrab(VFS_I(mp->m_rootip));
1737 if (!root) {
1738 error = -ENOENT;
1739 goto out_unmount;
1740 }
1741 sb->s_root = d_make_root(root);
1742 if (!sb->s_root) {
1743 error = -ENOMEM;
1744 goto out_unmount;
1745 }
1746
1747 return 0;
1748
1749 out_filestream_unmount:
1750 xfs_filestream_unmount(mp);
1751 out_free_sb:
1752 xfs_freesb(mp);
1753 out_free_stats:
1754 free_percpu(mp->m_stats.xs_stats);
1755 out_destroy_counters:
1756 xfs_destroy_percpu_counters(mp);
1757 out_destroy_workqueues:
1758 xfs_destroy_mount_workqueues(mp);
1759 out_close_devices:
1760 xfs_close_devices(mp);
1761 out_free_fsname:
1762 sb->s_fs_info = NULL;
1763 xfs_free_fsname(mp);
1764 kfree(mp);
1765 out:
1766 return error;
1767
1768 out_unmount:
1769 xfs_filestream_unmount(mp);
1770 xfs_unmountfs(mp);
1771 goto out_free_sb;
1772}
1773
1774STATIC void
1775xfs_fs_put_super(
1776 struct super_block *sb)
1777{
1778 struct xfs_mount *mp = XFS_M(sb);
1779
1780 /* if ->fill_super failed, we have no mount to tear down */
1781 if (!sb->s_fs_info)
1782 return;
1783
1784 xfs_notice(mp, "Unmounting Filesystem");
1785 xfs_filestream_unmount(mp);
1786 xfs_unmountfs(mp);
1787
1788 xfs_freesb(mp);
1789 free_percpu(mp->m_stats.xs_stats);
1790 xfs_destroy_percpu_counters(mp);
1791 xfs_destroy_mount_workqueues(mp);
1792 xfs_close_devices(mp);
1793
1794 sb->s_fs_info = NULL;
1795 xfs_free_fsname(mp);
1796 kfree(mp);
1797}
1798
1799STATIC struct dentry *
1800xfs_fs_mount(
1801 struct file_system_type *fs_type,
1802 int flags,
1803 const char *dev_name,
1804 void *data)
1805{
1806 return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1807}
1808
1809static long
1810xfs_fs_nr_cached_objects(
1811 struct super_block *sb,
1812 struct shrink_control *sc)
1813{
1814 /* Paranoia: catch incorrect calls during mount setup or teardown */
1815 if (WARN_ON_ONCE(!sb->s_fs_info))
1816 return 0;
1817 return xfs_reclaim_inodes_count(XFS_M(sb));
1818}
1819
1820static long
1821xfs_fs_free_cached_objects(
1822 struct super_block *sb,
1823 struct shrink_control *sc)
1824{
1825 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1826}
1827
1828static const struct super_operations xfs_super_operations = {
1829 .alloc_inode = xfs_fs_alloc_inode,
1830 .destroy_inode = xfs_fs_destroy_inode,
1831 .dirty_inode = xfs_fs_dirty_inode,
1832 .drop_inode = xfs_fs_drop_inode,
1833 .put_super = xfs_fs_put_super,
1834 .sync_fs = xfs_fs_sync_fs,
1835 .freeze_fs = xfs_fs_freeze,
1836 .unfreeze_fs = xfs_fs_unfreeze,
1837 .statfs = xfs_fs_statfs,
1838 .remount_fs = xfs_fs_remount,
1839 .show_options = xfs_fs_show_options,
1840 .nr_cached_objects = xfs_fs_nr_cached_objects,
1841 .free_cached_objects = xfs_fs_free_cached_objects,
1842};
1843
1844static struct file_system_type xfs_fs_type = {
1845 .owner = THIS_MODULE,
1846 .name = "xfs",
1847 .mount = xfs_fs_mount,
1848 .kill_sb = kill_block_super,
1849 .fs_flags = FS_REQUIRES_DEV,
1850};
1851MODULE_ALIAS_FS("xfs");
1852
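/*
 * Create the global slab caches (zones) and the ioend bio_set shared by all
 * XFS mounts; xfs_destroy_zones() releases them again.
 */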
1853STATIC int __init
1854xfs_init_zones(void)
1855{
1856 if (bioset_init(&xfs_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
1857 offsetof(struct xfs_ioend, io_inline_bio),
1858 BIOSET_NEED_BVECS))
1859 goto out;
1860
1861 xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1862 "xfs_log_ticket");
1863 if (!xfs_log_ticket_zone)
1864 goto out_free_ioend_bioset;
1865
1866 xfs_bmap_free_item_zone = kmem_zone_init(
1867 sizeof(struct xfs_extent_free_item),
1868 "xfs_bmap_free_item");
1869 if (!xfs_bmap_free_item_zone)
1870 goto out_destroy_log_ticket_zone;
1871
1872 xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1873 "xfs_btree_cur");
1874 if (!xfs_btree_cur_zone)
1875 goto out_destroy_bmap_free_item_zone;
1876
1877 xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1878 "xfs_da_state");
1879 if (!xfs_da_state_zone)
1880 goto out_destroy_btree_cur_zone;
1881
1882 xfs_ifork_zone = kmem_zone_init(sizeof(struct xfs_ifork), "xfs_ifork");
1883 if (!xfs_ifork_zone)
1884 goto out_destroy_da_state_zone;
1885
1886 xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1887 if (!xfs_trans_zone)
1888 goto out_destroy_ifork_zone;
1889
1890
1891 /*
1892 * The size of the zone-allocated buf log item is the maximum
1893 * size possible under XFS. This wastes a little bit of memory,
1894 * but it is much faster.
1895 */
1896 xfs_buf_item_zone = kmem_zone_init(sizeof(struct xfs_buf_log_item),
1897 "xfs_buf_item");
1898 if (!xfs_buf_item_zone)
1899 goto out_destroy_trans_zone;
1900
1901 xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1902 ((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1903 sizeof(xfs_extent_t))), "xfs_efd_item");
1904 if (!xfs_efd_zone)
1905 goto out_destroy_buf_item_zone;
1906
1907 xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1908 ((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1909 sizeof(xfs_extent_t))), "xfs_efi_item");
1910 if (!xfs_efi_zone)
1911 goto out_destroy_efd_zone;
1912
1913 xfs_inode_zone =
1914 kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1915 KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD |
1916 KM_ZONE_ACCOUNT, xfs_fs_inode_init_once);
1917 if (!xfs_inode_zone)
1918 goto out_destroy_efi_zone;
1919
1920 xfs_ili_zone =
1921 kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1922 KM_ZONE_SPREAD, NULL);
1923 if (!xfs_ili_zone)
1924 goto out_destroy_inode_zone;
1925 xfs_icreate_zone = kmem_zone_init(sizeof(struct xfs_icreate_item),
1926 "xfs_icr");
1927 if (!xfs_icreate_zone)
1928 goto out_destroy_ili_zone;
1929
1930 xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
1931 "xfs_rud_item");
1932 if (!xfs_rud_zone)
1933 goto out_destroy_icreate_zone;
1934
1935 xfs_rui_zone = kmem_zone_init(
1936 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1937 "xfs_rui_item");
1938 if (!xfs_rui_zone)
1939 goto out_destroy_rud_zone;
1940
1941 xfs_cud_zone = kmem_zone_init(sizeof(struct xfs_cud_log_item),
1942 "xfs_cud_item");
1943 if (!xfs_cud_zone)
1944 goto out_destroy_rui_zone;
1945
1946 xfs_cui_zone = kmem_zone_init(
1947 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
1948 "xfs_cui_item");
1949 if (!xfs_cui_zone)
1950 goto out_destroy_cud_zone;
1951
1952 xfs_bud_zone = kmem_zone_init(sizeof(struct xfs_bud_log_item),
1953 "xfs_bud_item");
1954 if (!xfs_bud_zone)
1955 goto out_destroy_cui_zone;
1956
1957 xfs_bui_zone = kmem_zone_init(
1958 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
1959 "xfs_bui_item");
1960 if (!xfs_bui_zone)
1961 goto out_destroy_bud_zone;
1962
1963 return 0;
1964
1965 out_destroy_bud_zone:
1966 kmem_zone_destroy(xfs_bud_zone);
1967 out_destroy_cui_zone:
1968 kmem_zone_destroy(xfs_cui_zone);
1969 out_destroy_cud_zone:
1970 kmem_zone_destroy(xfs_cud_zone);
1971 out_destroy_rui_zone:
1972 kmem_zone_destroy(xfs_rui_zone);
1973 out_destroy_rud_zone:
1974 kmem_zone_destroy(xfs_rud_zone);
1975 out_destroy_icreate_zone:
1976 kmem_zone_destroy(xfs_icreate_zone);
1977 out_destroy_ili_zone:
1978 kmem_zone_destroy(xfs_ili_zone);
1979 out_destroy_inode_zone:
1980 kmem_zone_destroy(xfs_inode_zone);
1981 out_destroy_efi_zone:
1982 kmem_zone_destroy(xfs_efi_zone);
1983 out_destroy_efd_zone:
1984 kmem_zone_destroy(xfs_efd_zone);
1985 out_destroy_buf_item_zone:
1986 kmem_zone_destroy(xfs_buf_item_zone);
1987 out_destroy_trans_zone:
1988 kmem_zone_destroy(xfs_trans_zone);
1989 out_destroy_ifork_zone:
1990 kmem_zone_destroy(xfs_ifork_zone);
1991 out_destroy_da_state_zone:
1992 kmem_zone_destroy(xfs_da_state_zone);
1993 out_destroy_btree_cur_zone:
1994 kmem_zone_destroy(xfs_btree_cur_zone);
1995 out_destroy_bmap_free_item_zone:
1996 kmem_zone_destroy(xfs_bmap_free_item_zone);
1997 out_destroy_log_ticket_zone:
1998 kmem_zone_destroy(xfs_log_ticket_zone);
1999 out_free_ioend_bioset:
2000 bioset_exit(&xfs_ioend_bioset);
2001 out:
2002 return -ENOMEM;
2003}
2004
2005STATIC void
2006xfs_destroy_zones(void)
2007{
2008 /*
2009	 * Make sure all delayed rcu frees are flushed before we
2010 * destroy caches.
2011 */
2012 rcu_barrier();
2013 kmem_zone_destroy(xfs_bui_zone);
2014 kmem_zone_destroy(xfs_bud_zone);
2015 kmem_zone_destroy(xfs_cui_zone);
2016 kmem_zone_destroy(xfs_cud_zone);
2017 kmem_zone_destroy(xfs_rui_zone);
2018 kmem_zone_destroy(xfs_rud_zone);
2019 kmem_zone_destroy(xfs_icreate_zone);
2020 kmem_zone_destroy(xfs_ili_zone);
2021 kmem_zone_destroy(xfs_inode_zone);
2022 kmem_zone_destroy(xfs_efi_zone);
2023 kmem_zone_destroy(xfs_efd_zone);
2024 kmem_zone_destroy(xfs_buf_item_zone);
2025 kmem_zone_destroy(xfs_trans_zone);
2026 kmem_zone_destroy(xfs_ifork_zone);
2027 kmem_zone_destroy(xfs_da_state_zone);
2028 kmem_zone_destroy(xfs_btree_cur_zone);
2029 kmem_zone_destroy(xfs_bmap_free_item_zone);
2030 kmem_zone_destroy(xfs_log_ticket_zone);
2031 bioset_exit(&xfs_ioend_bioset);
2032}
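
/*
 * Context for the rcu_barrier() above (illustrative): some objects, the
 * xfs_inode for example, are returned to their zone from an RCU callback
 * queued with call_rcu(). A callback still pending at module unload time
 * would otherwise race with kmem_zone_destroy() and touch a cache that no
 * longer exists; rcu_barrier() waits for every queued callback to complete
 * before the zones are torn down.
 */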
2033
2034STATIC int __init
2035xfs_init_workqueues(void)
2036{
2037 /*
2038 * The allocation workqueue can be used in memory reclaim situations
2039 * (writepage path), and parallelism is only limited by the number of
2040 * AGs in all the filesystems mounted. Hence use the default large
2041 * max_active value for this workqueue.
2042 */
2043 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2044 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0);
2045 if (!xfs_alloc_wq)
2046 return -ENOMEM;
2047
2048 xfs_discard_wq = alloc_workqueue("xfsdiscard", WQ_UNBOUND, 0);
2049 if (!xfs_discard_wq)
2050 goto out_free_alloc_wq;
2051
2052 return 0;
2053out_free_alloc_wq:
2054 destroy_workqueue(xfs_alloc_wq);
2055 return -ENOMEM;
2056}
2057
2058STATIC void
2059xfs_destroy_workqueues(void)
2060{
2061 destroy_workqueue(xfs_discard_wq);
2062 destroy_workqueue(xfs_alloc_wq);
2063}
2064
2065STATIC int __init
2066init_xfs_fs(void)
2067{
2068 int error;
2069
2070 xfs_check_ondisk_structs();
2071
2072 printk(KERN_INFO XFS_VERSION_STRING " with "
2073 XFS_BUILD_OPTIONS " enabled\n");
2074
2075 xfs_dir_startup();
2076
2077 error = xfs_init_zones();
2078 if (error)
2079 goto out;
2080
2081 error = xfs_init_workqueues();
2082 if (error)
2083 goto out_destroy_zones;
2084
2085 error = xfs_mru_cache_init();
2086 if (error)
2087 goto out_destroy_wq;
2088
2089 error = xfs_buf_init();
2090 if (error)
2091 goto out_mru_cache_uninit;
2092
2093 error = xfs_init_procfs();
2094 if (error)
2095 goto out_buf_terminate;
2096
2097 error = xfs_sysctl_register();
2098 if (error)
2099 goto out_cleanup_procfs;
2100
2101 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2102 if (!xfs_kset) {
2103 error = -ENOMEM;
2104 goto out_sysctl_unregister;
2105 }
2106
2107 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2108
2109 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2110 if (!xfsstats.xs_stats) {
2111 error = -ENOMEM;
2112 goto out_kset_unregister;
2113 }
2114
2115 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2116 "stats");
2117 if (error)
2118 goto out_free_stats;
2119
2120#ifdef DEBUG
2121 xfs_dbg_kobj.kobject.kset = xfs_kset;
2122 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2123 if (error)
2124 goto out_remove_stats_kobj;
2125#endif
2126
2127 error = xfs_qm_init();
2128 if (error)
2129 goto out_remove_dbg_kobj;
2130
2131 error = register_filesystem(&xfs_fs_type);
2132 if (error)
2133 goto out_qm_exit;
2134 return 0;
2135
2136 out_qm_exit:
2137 xfs_qm_exit();
2138 out_remove_dbg_kobj:
2139#ifdef DEBUG
2140 xfs_sysfs_del(&xfs_dbg_kobj);
2141 out_remove_stats_kobj:
2142#endif
2143 xfs_sysfs_del(&xfsstats.xs_kobj);
2144 out_free_stats:
2145 free_percpu(xfsstats.xs_stats);
2146 out_kset_unregister:
2147 kset_unregister(xfs_kset);
2148 out_sysctl_unregister:
2149 xfs_sysctl_unregister();
2150 out_cleanup_procfs:
2151 xfs_cleanup_procfs();
2152 out_buf_terminate:
2153 xfs_buf_terminate();
2154 out_mru_cache_uninit:
2155 xfs_mru_cache_uninit();
2156 out_destroy_wq:
2157 xfs_destroy_workqueues();
2158 out_destroy_zones:
2159 xfs_destroy_zones();
2160 out:
2161 return error;
2162}
2163
2164STATIC void __exit
2165exit_xfs_fs(void)
2166{
2167 xfs_qm_exit();
2168 unregister_filesystem(&xfs_fs_type);
2169#ifdef DEBUG
2170 xfs_sysfs_del(&xfs_dbg_kobj);
2171#endif
2172 xfs_sysfs_del(&xfsstats.xs_kobj);
2173 free_percpu(xfsstats.xs_stats);
2174 kset_unregister(xfs_kset);
2175 xfs_sysctl_unregister();
2176 xfs_cleanup_procfs();
2177 xfs_buf_terminate();
2178 xfs_mru_cache_uninit();
2179 xfs_destroy_workqueues();
2180 xfs_destroy_zones();
2181 xfs_uuid_table_free();
2182}
2183
2184module_init(init_xfs_fs);
2185module_exit(exit_xfs_fs);
2186
2187MODULE_AUTHOR("Silicon Graphics, Inc.");
2188MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2189MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6
7#include "xfs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_sb.h"
13#include "xfs_mount.h"
14#include "xfs_inode.h"
15#include "xfs_btree.h"
16#include "xfs_bmap.h"
17#include "xfs_alloc.h"
18#include "xfs_fsops.h"
19#include "xfs_trans.h"
20#include "xfs_buf_item.h"
21#include "xfs_log.h"
22#include "xfs_log_priv.h"
23#include "xfs_dir2.h"
24#include "xfs_extfree_item.h"
25#include "xfs_mru_cache.h"
26#include "xfs_inode_item.h"
27#include "xfs_icache.h"
28#include "xfs_trace.h"
29#include "xfs_icreate_item.h"
30#include "xfs_filestream.h"
31#include "xfs_quota.h"
32#include "xfs_sysfs.h"
33#include "xfs_ondisk.h"
34#include "xfs_rmap_item.h"
35#include "xfs_refcount_item.h"
36#include "xfs_bmap_item.h"
37#include "xfs_reflink.h"
38#include "xfs_pwork.h"
39#include "xfs_ag.h"
40
41#include <linux/magic.h>
42#include <linux/fs_context.h>
43#include <linux/fs_parser.h>
44
45static const struct super_operations xfs_super_operations;
46
47static struct kset *xfs_kset; /* top-level xfs sysfs dir */
48#ifdef DEBUG
49static struct xfs_kobj xfs_dbg_kobj; /* global debug sysfs attrs */
50#endif
51
52enum xfs_dax_mode {
53 XFS_DAX_INODE = 0,
54 XFS_DAX_ALWAYS = 1,
55 XFS_DAX_NEVER = 2,
56};
57
58static void
59xfs_mount_set_dax_mode(
60 struct xfs_mount *mp,
61 enum xfs_dax_mode mode)
62{
63 switch (mode) {
64 case XFS_DAX_INODE:
65 mp->m_flags &= ~(XFS_MOUNT_DAX_ALWAYS | XFS_MOUNT_DAX_NEVER);
66 break;
67 case XFS_DAX_ALWAYS:
68 mp->m_flags |= XFS_MOUNT_DAX_ALWAYS;
69 mp->m_flags &= ~XFS_MOUNT_DAX_NEVER;
70 break;
71 case XFS_DAX_NEVER:
72 mp->m_flags |= XFS_MOUNT_DAX_NEVER;
73 mp->m_flags &= ~XFS_MOUNT_DAX_ALWAYS;
74 break;
75 }
76}
77
78static const struct constant_table dax_param_enums[] = {
79 {"inode", XFS_DAX_INODE },
80 {"always", XFS_DAX_ALWAYS },
81 {"never", XFS_DAX_NEVER },
82 {}
83};
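
/*
 * Illustrative mapping (hypothetical option string): a "dax=always" mount
 * option is matched by fs_parse() against dax_param_enums above, yielding
 * result.uint_32 == XFS_DAX_ALWAYS, which the option parser then applies
 * with:
 *
 *	xfs_mount_set_dax_mode(mp, XFS_DAX_ALWAYS);
 *
 * so mp->m_flags gains XFS_MOUNT_DAX_ALWAYS and loses XFS_MOUNT_DAX_NEVER.
 * A bare "dax" with no value is treated the same as "dax=always".
 */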
84
85/*
86 * Table driven mount option parser.
87 */
88enum {
89 Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev,
90 Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid,
91 Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups,
92 Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, Opt_ikeep,
93 Opt_noikeep, Opt_largeio, Opt_nolargeio, Opt_attr2, Opt_noattr2,
94 Opt_filestreams, Opt_quota, Opt_noquota, Opt_usrquota, Opt_grpquota,
95 Opt_prjquota, Opt_uquota, Opt_gquota, Opt_pquota,
96 Opt_uqnoenforce, Opt_gqnoenforce, Opt_pqnoenforce, Opt_qnoenforce,
97 Opt_discard, Opt_nodiscard, Opt_dax, Opt_dax_enum,
98};
99
100static const struct fs_parameter_spec xfs_fs_parameters[] = {
101 fsparam_u32("logbufs", Opt_logbufs),
102 fsparam_string("logbsize", Opt_logbsize),
103 fsparam_string("logdev", Opt_logdev),
104 fsparam_string("rtdev", Opt_rtdev),
105 fsparam_flag("wsync", Opt_wsync),
106 fsparam_flag("noalign", Opt_noalign),
107 fsparam_flag("swalloc", Opt_swalloc),
108 fsparam_u32("sunit", Opt_sunit),
109 fsparam_u32("swidth", Opt_swidth),
110 fsparam_flag("nouuid", Opt_nouuid),
111 fsparam_flag("grpid", Opt_grpid),
112 fsparam_flag("nogrpid", Opt_nogrpid),
113 fsparam_flag("bsdgroups", Opt_bsdgroups),
114 fsparam_flag("sysvgroups", Opt_sysvgroups),
115 fsparam_string("allocsize", Opt_allocsize),
116 fsparam_flag("norecovery", Opt_norecovery),
117 fsparam_flag("inode64", Opt_inode64),
118 fsparam_flag("inode32", Opt_inode32),
119 fsparam_flag("ikeep", Opt_ikeep),
120 fsparam_flag("noikeep", Opt_noikeep),
121 fsparam_flag("largeio", Opt_largeio),
122 fsparam_flag("nolargeio", Opt_nolargeio),
123 fsparam_flag("attr2", Opt_attr2),
124 fsparam_flag("noattr2", Opt_noattr2),
125 fsparam_flag("filestreams", Opt_filestreams),
126 fsparam_flag("quota", Opt_quota),
127 fsparam_flag("noquota", Opt_noquota),
128 fsparam_flag("usrquota", Opt_usrquota),
129 fsparam_flag("grpquota", Opt_grpquota),
130 fsparam_flag("prjquota", Opt_prjquota),
131 fsparam_flag("uquota", Opt_uquota),
132 fsparam_flag("gquota", Opt_gquota),
133 fsparam_flag("pquota", Opt_pquota),
134 fsparam_flag("uqnoenforce", Opt_uqnoenforce),
135 fsparam_flag("gqnoenforce", Opt_gqnoenforce),
136 fsparam_flag("pqnoenforce", Opt_pqnoenforce),
137 fsparam_flag("qnoenforce", Opt_qnoenforce),
138 fsparam_flag("discard", Opt_discard),
139 fsparam_flag("nodiscard", Opt_nodiscard),
140 fsparam_flag("dax", Opt_dax),
141 fsparam_enum("dax", Opt_dax_enum, dax_param_enums),
142 {}
143};
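
/*
 * Example (hypothetical mount invocation, assuming CONFIG_FS_DAX):
 *
 *	mount -o logbufs=8,logbsize=64k,dax=never /dev/sdb1 /mnt
 *
 * The VFS hands each comma-separated option to the parse_param hook below,
 * where fs_parse() matches it against this table: "logbufs=8" resolves to
 * Opt_logbufs with result.uint_32 == 8, "logbsize=64k" to Opt_logbsize with
 * the raw string "64k" (decoded later by suffix_kstrtoint()), and
 * "dax=never" to Opt_dax_enum with result.uint_32 == XFS_DAX_NEVER.
 */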
144
145struct proc_xfs_info {
146 uint64_t flag;
147 char *str;
148};
149
150static int
151xfs_fs_show_options(
152 struct seq_file *m,
153 struct dentry *root)
154{
155 static struct proc_xfs_info xfs_info_set[] = {
156 /* the few simple ones we can get from the mount struct */
157 { XFS_MOUNT_IKEEP, ",ikeep" },
158 { XFS_MOUNT_WSYNC, ",wsync" },
159 { XFS_MOUNT_NOALIGN, ",noalign" },
160 { XFS_MOUNT_SWALLOC, ",swalloc" },
161 { XFS_MOUNT_NOUUID, ",nouuid" },
162 { XFS_MOUNT_NORECOVERY, ",norecovery" },
163 { XFS_MOUNT_ATTR2, ",attr2" },
164 { XFS_MOUNT_FILESTREAMS, ",filestreams" },
165 { XFS_MOUNT_GRPID, ",grpid" },
166 { XFS_MOUNT_DISCARD, ",discard" },
167 { XFS_MOUNT_LARGEIO, ",largeio" },
168 { XFS_MOUNT_DAX_ALWAYS, ",dax=always" },
169 { XFS_MOUNT_DAX_NEVER, ",dax=never" },
170 { 0, NULL }
171 };
172 struct xfs_mount *mp = XFS_M(root->d_sb);
173 struct proc_xfs_info *xfs_infop;
174
175 for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
176 if (mp->m_flags & xfs_infop->flag)
177 seq_puts(m, xfs_infop->str);
178 }
179
180 seq_printf(m, ",inode%d",
181 (mp->m_flags & XFS_MOUNT_SMALL_INUMS) ? 32 : 64);
182
183 if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
184 seq_printf(m, ",allocsize=%dk",
185 (1 << mp->m_allocsize_log) >> 10);
186
187 if (mp->m_logbufs > 0)
188 seq_printf(m, ",logbufs=%d", mp->m_logbufs);
189 if (mp->m_logbsize > 0)
190 seq_printf(m, ",logbsize=%dk", mp->m_logbsize >> 10);
191
192 if (mp->m_logname)
193 seq_show_option(m, "logdev", mp->m_logname);
194 if (mp->m_rtname)
195 seq_show_option(m, "rtdev", mp->m_rtname);
196
197 if (mp->m_dalign > 0)
198 seq_printf(m, ",sunit=%d",
199 (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
200 if (mp->m_swidth > 0)
201 seq_printf(m, ",swidth=%d",
202 (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
203
204 if (mp->m_qflags & XFS_UQUOTA_ACCT) {
205 if (mp->m_qflags & XFS_UQUOTA_ENFD)
206 seq_puts(m, ",usrquota");
207 else
208 seq_puts(m, ",uqnoenforce");
209 }
210
211 if (mp->m_qflags & XFS_PQUOTA_ACCT) {
212 if (mp->m_qflags & XFS_PQUOTA_ENFD)
213 seq_puts(m, ",prjquota");
214 else
215 seq_puts(m, ",pqnoenforce");
216 }
217 if (mp->m_qflags & XFS_GQUOTA_ACCT) {
218 if (mp->m_qflags & XFS_GQUOTA_ENFD)
219 seq_puts(m, ",grpquota");
220 else
221 seq_puts(m, ",gqnoenforce");
222 }
223
224 if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
225 seq_puts(m, ",noquota");
226
227 return 0;
228}
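
/*
 * Illustrative /proc/mounts line for a hypothetical mount: the flag table
 * above contributes entries such as ",noalign" and ",discard", the
 * unconditional ",inode32"/",inode64" follows, then log, stripe and quota
 * options, e.g.:
 *
 *	rw,noalign,discard,inode64,logbufs=8,logbsize=64k,usrquota
 *
 * (the leading "rw" and the separators are emitted by the VFS, not here).
 */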
229
230/*
231 * Set parameters for inode allocation heuristics, taking into account
232 * filesystem size and inode32/inode64 mount options; i.e. specifically
233 * whether or not XFS_MOUNT_SMALL_INUMS is set.
234 *
235 * Inode allocation patterns are altered only if inode32 is requested
236 * (XFS_MOUNT_SMALL_INUMS), and the filesystem is sufficiently large.
237 * If altered, XFS_MOUNT_32BITINODES is set as well.
238 *
239 * An agcount independent of that in the mount structure is provided
240 * because in the growfs case, mp->m_sb.sb_agcount is not yet updated
241 * to the potentially higher ag count.
242 *
243 * Returns the maximum AG index which may contain inodes.
244 */
245xfs_agnumber_t
246xfs_set_inode_alloc(
247 struct xfs_mount *mp,
248 xfs_agnumber_t agcount)
249{
250 xfs_agnumber_t index;
251 xfs_agnumber_t maxagi = 0;
252 xfs_sb_t *sbp = &mp->m_sb;
253 xfs_agnumber_t max_metadata;
254 xfs_agino_t agino;
255 xfs_ino_t ino;
256
257 /*
258 * Calculate how much should be reserved for inodes to meet
259 * the max inode percentage. Used only for inode32.
260 */
261 if (M_IGEO(mp)->maxicount) {
262 uint64_t icount;
263
264 icount = sbp->sb_dblocks * sbp->sb_imax_pct;
265 do_div(icount, 100);
266 icount += sbp->sb_agblocks - 1;
267 do_div(icount, sbp->sb_agblocks);
268 max_metadata = icount;
269 } else {
270 max_metadata = agcount;
271 }
272
273 /* Get the last possible inode in the filesystem */
274 agino = XFS_AGB_TO_AGINO(mp, sbp->sb_agblocks - 1);
275 ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
276
277 /*
278 * If user asked for no more than 32-bit inodes, and the fs is
279 * sufficiently large, set XFS_MOUNT_32BITINODES if we must alter
280 * the allocator to accommodate the request.
281 */
282 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
283 mp->m_flags |= XFS_MOUNT_32BITINODES;
284 else
285 mp->m_flags &= ~XFS_MOUNT_32BITINODES;
286
287 for (index = 0; index < agcount; index++) {
288 struct xfs_perag *pag;
289
290 ino = XFS_AGINO_TO_INO(mp, index, agino);
291
292 pag = xfs_perag_get(mp, index);
293
294 if (mp->m_flags & XFS_MOUNT_32BITINODES) {
295 if (ino > XFS_MAXINUMBER_32) {
296 pag->pagi_inodeok = 0;
297 pag->pagf_metadata = 0;
298 } else {
299 pag->pagi_inodeok = 1;
300 maxagi++;
301 if (index < max_metadata)
302 pag->pagf_metadata = 1;
303 else
304 pag->pagf_metadata = 0;
305 }
306 } else {
307 pag->pagi_inodeok = 1;
308 pag->pagf_metadata = 0;
309 }
310
311 xfs_perag_put(pag);
312 }
313
314 return (mp->m_flags & XFS_MOUNT_32BITINODES) ? maxagi : agcount;
315}
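
/*
 * Worked example (illustrative numbers): for an inode32 mount of a
 * filesystem with sb_dblocks = 8000000000, sb_agblocks = 250000000 and
 * sb_imax_pct = 5, the reservation calculation above gives
 *
 *	icount       = 8000000000 * 5 / 100              = 400000000 blocks
 *	max_metadata = (400000000 + 250000000 - 1) / 250000000 = 2 AGs
 *
 * so AGs 0 and 1 are preferred for metadata, AGs whose inode numbers still
 * fit below XFS_MAXINUMBER_32 stay pagi_inodeok, and higher AGs are
 * excluded from inode allocation entirely.
 */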
316
317STATIC int
318xfs_blkdev_get(
319 xfs_mount_t *mp,
320 const char *name,
321 struct block_device **bdevp)
322{
323 int error = 0;
324
325 *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
326 mp);
327 if (IS_ERR(*bdevp)) {
328 error = PTR_ERR(*bdevp);
329 xfs_warn(mp, "Invalid device [%s], error=%d", name, error);
330 }
331
332 return error;
333}
334
335STATIC void
336xfs_blkdev_put(
337 struct block_device *bdev)
338{
339 if (bdev)
340 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
341}
342
343STATIC void
344xfs_close_devices(
345 struct xfs_mount *mp)
346{
347 struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
348
349 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
350 struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
351 struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
352
353 xfs_free_buftarg(mp->m_logdev_targp);
354 xfs_blkdev_put(logdev);
355 fs_put_dax(dax_logdev);
356 }
357 if (mp->m_rtdev_targp) {
358 struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
359 struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
360
361 xfs_free_buftarg(mp->m_rtdev_targp);
362 xfs_blkdev_put(rtdev);
363 fs_put_dax(dax_rtdev);
364 }
365 xfs_free_buftarg(mp->m_ddev_targp);
366 fs_put_dax(dax_ddev);
367}
368
369/*
370 * The file system configurations are:
371 * (1) device (partition) with data and internal log
372 * (2) logical volume with data and log subvolumes.
373 * (3) logical volume with data, log, and realtime subvolumes.
374 *
375 * We only have to handle opening the log and realtime volumes here if
376 * they are present. The data subvolume has already been opened by
377 * get_sb_bdev() and is stored in sb->s_bdev.
378 */
379STATIC int
380xfs_open_devices(
381 struct xfs_mount *mp)
382{
383 struct block_device *ddev = mp->m_super->s_bdev;
384 struct dax_device *dax_ddev = fs_dax_get_by_bdev(ddev);
385 struct dax_device *dax_logdev = NULL, *dax_rtdev = NULL;
386 struct block_device *logdev = NULL, *rtdev = NULL;
387 int error;
388
389 /*
390 * Open real time and log devices - order is important.
391 */
392 if (mp->m_logname) {
393 error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
394 if (error)
395 goto out;
396 dax_logdev = fs_dax_get_by_bdev(logdev);
397 }
398
399 if (mp->m_rtname) {
400 error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
401 if (error)
402 goto out_close_logdev;
403
404 if (rtdev == ddev || rtdev == logdev) {
405 xfs_warn(mp,
406 "Cannot mount filesystem with identical rtdev and ddev/logdev.");
407 error = -EINVAL;
408 goto out_close_rtdev;
409 }
410 dax_rtdev = fs_dax_get_by_bdev(rtdev);
411 }
412
413 /*
414 * Setup xfs_mount buffer target pointers
415 */
416 error = -ENOMEM;
417 mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
418 if (!mp->m_ddev_targp)
419 goto out_close_rtdev;
420
421 if (rtdev) {
422 mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
423 if (!mp->m_rtdev_targp)
424 goto out_free_ddev_targ;
425 }
426
427 if (logdev && logdev != ddev) {
428 mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
429 if (!mp->m_logdev_targp)
430 goto out_free_rtdev_targ;
431 } else {
432 mp->m_logdev_targp = mp->m_ddev_targp;
433 }
434
435 return 0;
436
437 out_free_rtdev_targ:
438 if (mp->m_rtdev_targp)
439 xfs_free_buftarg(mp->m_rtdev_targp);
440 out_free_ddev_targ:
441 xfs_free_buftarg(mp->m_ddev_targp);
442 out_close_rtdev:
443 xfs_blkdev_put(rtdev);
444 fs_put_dax(dax_rtdev);
445 out_close_logdev:
446 if (logdev && logdev != ddev) {
447 xfs_blkdev_put(logdev);
448 fs_put_dax(dax_logdev);
449 }
450 out:
451 fs_put_dax(dax_ddev);
452 return error;
453}
454
455/*
456 * Setup xfs_mount buffer target pointers based on superblock
457 */
458STATIC int
459xfs_setup_devices(
460 struct xfs_mount *mp)
461{
462 int error;
463
464 error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_sectsize);
465 if (error)
466 return error;
467
468 if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
469 unsigned int log_sector_size = BBSIZE;
470
471 if (xfs_sb_version_hassector(&mp->m_sb))
472 log_sector_size = mp->m_sb.sb_logsectsize;
473 error = xfs_setsize_buftarg(mp->m_logdev_targp,
474 log_sector_size);
475 if (error)
476 return error;
477 }
478 if (mp->m_rtdev_targp) {
479 error = xfs_setsize_buftarg(mp->m_rtdev_targp,
480 mp->m_sb.sb_sectsize);
481 if (error)
482 return error;
483 }
484
485 return 0;
486}
487
488STATIC int
489xfs_init_mount_workqueues(
490 struct xfs_mount *mp)
491{
492 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
493 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
494 1, mp->m_super->s_id);
495 if (!mp->m_buf_workqueue)
496 goto out;
497
498 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
499 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
500 0, mp->m_super->s_id);
501 if (!mp->m_unwritten_workqueue)
502 goto out_destroy_buf;
503
504 mp->m_cil_workqueue = alloc_workqueue("xfs-cil/%s",
505 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_UNBOUND),
506 0, mp->m_super->s_id);
507 if (!mp->m_cil_workqueue)
508 goto out_destroy_unwritten;
509
510 mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
511 XFS_WQFLAGS(WQ_FREEZABLE | WQ_MEM_RECLAIM),
512 0, mp->m_super->s_id);
513 if (!mp->m_reclaim_workqueue)
514 goto out_destroy_cil;
515
516 mp->m_gc_workqueue = alloc_workqueue("xfs-gc/%s",
517 WQ_SYSFS | WQ_UNBOUND | WQ_FREEZABLE | WQ_MEM_RECLAIM,
518 0, mp->m_super->s_id);
519 if (!mp->m_gc_workqueue)
520 goto out_destroy_reclaim;
521
522 mp->m_sync_workqueue = alloc_workqueue("xfs-sync/%s",
523 XFS_WQFLAGS(WQ_FREEZABLE), 0, mp->m_super->s_id);
524 if (!mp->m_sync_workqueue)
525 goto out_destroy_eofb;
526
527 return 0;
528
529out_destroy_eofb:
530 destroy_workqueue(mp->m_gc_workqueue);
531out_destroy_reclaim:
532 destroy_workqueue(mp->m_reclaim_workqueue);
533out_destroy_cil:
534 destroy_workqueue(mp->m_cil_workqueue);
535out_destroy_unwritten:
536 destroy_workqueue(mp->m_unwritten_workqueue);
537out_destroy_buf:
538 destroy_workqueue(mp->m_buf_workqueue);
539out:
540 return -ENOMEM;
541}
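
/*
 * Note on the names above (illustrative): the "%s" is expanded from
 * mp->m_super->s_id, so a filesystem mounted from /dev/sda1 ends up with
 * per-mount workqueues named "xfs-buf/sda1", "xfs-conv/sda1",
 * "xfs-cil/sda1" and so on; the error labels tear them down in reverse
 * order of creation.
 */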
542
543STATIC void
544xfs_destroy_mount_workqueues(
545 struct xfs_mount *mp)
546{
547 destroy_workqueue(mp->m_sync_workqueue);
548 destroy_workqueue(mp->m_gc_workqueue);
549 destroy_workqueue(mp->m_reclaim_workqueue);
550 destroy_workqueue(mp->m_cil_workqueue);
551 destroy_workqueue(mp->m_unwritten_workqueue);
552 destroy_workqueue(mp->m_buf_workqueue);
553}
554
555static void
556xfs_flush_inodes_worker(
557 struct work_struct *work)
558{
559 struct xfs_mount *mp = container_of(work, struct xfs_mount,
560 m_flush_inodes_work);
561 struct super_block *sb = mp->m_super;
562
563 if (down_read_trylock(&sb->s_umount)) {
564 sync_inodes_sb(sb);
565 up_read(&sb->s_umount);
566 }
567}
568
569/*
570 * Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
571 * or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
572 * for IO to complete so that we effectively throttle multiple callers to the
573 * rate at which IO is completing.
574 */
575void
576xfs_flush_inodes(
577 struct xfs_mount *mp)
578{
579 /*
580 * If flush_work() returns true then that means we waited for a flush
581 * which was already in progress. Don't bother running another scan.
582 */
583 if (flush_work(&mp->m_flush_inodes_work))
584 return;
585
586 queue_work(mp->m_sync_workqueue, &mp->m_flush_inodes_work);
587 flush_work(&mp->m_flush_inodes_work);
588}
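
/*
 * Example of the throttling behaviour (illustrative): if three writers hit
 * ENOSPC at roughly the same time, the first caller queues the work and
 * waits for it, while the second and third see flush_work() return true
 * (they waited on that same in-flight flush) and bail out early, so at
 * most one sync_inodes_sb() pass runs per burst of callers.
 */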
589
590/* Catch misguided souls that try to use this interface on XFS */
591STATIC struct inode *
592xfs_fs_alloc_inode(
593 struct super_block *sb)
594{
595 BUG();
596 return NULL;
597}
598
599#ifdef DEBUG
600static void
601xfs_check_delalloc(
602 struct xfs_inode *ip,
603 int whichfork)
604{
605 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
606 struct xfs_bmbt_irec got;
607 struct xfs_iext_cursor icur;
608
609 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
610 return;
611 do {
612 if (isnullstartblock(got.br_startblock)) {
613 xfs_warn(ip->i_mount,
614 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
615 ip->i_ino,
616 whichfork == XFS_DATA_FORK ? "data" : "cow",
617 got.br_startoff, got.br_blockcount);
618 }
619 } while (xfs_iext_next_extent(ifp, &icur, &got));
620}
621#else
622#define xfs_check_delalloc(ip, whichfork) do { } while (0)
623#endif
624
625/*
626 * Now that the generic code is guaranteed not to be accessing
627 * the linux inode, we can inactivate and reclaim the inode.
628 */
629STATIC void
630xfs_fs_destroy_inode(
631 struct inode *inode)
632{
633 struct xfs_inode *ip = XFS_I(inode);
634
635 trace_xfs_destroy_inode(ip);
636
637 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
638 XFS_STATS_INC(ip->i_mount, vn_rele);
639 XFS_STATS_INC(ip->i_mount, vn_remove);
640
641 xfs_inactive(ip);
642
643 if (!XFS_FORCED_SHUTDOWN(ip->i_mount) && ip->i_delayed_blks) {
644 xfs_check_delalloc(ip, XFS_DATA_FORK);
645 xfs_check_delalloc(ip, XFS_COW_FORK);
646 ASSERT(0);
647 }
648
649 XFS_STATS_INC(ip->i_mount, vn_reclaim);
650
651 /*
652 * We should never get here with one of the reclaim flags already set.
653 */
654 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
655 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
656
657 /*
658 * We always use background reclaim here because even if the inode is
659	 * clean, it still may be under IO and hence we have to wait for IO
660 * completion to occur before we can reclaim the inode. The background
661 * reclaim path handles this more efficiently than we can here, so
662 * simply let background reclaim tear down all inodes.
663 */
664 xfs_inode_mark_reclaimable(ip);
665}
666
667static void
668xfs_fs_dirty_inode(
669 struct inode *inode,
670 int flag)
671{
672 struct xfs_inode *ip = XFS_I(inode);
673 struct xfs_mount *mp = ip->i_mount;
674 struct xfs_trans *tp;
675
676 if (!(inode->i_sb->s_flags & SB_LAZYTIME))
677 return;
678 if (flag != I_DIRTY_SYNC || !(inode->i_state & I_DIRTY_TIME))
679 return;
680
681 if (xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp))
682 return;
683 xfs_ilock(ip, XFS_ILOCK_EXCL);
684 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
685 xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
686 xfs_trans_commit(tp);
687}
688
689/*
690 * Slab object creation initialisation for the XFS inode.
691 * This covers only the idempotent fields in the XFS inode;
692 * all other fields need to be initialised on allocation
693 * from the slab. This avoids the need to repeatedly initialise
694 * fields in the xfs inode that are left in the initialised state
695 * when freeing the inode.
696 */
697STATIC void
698xfs_fs_inode_init_once(
699 void *inode)
700{
701 struct xfs_inode *ip = inode;
702
703 memset(ip, 0, sizeof(struct xfs_inode));
704
705 /* vfs inode */
706 inode_init_once(VFS_I(ip));
707
708 /* xfs inode */
709 atomic_set(&ip->i_pincount, 0);
710 spin_lock_init(&ip->i_flags_lock);
711
712 mrlock_init(&ip->i_mmaplock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
713 "xfsino", ip->i_ino);
714 mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
715 "xfsino", ip->i_ino);
716}
717
718/*
719 * We do an unlocked check for XFS_IDONTCACHE here because we are already
720 * serialised against cache hits here via the inode->i_lock and igrab() in
721 * xfs_iget_cache_hit(). Hence a lookup that might clear this flag will not be
722 * racing with us, and it avoids needing to grab a spinlock here for every inode
723 * we drop the final reference on.
724 */
725STATIC int
726xfs_fs_drop_inode(
727 struct inode *inode)
728{
729 struct xfs_inode *ip = XFS_I(inode);
730
731 /*
732 * If this unlinked inode is in the middle of recovery, don't
733 * drop the inode just yet; log recovery will take care of
734 * that. See the comment for this inode flag.
735 */
736 if (ip->i_flags & XFS_IRECOVERY) {
737 ASSERT(ip->i_mount->m_log->l_flags & XLOG_RECOVERY_NEEDED);
738 return 0;
739 }
740
741 return generic_drop_inode(inode);
742}
743
744static void
745xfs_mount_free(
746 struct xfs_mount *mp)
747{
748 kfree(mp->m_rtname);
749 kfree(mp->m_logname);
750 kmem_free(mp);
751}
752
753STATIC int
754xfs_fs_sync_fs(
755 struct super_block *sb,
756 int wait)
757{
758 struct xfs_mount *mp = XFS_M(sb);
759
760 /*
761 * Doing anything during the async pass would be counterproductive.
762 */
763 if (!wait)
764 return 0;
765
766 xfs_log_force(mp, XFS_LOG_SYNC);
767 if (laptop_mode) {
768 /*
769 * The disk must be active because we're syncing.
770 * We schedule log work now (now that the disk is
771 * active) instead of later (when it might not be).
772 */
773 flush_delayed_work(&mp->m_log->l_work);
774 }
775
776 return 0;
777}
778
779STATIC int
780xfs_fs_statfs(
781 struct dentry *dentry,
782 struct kstatfs *statp)
783{
784 struct xfs_mount *mp = XFS_M(dentry->d_sb);
785 xfs_sb_t *sbp = &mp->m_sb;
786 struct xfs_inode *ip = XFS_I(d_inode(dentry));
787 uint64_t fakeinos, id;
788 uint64_t icount;
789 uint64_t ifree;
790 uint64_t fdblocks;
791 xfs_extlen_t lsize;
792 int64_t ffree;
793
794 statp->f_type = XFS_SUPER_MAGIC;
795 statp->f_namelen = MAXNAMELEN - 1;
796
797 id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
798 statp->f_fsid = u64_to_fsid(id);
799
800 icount = percpu_counter_sum(&mp->m_icount);
801 ifree = percpu_counter_sum(&mp->m_ifree);
802 fdblocks = percpu_counter_sum(&mp->m_fdblocks);
803
804 spin_lock(&mp->m_sb_lock);
805 statp->f_bsize = sbp->sb_blocksize;
806 lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
807 statp->f_blocks = sbp->sb_dblocks - lsize;
808 spin_unlock(&mp->m_sb_lock);
809
810 /* make sure statp->f_bfree does not underflow */
811 statp->f_bfree = max_t(int64_t, fdblocks - mp->m_alloc_set_aside, 0);
812 statp->f_bavail = statp->f_bfree;
813
814 fakeinos = XFS_FSB_TO_INO(mp, statp->f_bfree);
815 statp->f_files = min(icount + fakeinos, (uint64_t)XFS_MAXINUMBER);
816 if (M_IGEO(mp)->maxicount)
817 statp->f_files = min_t(typeof(statp->f_files),
818 statp->f_files,
819 M_IGEO(mp)->maxicount);
820
821 /* If sb_icount overshot maxicount, report actual allocation */
822 statp->f_files = max_t(typeof(statp->f_files),
823 statp->f_files,
824 sbp->sb_icount);
825
826 /* make sure statp->f_ffree does not underflow */
827 ffree = statp->f_files - (icount - ifree);
828 statp->f_ffree = max_t(int64_t, ffree, 0);
829
830
831 if ((ip->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
832 ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
833 (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))
834 xfs_qm_statvfs(ip, statp);
835
836 if (XFS_IS_REALTIME_MOUNT(mp) &&
837 (ip->i_diflags & (XFS_DIFLAG_RTINHERIT | XFS_DIFLAG_REALTIME))) {
838 statp->f_blocks = sbp->sb_rblocks;
839 statp->f_bavail = statp->f_bfree =
840 sbp->sb_frextents * sbp->sb_rextsize;
841 }
842
843 return 0;
844}
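
/*
 * Worked example for the free-space figures above (illustrative numbers):
 * with fdblocks = 1000000 and m_alloc_set_aside = 4096,
 *
 *	f_bfree  = max(1000000 - 4096, 0) = 995904
 *	fakeinos = XFS_FSB_TO_INO(mp, 995904)
 *
 * and f_files is then clamped from above by maxicount (if set) and from
 * below by sb_icount, so statfs never reports fewer inodes than are
 * already allocated.
 */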
845
846STATIC void
847xfs_save_resvblks(struct xfs_mount *mp)
848{
849 uint64_t resblks = 0;
850
851 mp->m_resblks_save = mp->m_resblks;
852 xfs_reserve_blocks(mp, &resblks, NULL);
853}
854
855STATIC void
856xfs_restore_resvblks(struct xfs_mount *mp)
857{
858 uint64_t resblks;
859
860 if (mp->m_resblks_save) {
861 resblks = mp->m_resblks_save;
862 mp->m_resblks_save = 0;
863 } else
864 resblks = xfs_default_resblks(mp);
865
866 xfs_reserve_blocks(mp, &resblks, NULL);
867}
868
869/*
870 * Second stage of a freeze. The data is already frozen so we only
871 * need to take care of the metadata. Once that's done sync the superblock
872 * to the log to dirty it in case of a crash while frozen. This ensures that we
873 * will recover the unlinked inode lists on the next mount.
874 */
875STATIC int
876xfs_fs_freeze(
877 struct super_block *sb)
878{
879 struct xfs_mount *mp = XFS_M(sb);
880 unsigned int flags;
881 int ret;
882
883 /*
884 * The filesystem is now frozen far enough that memory reclaim
885 * cannot safely operate on the filesystem. Hence we need to
886 * set a GFP_NOFS context here to avoid recursion deadlocks.
887 */
888 flags = memalloc_nofs_save();
889 xfs_blockgc_stop(mp);
890 xfs_save_resvblks(mp);
891 ret = xfs_log_quiesce(mp);
892 memalloc_nofs_restore(flags);
893 return ret;
894}
895
896STATIC int
897xfs_fs_unfreeze(
898 struct super_block *sb)
899{
900 struct xfs_mount *mp = XFS_M(sb);
901
902 xfs_restore_resvblks(mp);
903 xfs_log_work_queue(mp);
904 xfs_blockgc_start(mp);
905 return 0;
906}
907
908/*
909 * This function fills in xfs_mount_t fields based on mount args.
910 * Note: the superblock _has_ now been read in.
911 */
912STATIC int
913xfs_finish_flags(
914 struct xfs_mount *mp)
915{
916 int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
917
918 /* Fail a mount where the logbuf is smaller than the log stripe */
919 if (xfs_sb_version_haslogv2(&mp->m_sb)) {
920 if (mp->m_logbsize <= 0 &&
921 mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
922 mp->m_logbsize = mp->m_sb.sb_logsunit;
923 } else if (mp->m_logbsize > 0 &&
924 mp->m_logbsize < mp->m_sb.sb_logsunit) {
925 xfs_warn(mp,
926 "logbuf size must be greater than or equal to log stripe size");
927 return -EINVAL;
928 }
929 } else {
930 /* Fail a mount if the logbuf is larger than 32K */
931 if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
932 xfs_warn(mp,
933 "logbuf size for version 1 logs must be 16K or 32K");
934 return -EINVAL;
935 }
936 }
937
938 /*
939 * V5 filesystems always use attr2 format for attributes.
940 */
941 if (xfs_sb_version_hascrc(&mp->m_sb) &&
942 (mp->m_flags & XFS_MOUNT_NOATTR2)) {
943 xfs_warn(mp, "Cannot mount a V5 filesystem as noattr2. "
944 "attr2 is always enabled for V5 filesystems.");
945 return -EINVAL;
946 }
947
948 /*
949 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
950 * told by noattr2 to turn it off
951 */
952 if (xfs_sb_version_hasattr2(&mp->m_sb) &&
953 !(mp->m_flags & XFS_MOUNT_NOATTR2))
954 mp->m_flags |= XFS_MOUNT_ATTR2;
955
956 /*
957 * prohibit r/w mounts of read-only filesystems
958 */
959 if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
960 xfs_warn(mp,
961 "cannot mount a read-only filesystem as read-write");
962 return -EROFS;
963 }
964
965 if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
966 (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE)) &&
967 !xfs_sb_version_has_pquotino(&mp->m_sb)) {
968 xfs_warn(mp,
969 "Super block does not support project and group quota together");
970 return -EINVAL;
971 }
972
973 return 0;
974}
975
976static int
977xfs_init_percpu_counters(
978 struct xfs_mount *mp)
979{
980 int error;
981
982 error = percpu_counter_init(&mp->m_icount, 0, GFP_KERNEL);
983 if (error)
984 return -ENOMEM;
985
986 error = percpu_counter_init(&mp->m_ifree, 0, GFP_KERNEL);
987 if (error)
988 goto free_icount;
989
990 error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
991 if (error)
992 goto free_ifree;
993
994 error = percpu_counter_init(&mp->m_delalloc_blks, 0, GFP_KERNEL);
995 if (error)
996 goto free_fdblocks;
997
998 return 0;
999
1000free_fdblocks:
1001 percpu_counter_destroy(&mp->m_fdblocks);
1002free_ifree:
1003 percpu_counter_destroy(&mp->m_ifree);
1004free_icount:
1005 percpu_counter_destroy(&mp->m_icount);
1006 return -ENOMEM;
1007}
1008
1009void
1010xfs_reinit_percpu_counters(
1011 struct xfs_mount *mp)
1012{
1013 percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
1014 percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
1015 percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
1016}
1017
1018static void
1019xfs_destroy_percpu_counters(
1020 struct xfs_mount *mp)
1021{
1022 percpu_counter_destroy(&mp->m_icount);
1023 percpu_counter_destroy(&mp->m_ifree);
1024 percpu_counter_destroy(&mp->m_fdblocks);
1025 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
1026 percpu_counter_sum(&mp->m_delalloc_blks) == 0);
1027 percpu_counter_destroy(&mp->m_delalloc_blks);
1028}
1029
1030static void
1031xfs_fs_put_super(
1032 struct super_block *sb)
1033{
1034 struct xfs_mount *mp = XFS_M(sb);
1035
1036 /* if ->fill_super failed, we have no mount to tear down */
1037 if (!sb->s_fs_info)
1038 return;
1039
1040 xfs_notice(mp, "Unmounting Filesystem");
1041 xfs_filestream_unmount(mp);
1042 xfs_unmountfs(mp);
1043
1044 xfs_freesb(mp);
1045 free_percpu(mp->m_stats.xs_stats);
1046 xfs_destroy_percpu_counters(mp);
1047 xfs_destroy_mount_workqueues(mp);
1048 xfs_close_devices(mp);
1049
1050 sb->s_fs_info = NULL;
1051 xfs_mount_free(mp);
1052}
1053
1054static long
1055xfs_fs_nr_cached_objects(
1056 struct super_block *sb,
1057 struct shrink_control *sc)
1058{
1059 /* Paranoia: catch incorrect calls during mount setup or teardown */
1060 if (WARN_ON_ONCE(!sb->s_fs_info))
1061 return 0;
1062 return xfs_reclaim_inodes_count(XFS_M(sb));
1063}
1064
1065static long
1066xfs_fs_free_cached_objects(
1067 struct super_block *sb,
1068 struct shrink_control *sc)
1069{
1070 return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
1071}
1072
1073static const struct super_operations xfs_super_operations = {
1074 .alloc_inode = xfs_fs_alloc_inode,
1075 .destroy_inode = xfs_fs_destroy_inode,
1076 .dirty_inode = xfs_fs_dirty_inode,
1077 .drop_inode = xfs_fs_drop_inode,
1078 .put_super = xfs_fs_put_super,
1079 .sync_fs = xfs_fs_sync_fs,
1080 .freeze_fs = xfs_fs_freeze,
1081 .unfreeze_fs = xfs_fs_unfreeze,
1082 .statfs = xfs_fs_statfs,
1083 .show_options = xfs_fs_show_options,
1084 .nr_cached_objects = xfs_fs_nr_cached_objects,
1085 .free_cached_objects = xfs_fs_free_cached_objects,
1086};
1087
1088static int
1089suffix_kstrtoint(
1090 const char *s,
1091 unsigned int base,
1092 int *res)
1093{
1094 int last, shift_left_factor = 0, _res;
1095 char *value;
1096 int ret = 0;
1097
1098 value = kstrdup(s, GFP_KERNEL);
1099 if (!value)
1100 return -ENOMEM;
1101
1102 last = strlen(value) - 1;
1103 if (value[last] == 'K' || value[last] == 'k') {
1104 shift_left_factor = 10;
1105 value[last] = '\0';
1106 }
1107 if (value[last] == 'M' || value[last] == 'm') {
1108 shift_left_factor = 20;
1109 value[last] = '\0';
1110 }
1111 if (value[last] == 'G' || value[last] == 'g') {
1112 shift_left_factor = 30;
1113 value[last] = '\0';
1114 }
1115
1116 if (kstrtoint(value, base, &_res))
1117 ret = -EINVAL;
1118 kfree(value);
1119 *res = _res << shift_left_factor;
1120 return ret;
1121}
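
/*
 * Worked example (illustrative): suffix_kstrtoint("64k", 10, &res) strips
 * the trailing 'k', sets shift_left_factor = 10, parses "64" and stores
 * res = 64 << 10 = 65536. A bare "65536" yields the same value with no
 * shift, while an unparseable string such as "64x" fails kstrtoint() and
 * the function returns -EINVAL.
 */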
1122
1123static inline void
1124xfs_fs_warn_deprecated(
1125 struct fs_context *fc,
1126 struct fs_parameter *param,
1127 uint64_t flag,
1128 bool value)
1129{
1130	/* Don't print the warning if reconfiguring and the current mount
1131	 * point already had the flag set.
1132 */
1133 if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) &&
1134 !!(XFS_M(fc->root->d_sb)->m_flags & flag) == value)
1135 return;
1136 xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key);
1137}
1138
1139/*
1140 * Set mount state from a mount option.
1141 *
1142 * NOTE: mp->m_super is NULL here!
1143 */
1144static int
1145xfs_fs_parse_param(
1146 struct fs_context *fc,
1147 struct fs_parameter *param)
1148{
1149 struct xfs_mount *parsing_mp = fc->s_fs_info;
1150 struct fs_parse_result result;
1151 int size = 0;
1152 int opt;
1153
1154 opt = fs_parse(fc, xfs_fs_parameters, param, &result);
1155 if (opt < 0)
1156 return opt;
1157
1158 switch (opt) {
1159 case Opt_logbufs:
1160 parsing_mp->m_logbufs = result.uint_32;
1161 return 0;
1162 case Opt_logbsize:
1163 if (suffix_kstrtoint(param->string, 10, &parsing_mp->m_logbsize))
1164 return -EINVAL;
1165 return 0;
1166 case Opt_logdev:
1167 kfree(parsing_mp->m_logname);
1168 parsing_mp->m_logname = kstrdup(param->string, GFP_KERNEL);
1169 if (!parsing_mp->m_logname)
1170 return -ENOMEM;
1171 return 0;
1172 case Opt_rtdev:
1173 kfree(parsing_mp->m_rtname);
1174 parsing_mp->m_rtname = kstrdup(param->string, GFP_KERNEL);
1175 if (!parsing_mp->m_rtname)
1176 return -ENOMEM;
1177 return 0;
1178 case Opt_allocsize:
1179 if (suffix_kstrtoint(param->string, 10, &size))
1180 return -EINVAL;
1181 parsing_mp->m_allocsize_log = ffs(size) - 1;
1182 parsing_mp->m_flags |= XFS_MOUNT_ALLOCSIZE;
1183 return 0;
1184 case Opt_grpid:
1185 case Opt_bsdgroups:
1186 parsing_mp->m_flags |= XFS_MOUNT_GRPID;
1187 return 0;
1188 case Opt_nogrpid:
1189 case Opt_sysvgroups:
1190 parsing_mp->m_flags &= ~XFS_MOUNT_GRPID;
1191 return 0;
1192 case Opt_wsync:
1193 parsing_mp->m_flags |= XFS_MOUNT_WSYNC;
1194 return 0;
1195 case Opt_norecovery:
1196 parsing_mp->m_flags |= XFS_MOUNT_NORECOVERY;
1197 return 0;
1198 case Opt_noalign:
1199 parsing_mp->m_flags |= XFS_MOUNT_NOALIGN;
1200 return 0;
1201 case Opt_swalloc:
1202 parsing_mp->m_flags |= XFS_MOUNT_SWALLOC;
1203 return 0;
1204 case Opt_sunit:
1205 parsing_mp->m_dalign = result.uint_32;
1206 return 0;
1207 case Opt_swidth:
1208 parsing_mp->m_swidth = result.uint_32;
1209 return 0;
1210 case Opt_inode32:
1211 parsing_mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1212 return 0;
1213 case Opt_inode64:
1214 parsing_mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1215 return 0;
1216 case Opt_nouuid:
1217 parsing_mp->m_flags |= XFS_MOUNT_NOUUID;
1218 return 0;
1219 case Opt_largeio:
1220 parsing_mp->m_flags |= XFS_MOUNT_LARGEIO;
1221 return 0;
1222 case Opt_nolargeio:
1223 parsing_mp->m_flags &= ~XFS_MOUNT_LARGEIO;
1224 return 0;
1225 case Opt_filestreams:
1226 parsing_mp->m_flags |= XFS_MOUNT_FILESTREAMS;
1227 return 0;
1228 case Opt_noquota:
1229 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACCT;
1230 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ENFD;
1231 parsing_mp->m_qflags &= ~XFS_ALL_QUOTA_ACTIVE;
1232 return 0;
1233 case Opt_quota:
1234 case Opt_uquota:
1235 case Opt_usrquota:
1236 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
1237 XFS_UQUOTA_ENFD);
1238 return 0;
1239 case Opt_qnoenforce:
1240 case Opt_uqnoenforce:
1241 parsing_mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
1242 parsing_mp->m_qflags &= ~XFS_UQUOTA_ENFD;
1243 return 0;
1244 case Opt_pquota:
1245 case Opt_prjquota:
1246 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
1247 XFS_PQUOTA_ENFD);
1248 return 0;
1249 case Opt_pqnoenforce:
1250 parsing_mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
1251 parsing_mp->m_qflags &= ~XFS_PQUOTA_ENFD;
1252 return 0;
1253 case Opt_gquota:
1254 case Opt_grpquota:
1255 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
1256 XFS_GQUOTA_ENFD);
1257 return 0;
1258 case Opt_gqnoenforce:
1259 parsing_mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
1260 parsing_mp->m_qflags &= ~XFS_GQUOTA_ENFD;
1261 return 0;
1262 case Opt_discard:
1263 parsing_mp->m_flags |= XFS_MOUNT_DISCARD;
1264 return 0;
1265 case Opt_nodiscard:
1266 parsing_mp->m_flags &= ~XFS_MOUNT_DISCARD;
1267 return 0;
1268#ifdef CONFIG_FS_DAX
1269 case Opt_dax:
1270 xfs_mount_set_dax_mode(parsing_mp, XFS_DAX_ALWAYS);
1271 return 0;
1272 case Opt_dax_enum:
1273 xfs_mount_set_dax_mode(parsing_mp, result.uint_32);
1274 return 0;
1275#endif
1276	/* The following mount options will be removed in September 2025 */
1277 case Opt_ikeep:
1278 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, true);
1279 parsing_mp->m_flags |= XFS_MOUNT_IKEEP;
1280 return 0;
1281 case Opt_noikeep:
1282 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_IKEEP, false);
1283 parsing_mp->m_flags &= ~XFS_MOUNT_IKEEP;
1284 return 0;
1285 case Opt_attr2:
1286 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_ATTR2, true);
1287 parsing_mp->m_flags |= XFS_MOUNT_ATTR2;
1288 return 0;
1289 case Opt_noattr2:
1290 xfs_fs_warn_deprecated(fc, param, XFS_MOUNT_NOATTR2, true);
1291 parsing_mp->m_flags &= ~XFS_MOUNT_ATTR2;
1292 parsing_mp->m_flags |= XFS_MOUNT_NOATTR2;
1293 return 0;
1294 default:
1295 xfs_warn(parsing_mp, "unknown mount option [%s].", param->key);
1296 return -EINVAL;
1297 }
1298
1299 return 0;
1300}
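
/*
 * Example of the Opt_allocsize handling above (illustrative): mounting
 * with "allocsize=64k" runs suffix_kstrtoint() to get size = 65536, and
 * m_allocsize_log becomes ffs(65536) - 1 = 16, i.e. the preferred
 * allocation size is stored as a power-of-two exponent. The value is
 * range-checked against XFS_MIN_IO_LOG/XFS_MAX_IO_LOG in
 * xfs_fs_validate_params() below.
 */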
1301
1302static int
1303xfs_fs_validate_params(
1304 struct xfs_mount *mp)
1305{
1306 /*
1307	 * The norecovery mount option requires a read-only mount
1308 */
1309 if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
1310 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
1311 xfs_warn(mp, "no-recovery mounts must be read-only.");
1312 return -EINVAL;
1313 }
1314
1315 if ((mp->m_flags & XFS_MOUNT_NOALIGN) &&
1316 (mp->m_dalign || mp->m_swidth)) {
1317 xfs_warn(mp,
1318 "sunit and swidth options incompatible with the noalign option");
1319 return -EINVAL;
1320 }
1321
1322 if (!IS_ENABLED(CONFIG_XFS_QUOTA) && mp->m_qflags != 0) {
1323 xfs_warn(mp, "quota support not available in this kernel.");
1324 return -EINVAL;
1325 }
1326
1327 if ((mp->m_dalign && !mp->m_swidth) ||
1328 (!mp->m_dalign && mp->m_swidth)) {
1329 xfs_warn(mp, "sunit and swidth must be specified together");
1330 return -EINVAL;
1331 }
1332
1333 if (mp->m_dalign && (mp->m_swidth % mp->m_dalign != 0)) {
1334 xfs_warn(mp,
1335 "stripe width (%d) must be a multiple of the stripe unit (%d)",
1336 mp->m_swidth, mp->m_dalign);
1337 return -EINVAL;
1338 }
1339
1340 if (mp->m_logbufs != -1 &&
1341 mp->m_logbufs != 0 &&
1342 (mp->m_logbufs < XLOG_MIN_ICLOGS ||
1343 mp->m_logbufs > XLOG_MAX_ICLOGS)) {
1344 xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
1345 mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
1346 return -EINVAL;
1347 }
1348
1349 if (mp->m_logbsize != -1 &&
1350 mp->m_logbsize != 0 &&
1351 (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
1352 mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
1353 !is_power_of_2(mp->m_logbsize))) {
1354 xfs_warn(mp,
1355 "invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
1356 mp->m_logbsize);
1357 return -EINVAL;
1358 }
1359
1360 if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
1361 (mp->m_allocsize_log > XFS_MAX_IO_LOG ||
1362 mp->m_allocsize_log < XFS_MIN_IO_LOG)) {
1363 xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
1364 mp->m_allocsize_log, XFS_MIN_IO_LOG, XFS_MAX_IO_LOG);
1365 return -EINVAL;
1366 }
1367
1368 return 0;
1369}
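
/*
 * Illustrative application of the checks above: "logbsize=64k" passes
 * (65536 is a power of two within the allowed record size range),
 * "logbsize=48k" trips the "invalid logbufsize" warning because 49152 is
 * not a power of two, and a logbufs count outside
 * [XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS] is rejected in the same way.
 */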
1370
1371static int
1372xfs_fs_fill_super(
1373 struct super_block *sb,
1374 struct fs_context *fc)
1375{
1376 struct xfs_mount *mp = sb->s_fs_info;
1377 struct inode *root;
1378 int flags = 0, error;
1379
1380 mp->m_super = sb;
1381
1382 error = xfs_fs_validate_params(mp);
1383 if (error)
1384 goto out_free_names;
1385
1386 sb_min_blocksize(sb, BBSIZE);
1387 sb->s_xattr = xfs_xattr_handlers;
1388 sb->s_export_op = &xfs_export_operations;
1389#ifdef CONFIG_XFS_QUOTA
1390 sb->s_qcop = &xfs_quotactl_operations;
1391 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
1392#endif
1393 sb->s_op = &xfs_super_operations;
1394
1395 /*
1396 * Delay mount work if the debug hook is set. This is debug
1397	 * instrumentation to coordinate simulation of xfs mount failures with
1398	 * VFS superblock operations.
1399 */
1400 if (xfs_globals.mount_delay) {
1401 xfs_notice(mp, "Delaying mount for %d seconds.",
1402 xfs_globals.mount_delay);
1403 msleep(xfs_globals.mount_delay * 1000);
1404 }
1405
1406 if (fc->sb_flags & SB_SILENT)
1407 flags |= XFS_MFSI_QUIET;
1408
1409 error = xfs_open_devices(mp);
1410 if (error)
1411 goto out_free_names;
1412
1413 error = xfs_init_mount_workqueues(mp);
1414 if (error)
1415 goto out_close_devices;
1416
1417 error = xfs_init_percpu_counters(mp);
1418 if (error)
1419 goto out_destroy_workqueues;
1420
1421 /* Allocate stats memory before we do operations that might use it */
1422 mp->m_stats.xs_stats = alloc_percpu(struct xfsstats);
1423 if (!mp->m_stats.xs_stats) {
1424 error = -ENOMEM;
1425 goto out_destroy_counters;
1426 }
1427
1428 error = xfs_readsb(mp, flags);
1429 if (error)
1430 goto out_free_stats;
1431
1432 error = xfs_finish_flags(mp);
1433 if (error)
1434 goto out_free_sb;
1435
1436 error = xfs_setup_devices(mp);
1437 if (error)
1438 goto out_free_sb;
1439
1440 /* V4 support is undergoing deprecation. */
1441 if (!xfs_sb_version_hascrc(&mp->m_sb)) {
1442#ifdef CONFIG_XFS_SUPPORT_V4
1443 xfs_warn_once(mp,
1444 "Deprecated V4 format (crc=0) will not be supported after September 2030.");
1445#else
1446 xfs_warn(mp,
1447 "Deprecated V4 format (crc=0) not supported by kernel.");
1448 error = -EINVAL;
1449 goto out_free_sb;
1450#endif
1451 }
1452
1453 /* Filesystem claims it needs repair, so refuse the mount. */
1454 if (xfs_sb_version_needsrepair(&mp->m_sb)) {
1455 xfs_warn(mp, "Filesystem needs repair. Please run xfs_repair.");
1456 error = -EFSCORRUPTED;
1457 goto out_free_sb;
1458 }
1459
1460 /*
1461 * Don't touch the filesystem if a user tool thinks it owns the primary
1462 * superblock. mkfs doesn't clear the flag from secondary supers, so
1463 * we don't check them at all.
1464 */
1465 if (mp->m_sb.sb_inprogress) {
1466 xfs_warn(mp, "Offline file system operation in progress!");
1467 error = -EFSCORRUPTED;
1468 goto out_free_sb;
1469 }
1470
1471 /*
1472 * Until this is fixed only page-sized or smaller data blocks work.
1473 */
1474 if (mp->m_sb.sb_blocksize > PAGE_SIZE) {
1475 xfs_warn(mp,
1476 "File system with blocksize %d bytes. "
1477 "Only pagesize (%ld) or less will currently work.",
1478 mp->m_sb.sb_blocksize, PAGE_SIZE);
1479 error = -ENOSYS;
1480 goto out_free_sb;
1481 }
1482
1483 /* Ensure this filesystem fits in the page cache limits */
1484 if (xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_dblocks) ||
1485 xfs_sb_validate_fsb_count(&mp->m_sb, mp->m_sb.sb_rblocks)) {
1486 xfs_warn(mp,
1487 "file system too large to be mounted on this system.");
1488 error = -EFBIG;
1489 goto out_free_sb;
1490 }
1491
1492 /*
1493 * XFS block mappings use 54 bits to store the logical block offset.
1494 * This should suffice to handle the maximum file size that the VFS
1495 * supports (currently 2^63 bytes on 64-bit and ULONG_MAX << PAGE_SHIFT
1496 * bytes on 32-bit), but as XFS and VFS have gotten the s_maxbytes
1497 * calculation wrong on 32-bit kernels in the past, we'll add a WARN_ON
1498 * to check this assertion.
1499 *
1500 * Avoid integer overflow by comparing the maximum bmbt offset to the
1501 * maximum pagecache offset in units of fs blocks.
1502 */
1503 if (!xfs_verify_fileoff(mp, XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE))) {
1504 xfs_warn(mp,
1505"MAX_LFS_FILESIZE block offset (%llu) exceeds extent map maximum (%llu)!",
1506 XFS_B_TO_FSBT(mp, MAX_LFS_FILESIZE),
1507 XFS_MAX_FILEOFF);
1508 error = -EINVAL;
1509 goto out_free_sb;
1510 }
1511
1512 error = xfs_filestream_mount(mp);
1513 if (error)
1514 goto out_free_sb;
1515
1516 /*
1517	 * We must configure the block size in the superblock before we run the
1518	 * full mount process, as the mount process can look up and cache inodes.
1519 */
1520 sb->s_magic = XFS_SUPER_MAGIC;
1521 sb->s_blocksize = mp->m_sb.sb_blocksize;
1522 sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1523 sb->s_maxbytes = MAX_LFS_FILESIZE;
1524 sb->s_max_links = XFS_MAXLINK;
1525 sb->s_time_gran = 1;
1526 if (xfs_sb_version_hasbigtime(&mp->m_sb)) {
1527 sb->s_time_min = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MIN);
1528 sb->s_time_max = xfs_bigtime_to_unix(XFS_BIGTIME_TIME_MAX);
1529 } else {
1530 sb->s_time_min = XFS_LEGACY_TIME_MIN;
1531 sb->s_time_max = XFS_LEGACY_TIME_MAX;
1532 }
1533 trace_xfs_inode_timestamp_range(mp, sb->s_time_min, sb->s_time_max);
1534 sb->s_iflags |= SB_I_CGROUPWB;
1535
1536 set_posix_acl_flag(sb);
1537
1538 /* version 5 superblocks support inode version counters. */
1539 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1540 sb->s_flags |= SB_I_VERSION;
1541
1542 if (xfs_sb_version_hasbigtime(&mp->m_sb))
1543 xfs_warn(mp,
1544 "EXPERIMENTAL big timestamp feature in use. Use at your own risk!");
1545
1546 if (mp->m_flags & XFS_MOUNT_DAX_ALWAYS) {
1547 bool rtdev_is_dax = false, datadev_is_dax;
1548
1549 xfs_warn(mp,
1550 "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
1551
1552 datadev_is_dax = bdev_dax_supported(mp->m_ddev_targp->bt_bdev,
1553 sb->s_blocksize);
1554 if (mp->m_rtdev_targp)
1555 rtdev_is_dax = bdev_dax_supported(
1556 mp->m_rtdev_targp->bt_bdev, sb->s_blocksize);
1557 if (!rtdev_is_dax && !datadev_is_dax) {
1558 xfs_alert(mp,
1559 "DAX unsupported by block device. Turning off DAX.");
1560 xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
1561 }
1562 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1563 xfs_alert(mp,
1564 "DAX and reflink cannot be used together!");
1565 error = -EINVAL;
1566 goto out_filestream_unmount;
1567 }
1568 }
1569
1570 if (mp->m_flags & XFS_MOUNT_DISCARD) {
1571 struct request_queue *q = bdev_get_queue(sb->s_bdev);
1572
1573 if (!blk_queue_discard(q)) {
1574 xfs_warn(mp, "mounting with \"discard\" option, but "
1575 "the device does not support discard");
1576 mp->m_flags &= ~XFS_MOUNT_DISCARD;
1577 }
1578 }
1579
1580 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
1581 if (mp->m_sb.sb_rblocks) {
1582 xfs_alert(mp,
1583 "reflink not compatible with realtime device!");
1584 error = -EINVAL;
1585 goto out_filestream_unmount;
1586 }
1587
1588 if (xfs_globals.always_cow) {
1589 xfs_info(mp, "using DEBUG-only always_cow mode.");
1590 mp->m_always_cow = true;
1591 }
1592 }
1593
1594 if (xfs_sb_version_hasrmapbt(&mp->m_sb) && mp->m_sb.sb_rblocks) {
1595 xfs_alert(mp,
1596 "reverse mapping btree not compatible with realtime device!");
1597 error = -EINVAL;
1598 goto out_filestream_unmount;
1599 }
1600
1601 if (xfs_sb_version_hasinobtcounts(&mp->m_sb))
1602 xfs_warn(mp,
1603 "EXPERIMENTAL inode btree counters feature in use. Use at your own risk!");
1604
1605 error = xfs_mountfs(mp);
1606 if (error)
1607 goto out_filestream_unmount;
1608
1609 root = igrab(VFS_I(mp->m_rootip));
1610 if (!root) {
1611 error = -ENOENT;
1612 goto out_unmount;
1613 }
1614 sb->s_root = d_make_root(root);
1615 if (!sb->s_root) {
1616 error = -ENOMEM;
1617 goto out_unmount;
1618 }
1619
1620 return 0;
1621
1622 out_filestream_unmount:
1623 xfs_filestream_unmount(mp);
1624 out_free_sb:
1625 xfs_freesb(mp);
1626 out_free_stats:
1627 free_percpu(mp->m_stats.xs_stats);
1628 out_destroy_counters:
1629 xfs_destroy_percpu_counters(mp);
1630 out_destroy_workqueues:
1631 xfs_destroy_mount_workqueues(mp);
1632 out_close_devices:
1633 xfs_close_devices(mp);
1634 out_free_names:
1635 sb->s_fs_info = NULL;
1636 xfs_mount_free(mp);
1637 return error;
1638
1639 out_unmount:
1640 xfs_filestream_unmount(mp);
1641 xfs_unmountfs(mp);
1642 goto out_free_sb;
1643}
1644
1645static int
1646xfs_fs_get_tree(
1647 struct fs_context *fc)
1648{
1649 return get_tree_bdev(fc, xfs_fs_fill_super);
1650}
1651
1652static int
1653xfs_remount_rw(
1654 struct xfs_mount *mp)
1655{
1656 struct xfs_sb *sbp = &mp->m_sb;
1657 int error;
1658
1659 if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
1660 xfs_warn(mp,
1661 "ro->rw transition prohibited on norecovery mount");
1662 return -EINVAL;
1663 }
1664
1665 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
1666 xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
1667 xfs_warn(mp,
1668 "ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
1669 (sbp->sb_features_ro_compat &
1670 XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
1671 return -EINVAL;
1672 }
1673
1674 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1675
1676 /*
1677 * If this is the first remount to writeable state we might have some
1678 * superblock changes to update.
1679 */
1680 if (mp->m_update_sb) {
1681 error = xfs_sync_sb(mp, false);
1682 if (error) {
1683 xfs_warn(mp, "failed to write sb changes");
1684 return error;
1685 }
1686 mp->m_update_sb = false;
1687 }
1688
1689 /*
1690 * Fill out the reserve pool if it is empty. Use the stashed value if
1691 * it is non-zero, otherwise go with the default.
1692 */
1693 xfs_restore_resvblks(mp);
1694 xfs_log_work_queue(mp);
1695
1696 /* Recover any CoW blocks that never got remapped. */
1697 error = xfs_reflink_recover_cow(mp);
1698 if (error) {
1699 xfs_err(mp,
1700 "Error %d recovering leftover CoW allocations.", error);
1701 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1702 return error;
1703 }
1704 xfs_blockgc_start(mp);
1705
1706	/* Create the per-AG metadata reservation pool. */
1707 error = xfs_fs_reserve_ag_blocks(mp);
1708 if (error && error != -ENOSPC)
1709 return error;
1710
1711 return 0;
1712}
1713
1714static int
1715xfs_remount_ro(
1716 struct xfs_mount *mp)
1717{
1718 int error;
1719
1720 /*
1721 * Cancel background eofb scanning so it cannot race with the final
1722 * log force+buftarg wait and deadlock the remount.
1723 */
1724 xfs_blockgc_stop(mp);
1725
1726 /* Get rid of any leftover CoW reservations... */
1727 error = xfs_blockgc_free_space(mp, NULL);
1728 if (error) {
1729 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1730 return error;
1731 }
1732
1733 /* Free the per-AG metadata reservation pool. */
1734 error = xfs_fs_unreserve_ag_blocks(mp);
1735 if (error) {
1736 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1737 return error;
1738 }
1739
1740 /*
1741 * Before we sync the metadata, we need to free up the reserve block
1742 * pool so that the used block count in the superblock on disk is
1743	 * correct at the end of the remount. Stash the current reserve pool
1744 * size so that if we get remounted rw, we can return it to the same
1745 * size.
1746 */
1747 xfs_save_resvblks(mp);
1748
1749 xfs_log_clean(mp);
1750 mp->m_flags |= XFS_MOUNT_RDONLY;
1751
1752 return 0;
1753}
1754
1755/*
1756 * Logically we would return an error here to prevent users from believing
1757 * they might have changed mount options using remount which can't be changed.
1758 *
1759 * But unfortunately mount(8) adds all options from mtab and fstab to the mount
1760 * arguments in some cases so we can't blindly reject options, but have to
1761 * check for each specified option if it actually differs from the currently
1762 * set option and only reject it if that's the case.
1763 *
1764 * Until that is implemented we return success for every remount request, and
1765 * silently ignore all options that we can't actually change.
1766 */
1767static int
1768xfs_fs_reconfigure(
1769 struct fs_context *fc)
1770{
1771 struct xfs_mount *mp = XFS_M(fc->root->d_sb);
1772 struct xfs_mount *new_mp = fc->s_fs_info;
1773 xfs_sb_t *sbp = &mp->m_sb;
1774 int flags = fc->sb_flags;
1775 int error;
1776
1777 /* version 5 superblocks always support version counters. */
1778 if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5)
1779 fc->sb_flags |= SB_I_VERSION;
1780
1781 error = xfs_fs_validate_params(new_mp);
1782 if (error)
1783 return error;
1784
1785 sync_filesystem(mp->m_super);
1786
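	/*
	 * Switching between inode32 and inode64 only affects where *new*
	 * inodes may be allocated, so recompute the highest AG usable for
	 * inode allocation (m_maxagi) for both directions below.
	 */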
1787 /* inode32 -> inode64 */
1788 if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1789 !(new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1790 mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
1791 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1792 }
1793
1794 /* inode64 -> inode32 */
1795 if (!(mp->m_flags & XFS_MOUNT_SMALL_INUMS) &&
1796 (new_mp->m_flags & XFS_MOUNT_SMALL_INUMS)) {
1797 mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
1798 mp->m_maxagi = xfs_set_inode_alloc(mp, sbp->sb_agcount);
1799 }
1800
1801 /* ro -> rw */
1802 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(flags & SB_RDONLY)) {
1803 error = xfs_remount_rw(mp);
1804 if (error)
1805 return error;
1806 }
1807
1808 /* rw -> ro */
1809 if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (flags & SB_RDONLY)) {
1810 error = xfs_remount_ro(mp);
1811 if (error)
1812 return error;
1813 }
1814
1815 return 0;
1816}
1817
1818static void xfs_fs_free(
1819 struct fs_context *fc)
1820{
1821 struct xfs_mount *mp = fc->s_fs_info;
1822
1823 /*
1824 * mp is stored in the fs_context when it is initialized.
1825 * mp is transferred to the superblock on a successful mount,
1826 * but if an error occurs before the transfer we have to free
1827 * it here.
1828 */
1829 if (mp)
1830 xfs_mount_free(mp);
1831}
1832
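/*
 * fs_context (new mount API) hooks: .parse_param handles each mount option,
 * .get_tree performs the actual mount, .reconfigure handles remount, and
 * .free releases the context (and any xfs_mount that was never transferred
 * to a superblock) when the context is torn down.
 */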
1833static const struct fs_context_operations xfs_context_ops = {
1834 .parse_param = xfs_fs_parse_param,
1835 .get_tree = xfs_fs_get_tree,
1836 .reconfigure = xfs_fs_reconfigure,
1837 .free = xfs_fs_free,
1838};
1839
1840static int xfs_init_fs_context(
1841 struct fs_context *fc)
1842{
1843 struct xfs_mount *mp;
1844
1845 mp = kmem_alloc(sizeof(struct xfs_mount), KM_ZERO);
1846 if (!mp)
1847 return -ENOMEM;
1848
1849 spin_lock_init(&mp->m_sb_lock);
1850 spin_lock_init(&mp->m_agirotor_lock);
1851 INIT_RADIX_TREE(&mp->m_perag_tree, GFP_ATOMIC);
1852 spin_lock_init(&mp->m_perag_lock);
1853 mutex_init(&mp->m_growlock);
1854 INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
1855 INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
1856 mp->m_kobj.kobject.kset = xfs_kset;
1857 /*
1858 * We don't create the finobt per-ag space reservation until after log
1859 * recovery, so set this to true to ensure that an ifree transaction
1860 * started during log recovery does not depend on space reservations
1861 * for finobt expansion.
1862 */
1863 mp->m_finobt_nores = true;
1864
1865 /*
1866 * These can be overridden by the mount option parsing.
1867 */
1868 mp->m_logbufs = -1;
1869 mp->m_logbsize = -1;
1870 mp->m_allocsize_log = 16; /* 64k */
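	/*
	 * -1 for the log buffer values means "not configured"; the log code
	 * picks suitable defaults at mount time unless a mount option
	 * overrides them.
	 */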
1871
1872 /*
1873 * Copy binary VFS mount flags we are interested in.
1874 */
1875 if (fc->sb_flags & SB_RDONLY)
1876 mp->m_flags |= XFS_MOUNT_RDONLY;
1877 if (fc->sb_flags & SB_DIRSYNC)
1878 mp->m_flags |= XFS_MOUNT_DIRSYNC;
1879 if (fc->sb_flags & SB_SYNCHRONOUS)
1880 mp->m_flags |= XFS_MOUNT_WSYNC;
1881
1882 fc->s_fs_info = mp;
1883 fc->ops = &xfs_context_ops;
1884
1885 return 0;
1886}
1887
1888static struct file_system_type xfs_fs_type = {
1889 .owner = THIS_MODULE,
1890 .name = "xfs",
1891 .init_fs_context = xfs_init_fs_context,
1892 .parameters = xfs_fs_parameters,
1893 .kill_sb = kill_block_super,
1894 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
1895};
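/*
 * Register the "fs-xfs" module alias so the module can be autoloaded when
 * userspace mounts an xfs filesystem.
 */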
1896MODULE_ALIAS_FS("xfs");
1897
1898STATIC int __init
1899xfs_init_zones(void)
1900{
1901 xfs_log_ticket_zone = kmem_cache_create("xfs_log_ticket",
1902 sizeof(struct xlog_ticket),
1903 0, 0, NULL);
1904 if (!xfs_log_ticket_zone)
1905 goto out;
1906
1907 xfs_bmap_free_item_zone = kmem_cache_create("xfs_bmap_free_item",
1908 sizeof(struct xfs_extent_free_item),
1909 0, 0, NULL);
1910 if (!xfs_bmap_free_item_zone)
1911 goto out_destroy_log_ticket_zone;
1912
1913 xfs_btree_cur_zone = kmem_cache_create("xfs_btree_cur",
1914 sizeof(struct xfs_btree_cur),
1915 0, 0, NULL);
1916 if (!xfs_btree_cur_zone)
1917 goto out_destroy_bmap_free_item_zone;
1918
1919 xfs_da_state_zone = kmem_cache_create("xfs_da_state",
1920 sizeof(struct xfs_da_state),
1921 0, 0, NULL);
1922 if (!xfs_da_state_zone)
1923 goto out_destroy_btree_cur_zone;
1924
1925 xfs_ifork_zone = kmem_cache_create("xfs_ifork",
1926 sizeof(struct xfs_ifork),
1927 0, 0, NULL);
1928 if (!xfs_ifork_zone)
1929 goto out_destroy_da_state_zone;
1930
1931 xfs_trans_zone = kmem_cache_create("xfs_trans",
1932 sizeof(struct xfs_trans),
1933 0, 0, NULL);
1934 if (!xfs_trans_zone)
1935 goto out_destroy_ifork_zone;
1936
1937
1938 /*
1939 * The size of the zone-allocated buf log item is the maximum
1940 * size possible under XFS. This wastes a little bit of memory,
1941 * but it is much faster.
1942 */
1943 xfs_buf_item_zone = kmem_cache_create("xfs_buf_item",
1944 sizeof(struct xfs_buf_log_item),
1945 0, 0, NULL);
1946 if (!xfs_buf_item_zone)
1947 goto out_destroy_trans_zone;
1948
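	/*
	 * The EFD/EFI items below embed room for XFS_EFD_MAX_FAST_EXTENTS /
	 * XFS_EFI_MAX_FAST_EXTENTS extents so that the common case needs no
	 * separate extent array allocation.
	 */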
1949 xfs_efd_zone = kmem_cache_create("xfs_efd_item",
1950 (sizeof(struct xfs_efd_log_item) +
1951 (XFS_EFD_MAX_FAST_EXTENTS - 1) *
1952 sizeof(struct xfs_extent)),
1953 0, 0, NULL);
1954 if (!xfs_efd_zone)
1955 goto out_destroy_buf_item_zone;
1956
1957 xfs_efi_zone = kmem_cache_create("xfs_efi_item",
1958 (sizeof(struct xfs_efi_log_item) +
1959 (XFS_EFI_MAX_FAST_EXTENTS - 1) *
1960 sizeof(struct xfs_extent)),
1961 0, 0, NULL);
1962 if (!xfs_efi_zone)
1963 goto out_destroy_efd_zone;
1964
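	/*
	 * Inodes use a slab constructor (xfs_fs_inode_init_once) so that
	 * state which must persist across allocations, such as locks and
	 * list heads, is only initialised once per slab object.
	 */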
1965 xfs_inode_zone = kmem_cache_create("xfs_inode",
1966 sizeof(struct xfs_inode), 0,
1967 (SLAB_HWCACHE_ALIGN |
1968 SLAB_RECLAIM_ACCOUNT |
1969 SLAB_MEM_SPREAD | SLAB_ACCOUNT),
1970 xfs_fs_inode_init_once);
1971 if (!xfs_inode_zone)
1972 goto out_destroy_efi_zone;
1973
1974 xfs_ili_zone = kmem_cache_create("xfs_ili",
1975 sizeof(struct xfs_inode_log_item), 0,
1976 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
1977 NULL);
1978 if (!xfs_ili_zone)
1979 goto out_destroy_inode_zone;
1980
1981 xfs_icreate_zone = kmem_cache_create("xfs_icr",
1982 sizeof(struct xfs_icreate_item),
1983 0, 0, NULL);
1984 if (!xfs_icreate_zone)
1985 goto out_destroy_ili_zone;
1986
1987 xfs_rud_zone = kmem_cache_create("xfs_rud_item",
1988 sizeof(struct xfs_rud_log_item),
1989 0, 0, NULL);
1990 if (!xfs_rud_zone)
1991 goto out_destroy_icreate_zone;
1992
1993 xfs_rui_zone = kmem_cache_create("xfs_rui_item",
1994 xfs_rui_log_item_sizeof(XFS_RUI_MAX_FAST_EXTENTS),
1995 0, 0, NULL);
1996 if (!xfs_rui_zone)
1997 goto out_destroy_rud_zone;
1998
1999 xfs_cud_zone = kmem_cache_create("xfs_cud_item",
2000 sizeof(struct xfs_cud_log_item),
2001 0, 0, NULL);
2002 if (!xfs_cud_zone)
2003 goto out_destroy_rui_zone;
2004
2005 xfs_cui_zone = kmem_cache_create("xfs_cui_item",
2006 xfs_cui_log_item_sizeof(XFS_CUI_MAX_FAST_EXTENTS),
2007 0, 0, NULL);
2008 if (!xfs_cui_zone)
2009 goto out_destroy_cud_zone;
2010
2011 xfs_bud_zone = kmem_cache_create("xfs_bud_item",
2012 sizeof(struct xfs_bud_log_item),
2013 0, 0, NULL);
2014 if (!xfs_bud_zone)
2015 goto out_destroy_cui_zone;
2016
2017 xfs_bui_zone = kmem_cache_create("xfs_bui_item",
2018 xfs_bui_log_item_sizeof(XFS_BUI_MAX_FAST_EXTENTS),
2019 0, 0, NULL);
2020 if (!xfs_bui_zone)
2021 goto out_destroy_bud_zone;
2022
2023 return 0;
2024
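	/* Error unwind: destroy the zones in the reverse order of creation. */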
2025 out_destroy_bud_zone:
2026 kmem_cache_destroy(xfs_bud_zone);
2027 out_destroy_cui_zone:
2028 kmem_cache_destroy(xfs_cui_zone);
2029 out_destroy_cud_zone:
2030 kmem_cache_destroy(xfs_cud_zone);
2031 out_destroy_rui_zone:
2032 kmem_cache_destroy(xfs_rui_zone);
2033 out_destroy_rud_zone:
2034 kmem_cache_destroy(xfs_rud_zone);
2035 out_destroy_icreate_zone:
2036 kmem_cache_destroy(xfs_icreate_zone);
2037 out_destroy_ili_zone:
2038 kmem_cache_destroy(xfs_ili_zone);
2039 out_destroy_inode_zone:
2040 kmem_cache_destroy(xfs_inode_zone);
2041 out_destroy_efi_zone:
2042 kmem_cache_destroy(xfs_efi_zone);
2043 out_destroy_efd_zone:
2044 kmem_cache_destroy(xfs_efd_zone);
2045 out_destroy_buf_item_zone:
2046 kmem_cache_destroy(xfs_buf_item_zone);
2047 out_destroy_trans_zone:
2048 kmem_cache_destroy(xfs_trans_zone);
2049 out_destroy_ifork_zone:
2050 kmem_cache_destroy(xfs_ifork_zone);
2051 out_destroy_da_state_zone:
2052 kmem_cache_destroy(xfs_da_state_zone);
2053 out_destroy_btree_cur_zone:
2054 kmem_cache_destroy(xfs_btree_cur_zone);
2055 out_destroy_bmap_free_item_zone:
2056 kmem_cache_destroy(xfs_bmap_free_item_zone);
2057 out_destroy_log_ticket_zone:
2058 kmem_cache_destroy(xfs_log_ticket_zone);
2059 out:
2060 return -ENOMEM;
2061}
2062
2063STATIC void
2064xfs_destroy_zones(void)
2065{
2066 /*
2067 * Make sure all delayed RCU frees have completed before we
2068 * destroy caches.
2069 */
2070 rcu_barrier();
2071 kmem_cache_destroy(xfs_bui_zone);
2072 kmem_cache_destroy(xfs_bud_zone);
2073 kmem_cache_destroy(xfs_cui_zone);
2074 kmem_cache_destroy(xfs_cud_zone);
2075 kmem_cache_destroy(xfs_rui_zone);
2076 kmem_cache_destroy(xfs_rud_zone);
2077 kmem_cache_destroy(xfs_icreate_zone);
2078 kmem_cache_destroy(xfs_ili_zone);
2079 kmem_cache_destroy(xfs_inode_zone);
2080 kmem_cache_destroy(xfs_efi_zone);
2081 kmem_cache_destroy(xfs_efd_zone);
2082 kmem_cache_destroy(xfs_buf_item_zone);
2083 kmem_cache_destroy(xfs_trans_zone);
2084 kmem_cache_destroy(xfs_ifork_zone);
2085 kmem_cache_destroy(xfs_da_state_zone);
2086 kmem_cache_destroy(xfs_btree_cur_zone);
2087 kmem_cache_destroy(xfs_bmap_free_item_zone);
2088 kmem_cache_destroy(xfs_log_ticket_zone);
2089}
2090
2091STATIC int __init
2092xfs_init_workqueues(void)
2093{
2094 /*
2095 * The allocation workqueue can be used in memory reclaim situations
2096 * (writepage path), and parallelism is only limited by the number of
2097 * AGs in all the filesystems mounted. Hence use the default large
2098 * max_active value for this workqueue.
2099 */
2100 xfs_alloc_wq = alloc_workqueue("xfsalloc",
2101 XFS_WQFLAGS(WQ_MEM_RECLAIM | WQ_FREEZABLE), 0);
2102 if (!xfs_alloc_wq)
2103 return -ENOMEM;
2104
2105 xfs_discard_wq = alloc_workqueue("xfsdiscard", XFS_WQFLAGS(WQ_UNBOUND),
2106 0);
2107 if (!xfs_discard_wq)
2108 goto out_free_alloc_wq;
2109
2110 return 0;
2111out_free_alloc_wq:
2112 destroy_workqueue(xfs_alloc_wq);
2113 return -ENOMEM;
2114}
2115
2116STATIC void
2117xfs_destroy_workqueues(void)
2118{
2119 destroy_workqueue(xfs_discard_wq);
2120 destroy_workqueue(xfs_alloc_wq);
2121}
2122
2123STATIC int __init
2124init_xfs_fs(void)
2125{
2126 int error;
2127
2128 xfs_check_ondisk_structs();
2129
2130 printk(KERN_INFO XFS_VERSION_STRING " with "
2131 XFS_BUILD_OPTIONS " enabled\n");
2132
2133 xfs_dir_startup();
2134
2135 error = xfs_init_zones();
2136 if (error)
2137 goto out;
2138
2139 error = xfs_init_workqueues();
2140 if (error)
2141 goto out_destroy_zones;
2142
2143 error = xfs_mru_cache_init();
2144 if (error)
2145 goto out_destroy_wq;
2146
2147 error = xfs_buf_init();
2148 if (error)
2149 goto out_mru_cache_uninit;
2150
2151 error = xfs_init_procfs();
2152 if (error)
2153 goto out_buf_terminate;
2154
2155 error = xfs_sysctl_register();
2156 if (error)
2157 goto out_cleanup_procfs;
2158
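	/*
	 * Top-level /sys/fs/xfs directory; parent of the stats and
	 * (DEBUG-only) debug kobjects registered below.
	 */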
2159 xfs_kset = kset_create_and_add("xfs", NULL, fs_kobj);
2160 if (!xfs_kset) {
2161 error = -ENOMEM;
2162 goto out_sysctl_unregister;
2163 }
2164
2165 xfsstats.xs_kobj.kobject.kset = xfs_kset;
2166
2167 xfsstats.xs_stats = alloc_percpu(struct xfsstats);
2168 if (!xfsstats.xs_stats) {
2169 error = -ENOMEM;
2170 goto out_kset_unregister;
2171 }
2172
2173 error = xfs_sysfs_init(&xfsstats.xs_kobj, &xfs_stats_ktype, NULL,
2174 "stats");
2175 if (error)
2176 goto out_free_stats;
2177
2178#ifdef DEBUG
2179 xfs_dbg_kobj.kobject.kset = xfs_kset;
2180 error = xfs_sysfs_init(&xfs_dbg_kobj, &xfs_dbg_ktype, NULL, "debug");
2181 if (error)
2182 goto out_remove_stats_kobj;
2183#endif
2184
2185 error = xfs_qm_init();
2186 if (error)
2187 goto out_remove_dbg_kobj;
2188
2189 error = register_filesystem(&xfs_fs_type);
2190 if (error)
2191 goto out_qm_exit;
2192 return 0;
2193
2194 out_qm_exit:
2195 xfs_qm_exit();
2196 out_remove_dbg_kobj:
2197#ifdef DEBUG
2198 xfs_sysfs_del(&xfs_dbg_kobj);
2199 out_remove_stats_kobj:
2200#endif
2201 xfs_sysfs_del(&xfsstats.xs_kobj);
2202 out_free_stats:
2203 free_percpu(xfsstats.xs_stats);
2204 out_kset_unregister:
2205 kset_unregister(xfs_kset);
2206 out_sysctl_unregister:
2207 xfs_sysctl_unregister();
2208 out_cleanup_procfs:
2209 xfs_cleanup_procfs();
2210 out_buf_terminate:
2211 xfs_buf_terminate();
2212 out_mru_cache_uninit:
2213 xfs_mru_cache_uninit();
2214 out_destroy_wq:
2215 xfs_destroy_workqueues();
2216 out_destroy_zones:
2217 xfs_destroy_zones();
2218 out:
2219 return error;
2220}
2221
2222STATIC void __exit
2223exit_xfs_fs(void)
2224{
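	/* Tear everything down in roughly the reverse order of init_xfs_fs(). */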
2225 xfs_qm_exit();
2226 unregister_filesystem(&xfs_fs_type);
2227#ifdef DEBUG
2228 xfs_sysfs_del(&xfs_dbg_kobj);
2229#endif
2230 xfs_sysfs_del(&xfsstats.xs_kobj);
2231 free_percpu(xfsstats.xs_stats);
2232 kset_unregister(xfs_kset);
2233 xfs_sysctl_unregister();
2234 xfs_cleanup_procfs();
2235 xfs_buf_terminate();
2236 xfs_mru_cache_uninit();
2237 xfs_destroy_workqueues();
2238 xfs_destroy_zones();
2239 xfs_uuid_table_free();
2240}
2241
2242module_init(init_xfs_fs);
2243module_exit(exit_xfs_fs);
2244
2245MODULE_AUTHOR("Silicon Graphics, Inc.");
2246MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
2247MODULE_LICENSE("GPL");