Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Copyright (C) 2021, Alibaba Cloud
6 */
7#include <linux/statfs.h>
8#include <linux/seq_file.h>
9#include <linux/crc32c.h>
10#include <linux/fs_context.h>
11#include <linux/fs_parser.h>
12#include <linux/exportfs.h>
13#include <linux/backing-dev.h>
14#include "xattr.h"
15
16#define CREATE_TRACE_POINTS
17#include <trace/events/erofs.h>
18
19static struct kmem_cache *erofs_inode_cachep __read_mostly;
20
21void _erofs_printk(struct super_block *sb, const char *fmt, ...)
22{
23 struct va_format vaf;
24 va_list args;
25 int level;
26
27 va_start(args, fmt);
28
29 level = printk_get_level(fmt);
30 vaf.fmt = printk_skip_level(fmt);
31 vaf.va = &args;
32 if (sb)
33 printk("%c%cerofs (device %s): %pV",
34 KERN_SOH_ASCII, level, sb->s_id, &vaf);
35 else
36 printk("%c%cerofs: %pV", KERN_SOH_ASCII, level, &vaf);
37 va_end(args);
38}
39
40static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
41{
42 size_t len = 1 << EROFS_SB(sb)->blkszbits;
43 struct erofs_super_block *dsb;
44 u32 expected_crc, crc;
45
46 if (len > EROFS_SUPER_OFFSET)
47 len -= EROFS_SUPER_OFFSET;
48
49 dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
50 if (!dsb)
51 return -ENOMEM;
52
53 expected_crc = le32_to_cpu(dsb->checksum);
54 dsb->checksum = 0;
55 /* to allow for x86 boot sectors and other oddities. */
56 crc = crc32c(~0, dsb, len);
57 kfree(dsb);
58
59 if (crc != expected_crc) {
60 erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
61 crc, expected_crc);
62 return -EBADMSG;
63 }
64 return 0;
65}
66
67static void erofs_inode_init_once(void *ptr)
68{
69 struct erofs_inode *vi = ptr;
70
71 inode_init_once(&vi->vfs_inode);
72}
73
74static struct inode *erofs_alloc_inode(struct super_block *sb)
75{
76 struct erofs_inode *vi =
77 alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);
78
79 if (!vi)
80 return NULL;
81
82 /* zero out everything except vfs_inode */
83 memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
84 return &vi->vfs_inode;
85}
86
87static void erofs_free_inode(struct inode *inode)
88{
89 struct erofs_inode *vi = EROFS_I(inode);
90
91 if (inode->i_op == &erofs_fast_symlink_iops)
92 kfree(inode->i_link);
93 kfree(vi->xattr_shared_xattrs);
94 kmem_cache_free(erofs_inode_cachep, vi);
95}
96
97/* read variable-sized metadata, offset will be aligned by 4-byte */
98void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
99 erofs_off_t *offset, int *lengthp)
100{
101 u8 *buffer, *ptr;
102 int len, i, cnt;
103
104 *offset = round_up(*offset, 4);
105 ptr = erofs_bread(buf, *offset, EROFS_KMAP);
106 if (IS_ERR(ptr))
107 return ptr;
108
109 len = le16_to_cpu(*(__le16 *)ptr);
110 if (!len)
111 len = U16_MAX + 1;
112 buffer = kmalloc(len, GFP_KERNEL);
113 if (!buffer)
114 return ERR_PTR(-ENOMEM);
115 *offset += sizeof(__le16);
116 *lengthp = len;
117
118 for (i = 0; i < len; i += cnt) {
119 cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
120 len - i);
121 ptr = erofs_bread(buf, *offset, EROFS_KMAP);
122 if (IS_ERR(ptr)) {
123 kfree(buffer);
124 return ptr;
125 }
126 memcpy(buffer + i, ptr, cnt);
127 *offset += cnt;
128 }
129 return buffer;
130}
131
132#ifndef CONFIG_EROFS_FS_ZIP
133static int z_erofs_parse_cfgs(struct super_block *sb,
134 struct erofs_super_block *dsb)
135{
136 if (!dsb->u1.available_compr_algs)
137 return 0;
138
139 erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
140 return -EOPNOTSUPP;
141}
142#endif
143
144static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
145 struct erofs_device_info *dif, erofs_off_t *pos)
146{
147 struct erofs_sb_info *sbi = EROFS_SB(sb);
148 struct erofs_fscache *fscache;
149 struct erofs_deviceslot *dis;
150 struct file *file;
151
152 dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP);
153 if (IS_ERR(dis))
154 return PTR_ERR(dis);
155
156 if (!sbi->devs->flatdev && !dif->path) {
157 if (!dis->tag[0]) {
158 erofs_err(sb, "empty device tag @ pos %llu", *pos);
159 return -EINVAL;
160 }
161 dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
162 if (!dif->path)
163 return -ENOMEM;
164 }
165
166 if (erofs_is_fscache_mode(sb)) {
167 fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
168 if (IS_ERR(fscache))
169 return PTR_ERR(fscache);
170 dif->fscache = fscache;
171 } else if (!sbi->devs->flatdev) {
172 file = erofs_is_fileio_mode(sbi) ?
173 filp_open(dif->path, O_RDONLY | O_LARGEFILE, 0) :
174 bdev_file_open_by_path(dif->path,
175 BLK_OPEN_READ, sb->s_type, NULL);
176 if (IS_ERR(file))
177 return PTR_ERR(file);
178
179 if (!erofs_is_fileio_mode(sbi)) {
180 dif->dax_dev = fs_dax_get_by_bdev(file_bdev(file),
181 &dif->dax_part_off, NULL, NULL);
182 } else if (!S_ISREG(file_inode(file)->i_mode)) {
183 fput(file);
184 return -EINVAL;
185 }
186 dif->file = file;
187 }
188
189 dif->blocks = le32_to_cpu(dis->blocks);
190 dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
191 sbi->total_blocks += dif->blocks;
192 *pos += EROFS_DEVT_SLOT_SIZE;
193 return 0;
194}
195
196static int erofs_scan_devices(struct super_block *sb,
197 struct erofs_super_block *dsb)
198{
199 struct erofs_sb_info *sbi = EROFS_SB(sb);
200 unsigned int ondisk_extradevs;
201 erofs_off_t pos;
202 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
203 struct erofs_device_info *dif;
204 int id, err = 0;
205
206 sbi->total_blocks = sbi->dif0.blocks;
207 if (!erofs_sb_has_device_table(sbi))
208 ondisk_extradevs = 0;
209 else
210 ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
211
212 if (sbi->devs->extra_devices &&
213 ondisk_extradevs != sbi->devs->extra_devices) {
214 erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
215 ondisk_extradevs, sbi->devs->extra_devices);
216 return -EINVAL;
217 }
218 if (!ondisk_extradevs)
219 return 0;
220
221 if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
222 sbi->devs->flatdev = true;
223
224 sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
225 pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
226 down_read(&sbi->devs->rwsem);
227 if (sbi->devs->extra_devices) {
228 idr_for_each_entry(&sbi->devs->tree, dif, id) {
229 err = erofs_init_device(&buf, sb, dif, &pos);
230 if (err)
231 break;
232 }
233 } else {
234 for (id = 0; id < ondisk_extradevs; id++) {
235 dif = kzalloc(sizeof(*dif), GFP_KERNEL);
236 if (!dif) {
237 err = -ENOMEM;
238 break;
239 }
240
241 err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
242 if (err < 0) {
243 kfree(dif);
244 break;
245 }
246 ++sbi->devs->extra_devices;
247
248 err = erofs_init_device(&buf, sb, dif, &pos);
249 if (err)
250 break;
251 }
252 }
253 up_read(&sbi->devs->rwsem);
254 erofs_put_metabuf(&buf);
255 return err;
256}
257
258static int erofs_read_superblock(struct super_block *sb)
259{
260 struct erofs_sb_info *sbi = EROFS_SB(sb);
261 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
262 struct erofs_super_block *dsb;
263 void *data;
264 int ret;
265
266 data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
267 if (IS_ERR(data)) {
268 erofs_err(sb, "cannot read erofs superblock");
269 return PTR_ERR(data);
270 }
271
272 dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
273 ret = -EINVAL;
274 if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
275 erofs_err(sb, "cannot find valid erofs superblock");
276 goto out;
277 }
278
279 sbi->blkszbits = dsb->blkszbits;
280 if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
281 erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
282 goto out;
283 }
284 if (dsb->dirblkbits) {
285 erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
286 goto out;
287 }
288
289 sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
290 if (erofs_sb_has_sb_chksum(sbi)) {
291 ret = erofs_superblock_csum_verify(sb, data);
292 if (ret)
293 goto out;
294 }
295
296 ret = -EINVAL;
297 sbi->feature_incompat = le32_to_cpu(dsb->feature_incompat);
298 if (sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT) {
299 erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
300 sbi->feature_incompat & ~EROFS_ALL_FEATURE_INCOMPAT);
301 goto out;
302 }
303
304 sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
305 if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
306 erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
307 sbi->sb_size);
308 goto out;
309 }
310 sbi->dif0.blocks = le32_to_cpu(dsb->blocks);
311 sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
312#ifdef CONFIG_EROFS_FS_XATTR
313 sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
314 sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
315 sbi->xattr_prefix_count = dsb->xattr_prefix_count;
316 sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
317#endif
318 sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
319 sbi->root_nid = le16_to_cpu(dsb->root_nid);
320 sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
321 sbi->inos = le64_to_cpu(dsb->inos);
322
323 sbi->build_time = le64_to_cpu(dsb->build_time);
324 sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
325
326 super_set_uuid(sb, (void *)dsb->uuid, sizeof(dsb->uuid));
327
328 ret = strscpy(sbi->volume_name, dsb->volume_name,
329 sizeof(dsb->volume_name));
330 if (ret < 0) { /* -E2BIG */
331 erofs_err(sb, "bad volume name without NIL terminator");
332 ret = -EFSCORRUPTED;
333 goto out;
334 }
335
336 /* parse on-disk compression configurations */
337 ret = z_erofs_parse_cfgs(sb, dsb);
338 if (ret < 0)
339 goto out;
340
341 /* handle multiple devices */
342 ret = erofs_scan_devices(sb, dsb);
343
344 if (erofs_is_fscache_mode(sb))
345 erofs_info(sb, "[deprecated] fscache-based on-demand read feature in use. Use at your own risk!");
346out:
347 erofs_put_metabuf(&buf);
348 return ret;
349}
350
351static void erofs_default_options(struct erofs_sb_info *sbi)
352{
353#ifdef CONFIG_EROFS_FS_ZIP
354 sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
355 sbi->opt.max_sync_decompress_pages = 3;
356 sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
357#endif
358#ifdef CONFIG_EROFS_FS_XATTR
359 set_opt(&sbi->opt, XATTR_USER);
360#endif
361#ifdef CONFIG_EROFS_FS_POSIX_ACL
362 set_opt(&sbi->opt, POSIX_ACL);
363#endif
364}
365
366enum {
367 Opt_user_xattr, Opt_acl, Opt_cache_strategy, Opt_dax, Opt_dax_enum,
368 Opt_device, Opt_fsid, Opt_domain_id, Opt_directio,
369 Opt_err
370};
371
372static const struct constant_table erofs_param_cache_strategy[] = {
373 {"disabled", EROFS_ZIP_CACHE_DISABLED},
374 {"readahead", EROFS_ZIP_CACHE_READAHEAD},
375 {"readaround", EROFS_ZIP_CACHE_READAROUND},
376 {}
377};
378
379static const struct constant_table erofs_dax_param_enums[] = {
380 {"always", EROFS_MOUNT_DAX_ALWAYS},
381 {"never", EROFS_MOUNT_DAX_NEVER},
382 {}
383};
384
385static const struct fs_parameter_spec erofs_fs_parameters[] = {
386 fsparam_flag_no("user_xattr", Opt_user_xattr),
387 fsparam_flag_no("acl", Opt_acl),
388 fsparam_enum("cache_strategy", Opt_cache_strategy,
389 erofs_param_cache_strategy),
390 fsparam_flag("dax", Opt_dax),
391 fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
392 fsparam_string("device", Opt_device),
393 fsparam_string("fsid", Opt_fsid),
394 fsparam_string("domain_id", Opt_domain_id),
395 fsparam_flag_no("directio", Opt_directio),
396 {}
397};
398
399static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
400{
401#ifdef CONFIG_FS_DAX
402 struct erofs_sb_info *sbi = fc->s_fs_info;
403
404 switch (mode) {
405 case EROFS_MOUNT_DAX_ALWAYS:
406 set_opt(&sbi->opt, DAX_ALWAYS);
407 clear_opt(&sbi->opt, DAX_NEVER);
408 return true;
409 case EROFS_MOUNT_DAX_NEVER:
410 set_opt(&sbi->opt, DAX_NEVER);
411 clear_opt(&sbi->opt, DAX_ALWAYS);
412 return true;
413 default:
414 DBG_BUGON(1);
415 return false;
416 }
417#else
418 errorfc(fc, "dax options not supported");
419 return false;
420#endif
421}
422
423static int erofs_fc_parse_param(struct fs_context *fc,
424 struct fs_parameter *param)
425{
426 struct erofs_sb_info *sbi = fc->s_fs_info;
427 struct fs_parse_result result;
428 struct erofs_device_info *dif;
429 int opt, ret;
430
431 opt = fs_parse(fc, erofs_fs_parameters, param, &result);
432 if (opt < 0)
433 return opt;
434
435 switch (opt) {
436 case Opt_user_xattr:
437#ifdef CONFIG_EROFS_FS_XATTR
438 if (result.boolean)
439 set_opt(&sbi->opt, XATTR_USER);
440 else
441 clear_opt(&sbi->opt, XATTR_USER);
442#else
443 errorfc(fc, "{,no}user_xattr options not supported");
444#endif
445 break;
446 case Opt_acl:
447#ifdef CONFIG_EROFS_FS_POSIX_ACL
448 if (result.boolean)
449 set_opt(&sbi->opt, POSIX_ACL);
450 else
451 clear_opt(&sbi->opt, POSIX_ACL);
452#else
453 errorfc(fc, "{,no}acl options not supported");
454#endif
455 break;
456 case Opt_cache_strategy:
457#ifdef CONFIG_EROFS_FS_ZIP
458 sbi->opt.cache_strategy = result.uint_32;
459#else
460 errorfc(fc, "compression not supported, cache_strategy ignored");
461#endif
462 break;
463 case Opt_dax:
464 if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
465 return -EINVAL;
466 break;
467 case Opt_dax_enum:
468 if (!erofs_fc_set_dax_mode(fc, result.uint_32))
469 return -EINVAL;
470 break;
471 case Opt_device:
472 dif = kzalloc(sizeof(*dif), GFP_KERNEL);
473 if (!dif)
474 return -ENOMEM;
475 dif->path = kstrdup(param->string, GFP_KERNEL);
476 if (!dif->path) {
477 kfree(dif);
478 return -ENOMEM;
479 }
480 down_write(&sbi->devs->rwsem);
481 ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
482 up_write(&sbi->devs->rwsem);
483 if (ret < 0) {
484 kfree(dif->path);
485 kfree(dif);
486 return ret;
487 }
488 ++sbi->devs->extra_devices;
489 break;
490#ifdef CONFIG_EROFS_FS_ONDEMAND
491 case Opt_fsid:
492 kfree(sbi->fsid);
493 sbi->fsid = kstrdup(param->string, GFP_KERNEL);
494 if (!sbi->fsid)
495 return -ENOMEM;
496 break;
497 case Opt_domain_id:
498 kfree(sbi->domain_id);
499 sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
500 if (!sbi->domain_id)
501 return -ENOMEM;
502 break;
503#else
504 case Opt_fsid:
505 case Opt_domain_id:
506 errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
507 break;
508#endif
509 case Opt_directio:
510#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
511 if (result.boolean)
512 set_opt(&sbi->opt, DIRECT_IO);
513 else
514 clear_opt(&sbi->opt, DIRECT_IO);
515#else
516 errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
517#endif
518 break;
519 default:
520 return -ENOPARAM;
521 }
522 return 0;
523}
524
525static struct inode *erofs_nfs_get_inode(struct super_block *sb,
526 u64 ino, u32 generation)
527{
528 return erofs_iget(sb, ino);
529}
530
531static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
532 struct fid *fid, int fh_len, int fh_type)
533{
534 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
535 erofs_nfs_get_inode);
536}
537
538static struct dentry *erofs_fh_to_parent(struct super_block *sb,
539 struct fid *fid, int fh_len, int fh_type)
540{
541 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
542 erofs_nfs_get_inode);
543}
544
545static struct dentry *erofs_get_parent(struct dentry *child)
546{
547 erofs_nid_t nid;
548 unsigned int d_type;
549 int err;
550
551 err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
552 if (err)
553 return ERR_PTR(err);
554 return d_obtain_alias(erofs_iget(child->d_sb, nid));
555}
556
557static const struct export_operations erofs_export_ops = {
558 .encode_fh = generic_encode_ino32_fh,
559 .fh_to_dentry = erofs_fh_to_dentry,
560 .fh_to_parent = erofs_fh_to_parent,
561 .get_parent = erofs_get_parent,
562};
563
564static void erofs_set_sysfs_name(struct super_block *sb)
565{
566 struct erofs_sb_info *sbi = EROFS_SB(sb);
567
568 if (sbi->domain_id)
569 super_set_sysfs_name_generic(sb, "%s,%s", sbi->domain_id,
570 sbi->fsid);
571 else if (sbi->fsid)
572 super_set_sysfs_name_generic(sb, "%s", sbi->fsid);
573 else if (erofs_is_fileio_mode(sbi))
574 super_set_sysfs_name_generic(sb, "%s",
575 bdi_dev_name(sb->s_bdi));
576 else
577 super_set_sysfs_name_id(sb);
578}
579
580static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
581{
582 struct inode *inode;
583 struct erofs_sb_info *sbi = EROFS_SB(sb);
584 int err;
585
586 sb->s_magic = EROFS_SUPER_MAGIC;
587 sb->s_flags |= SB_RDONLY | SB_NOATIME;
588 sb->s_maxbytes = MAX_LFS_FILESIZE;
589 sb->s_op = &erofs_sops;
590
591 sbi->blkszbits = PAGE_SHIFT;
592 if (!sb->s_bdev) {
593 sb->s_blocksize = PAGE_SIZE;
594 sb->s_blocksize_bits = PAGE_SHIFT;
595
596 if (erofs_is_fscache_mode(sb)) {
597 err = erofs_fscache_register_fs(sb);
598 if (err)
599 return err;
600 }
601 err = super_setup_bdi(sb);
602 if (err)
603 return err;
604 } else {
605 if (!sb_set_blocksize(sb, PAGE_SIZE)) {
606 errorfc(fc, "failed to set initial blksize");
607 return -EINVAL;
608 }
609
610 sbi->dif0.dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
611 &sbi->dif0.dax_part_off, NULL, NULL);
612 }
613
614 err = erofs_read_superblock(sb);
615 if (err)
616 return err;
617
618 if (sb->s_blocksize_bits != sbi->blkszbits) {
619 if (erofs_is_fscache_mode(sb)) {
620 errorfc(fc, "unsupported blksize for fscache mode");
621 return -EINVAL;
622 }
623
624 if (erofs_is_fileio_mode(sbi)) {
625 sb->s_blocksize = 1 << sbi->blkszbits;
626 sb->s_blocksize_bits = sbi->blkszbits;
627 } else if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
628 errorfc(fc, "failed to set erofs blksize");
629 return -EINVAL;
630 }
631 }
632
633 if (test_opt(&sbi->opt, DAX_ALWAYS)) {
634 if (!sbi->dif0.dax_dev) {
635 errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
636 clear_opt(&sbi->opt, DAX_ALWAYS);
637 } else if (sbi->blkszbits != PAGE_SHIFT) {
638 errorfc(fc, "unsupported blocksize for DAX");
639 clear_opt(&sbi->opt, DAX_ALWAYS);
640 }
641 }
642
643 sb->s_time_gran = 1;
644 sb->s_xattr = erofs_xattr_handlers;
645 sb->s_export_op = &erofs_export_ops;
646
647 if (test_opt(&sbi->opt, POSIX_ACL))
648 sb->s_flags |= SB_POSIXACL;
649 else
650 sb->s_flags &= ~SB_POSIXACL;
651
652#ifdef CONFIG_EROFS_FS_ZIP
653 xa_init(&sbi->managed_pslots);
654#endif
655
656 inode = erofs_iget(sb, sbi->root_nid);
657 if (IS_ERR(inode))
658 return PTR_ERR(inode);
659
660 if (!S_ISDIR(inode->i_mode)) {
661 erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
662 sbi->root_nid, inode->i_mode);
663 iput(inode);
664 return -EINVAL;
665 }
666
667 sb->s_root = d_make_root(inode);
668 if (!sb->s_root)
669 return -ENOMEM;
670
671 erofs_shrinker_register(sb);
672 if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
673 sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
674 if (IS_ERR(sbi->packed_inode)) {
675 err = PTR_ERR(sbi->packed_inode);
676 sbi->packed_inode = NULL;
677 return err;
678 }
679 }
680 err = erofs_init_managed_cache(sb);
681 if (err)
682 return err;
683
684 err = erofs_xattr_prefixes_init(sb);
685 if (err)
686 return err;
687
688 erofs_set_sysfs_name(sb);
689 err = erofs_register_sysfs(sb);
690 if (err)
691 return err;
692
693 erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid);
694 return 0;
695}
696
697static int erofs_fc_get_tree(struct fs_context *fc)
698{
699 struct erofs_sb_info *sbi = fc->s_fs_info;
700 int ret;
701
702 if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
703 return get_tree_nodev(fc, erofs_fc_fill_super);
704
705 ret = get_tree_bdev_flags(fc, erofs_fc_fill_super,
706 IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) ?
707 GET_TREE_BDEV_QUIET_LOOKUP : 0);
708#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
709 if (ret == -ENOTBLK) {
710 struct file *file;
711
712 if (!fc->source)
713 return invalf(fc, "No source specified");
714 file = filp_open(fc->source, O_RDONLY | O_LARGEFILE, 0);
715 if (IS_ERR(file))
716 return PTR_ERR(file);
717 sbi->dif0.file = file;
718
719 if (S_ISREG(file_inode(sbi->dif0.file)->i_mode) &&
720 sbi->dif0.file->f_mapping->a_ops->read_folio)
721 return get_tree_nodev(fc, erofs_fc_fill_super);
722 }
723#endif
724 return ret;
725}
726
727static int erofs_fc_reconfigure(struct fs_context *fc)
728{
729 struct super_block *sb = fc->root->d_sb;
730 struct erofs_sb_info *sbi = EROFS_SB(sb);
731 struct erofs_sb_info *new_sbi = fc->s_fs_info;
732
733 DBG_BUGON(!sb_rdonly(sb));
734
735 if (new_sbi->fsid || new_sbi->domain_id)
736 erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
737
738 if (test_opt(&new_sbi->opt, POSIX_ACL))
739 fc->sb_flags |= SB_POSIXACL;
740 else
741 fc->sb_flags &= ~SB_POSIXACL;
742
743 sbi->opt = new_sbi->opt;
744
745 fc->sb_flags |= SB_RDONLY;
746 return 0;
747}
748
749static int erofs_release_device_info(int id, void *ptr, void *data)
750{
751 struct erofs_device_info *dif = ptr;
752
753 fs_put_dax(dif->dax_dev, NULL);
754 if (dif->file)
755 fput(dif->file);
756 erofs_fscache_unregister_cookie(dif->fscache);
757 dif->fscache = NULL;
758 kfree(dif->path);
759 kfree(dif);
760 return 0;
761}
762
763static void erofs_free_dev_context(struct erofs_dev_context *devs)
764{
765 if (!devs)
766 return;
767 idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
768 idr_destroy(&devs->tree);
769 kfree(devs);
770}
771
772static void erofs_sb_free(struct erofs_sb_info *sbi)
773{
774 erofs_free_dev_context(sbi->devs);
775 kfree(sbi->fsid);
776 kfree(sbi->domain_id);
777 if (sbi->dif0.file)
778 fput(sbi->dif0.file);
779 kfree(sbi);
780}
781
782static void erofs_fc_free(struct fs_context *fc)
783{
784 struct erofs_sb_info *sbi = fc->s_fs_info;
785
786 if (sbi) /* free here if an error occurs before transferring to sb */
787 erofs_sb_free(sbi);
788}
789
790static const struct fs_context_operations erofs_context_ops = {
791 .parse_param = erofs_fc_parse_param,
792 .get_tree = erofs_fc_get_tree,
793 .reconfigure = erofs_fc_reconfigure,
794 .free = erofs_fc_free,
795};
796
797static int erofs_init_fs_context(struct fs_context *fc)
798{
799 struct erofs_sb_info *sbi;
800
801 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
802 if (!sbi)
803 return -ENOMEM;
804
805 sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
806 if (!sbi->devs) {
807 kfree(sbi);
808 return -ENOMEM;
809 }
810 fc->s_fs_info = sbi;
811
812 idr_init(&sbi->devs->tree);
813 init_rwsem(&sbi->devs->rwsem);
814 erofs_default_options(sbi);
815 fc->ops = &erofs_context_ops;
816 return 0;
817}
818
819static void erofs_kill_sb(struct super_block *sb)
820{
821 struct erofs_sb_info *sbi = EROFS_SB(sb);
822
823 if ((IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid) ||
824 sbi->dif0.file)
825 kill_anon_super(sb);
826 else
827 kill_block_super(sb);
828 fs_put_dax(sbi->dif0.dax_dev, NULL);
829 erofs_fscache_unregister_fs(sb);
830 erofs_sb_free(sbi);
831 sb->s_fs_info = NULL;
832}
833
834static void erofs_put_super(struct super_block *sb)
835{
836 struct erofs_sb_info *const sbi = EROFS_SB(sb);
837
838 DBG_BUGON(!sbi);
839
840 erofs_unregister_sysfs(sb);
841 erofs_shrinker_unregister(sb);
842 erofs_xattr_prefixes_cleanup(sb);
843#ifdef CONFIG_EROFS_FS_ZIP
844 iput(sbi->managed_cache);
845 sbi->managed_cache = NULL;
846#endif
847 iput(sbi->packed_inode);
848 sbi->packed_inode = NULL;
849 erofs_free_dev_context(sbi->devs);
850 sbi->devs = NULL;
851 erofs_fscache_unregister_fs(sb);
852}
853
854static struct file_system_type erofs_fs_type = {
855 .owner = THIS_MODULE,
856 .name = "erofs",
857 .init_fs_context = erofs_init_fs_context,
858 .kill_sb = erofs_kill_sb,
859 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
860};
861MODULE_ALIAS_FS("erofs");
862
863static int __init erofs_module_init(void)
864{
865 int err;
866
867 erofs_check_ondisk_layout_definitions();
868
869 erofs_inode_cachep = kmem_cache_create("erofs_inode",
870 sizeof(struct erofs_inode), 0,
871 SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
872 erofs_inode_init_once);
873 if (!erofs_inode_cachep)
874 return -ENOMEM;
875
876 err = erofs_init_shrinker();
877 if (err)
878 goto shrinker_err;
879
880 err = z_erofs_init_subsystem();
881 if (err)
882 goto zip_err;
883
884 err = erofs_init_sysfs();
885 if (err)
886 goto sysfs_err;
887
888 err = register_filesystem(&erofs_fs_type);
889 if (err)
890 goto fs_err;
891
892 return 0;
893
894fs_err:
895 erofs_exit_sysfs();
896sysfs_err:
897 z_erofs_exit_subsystem();
898zip_err:
899 erofs_exit_shrinker();
900shrinker_err:
901 kmem_cache_destroy(erofs_inode_cachep);
902 return err;
903}
904
905static void __exit erofs_module_exit(void)
906{
907 unregister_filesystem(&erofs_fs_type);
908
909 /* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
910 rcu_barrier();
911
912 erofs_exit_sysfs();
913 z_erofs_exit_subsystem();
914 erofs_exit_shrinker();
915 kmem_cache_destroy(erofs_inode_cachep);
916}
917
918static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
919{
920 struct super_block *sb = dentry->d_sb;
921 struct erofs_sb_info *sbi = EROFS_SB(sb);
922
923 buf->f_type = sb->s_magic;
924 buf->f_bsize = sb->s_blocksize;
925 buf->f_blocks = sbi->total_blocks;
926 buf->f_bfree = buf->f_bavail = 0;
927 buf->f_files = ULLONG_MAX;
928 buf->f_ffree = ULLONG_MAX - sbi->inos;
929 buf->f_namelen = EROFS_NAME_LEN;
930
931 if (uuid_is_null(&sb->s_uuid))
932 buf->f_fsid = u64_to_fsid(!sb->s_bdev ? 0 :
933 huge_encode_dev(sb->s_bdev->bd_dev));
934 else
935 buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
936 return 0;
937}
938
939static int erofs_show_options(struct seq_file *seq, struct dentry *root)
940{
941 struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
942 struct erofs_mount_opts *opt = &sbi->opt;
943
944 if (IS_ENABLED(CONFIG_EROFS_FS_XATTR))
945 seq_puts(seq, test_opt(opt, XATTR_USER) ?
946 ",user_xattr" : ",nouser_xattr");
947 if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL))
948 seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl");
949 if (IS_ENABLED(CONFIG_EROFS_FS_ZIP))
950 seq_printf(seq, ",cache_strategy=%s",
951 erofs_param_cache_strategy[opt->cache_strategy].name);
952 if (test_opt(opt, DAX_ALWAYS))
953 seq_puts(seq, ",dax=always");
954 if (test_opt(opt, DAX_NEVER))
955 seq_puts(seq, ",dax=never");
956 if (erofs_is_fileio_mode(sbi) && test_opt(opt, DIRECT_IO))
957 seq_puts(seq, ",directio");
958#ifdef CONFIG_EROFS_FS_ONDEMAND
959 if (sbi->fsid)
960 seq_printf(seq, ",fsid=%s", sbi->fsid);
961 if (sbi->domain_id)
962 seq_printf(seq, ",domain_id=%s", sbi->domain_id);
963#endif
964 return 0;
965}
966
967const struct super_operations erofs_sops = {
968 .put_super = erofs_put_super,
969 .alloc_inode = erofs_alloc_inode,
970 .free_inode = erofs_free_inode,
971 .statfs = erofs_statfs,
972 .show_options = erofs_show_options,
973};
974
975module_init(erofs_module_init);
976module_exit(erofs_module_exit);
977
978MODULE_DESCRIPTION("Enhanced ROM File System");
979MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
980MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Copyright (C) 2021, Alibaba Cloud
6 */
7#include <linux/module.h>
8#include <linux/buffer_head.h>
9#include <linux/statfs.h>
10#include <linux/parser.h>
11#include <linux/seq_file.h>
12#include <linux/crc32c.h>
13#include <linux/fs_context.h>
14#include <linux/fs_parser.h>
15#include <linux/dax.h>
16#include <linux/exportfs.h>
17#include "xattr.h"
18
19#define CREATE_TRACE_POINTS
20#include <trace/events/erofs.h>
21
22static struct kmem_cache *erofs_inode_cachep __read_mostly;
23
24void _erofs_err(struct super_block *sb, const char *function,
25 const char *fmt, ...)
26{
27 struct va_format vaf;
28 va_list args;
29
30 va_start(args, fmt);
31
32 vaf.fmt = fmt;
33 vaf.va = &args;
34
35 pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
36 va_end(args);
37}
38
39void _erofs_info(struct super_block *sb, const char *function,
40 const char *fmt, ...)
41{
42 struct va_format vaf;
43 va_list args;
44
45 va_start(args, fmt);
46
47 vaf.fmt = fmt;
48 vaf.va = &args;
49
50 pr_info("(device %s): %pV", sb->s_id, &vaf);
51 va_end(args);
52}
53
54static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
55{
56 struct erofs_super_block *dsb;
57 u32 expected_crc, crc;
58
59 dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET,
60 EROFS_BLKSIZ - EROFS_SUPER_OFFSET, GFP_KERNEL);
61 if (!dsb)
62 return -ENOMEM;
63
64 expected_crc = le32_to_cpu(dsb->checksum);
65 dsb->checksum = 0;
66 /* to allow for x86 boot sectors and other oddities. */
67 crc = crc32c(~0, dsb, EROFS_BLKSIZ - EROFS_SUPER_OFFSET);
68 kfree(dsb);
69
70 if (crc != expected_crc) {
71 erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
72 crc, expected_crc);
73 return -EBADMSG;
74 }
75 return 0;
76}
77
78static void erofs_inode_init_once(void *ptr)
79{
80 struct erofs_inode *vi = ptr;
81
82 inode_init_once(&vi->vfs_inode);
83}
84
85static struct inode *erofs_alloc_inode(struct super_block *sb)
86{
87 struct erofs_inode *vi =
88 alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);
89
90 if (!vi)
91 return NULL;
92
93 /* zero out everything except vfs_inode */
94 memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
95 return &vi->vfs_inode;
96}
97
98static void erofs_free_inode(struct inode *inode)
99{
100 struct erofs_inode *vi = EROFS_I(inode);
101
102 /* be careful of RCU symlink path */
103 if (inode->i_op == &erofs_fast_symlink_iops)
104 kfree(inode->i_link);
105 kfree(vi->xattr_shared_xattrs);
106
107 kmem_cache_free(erofs_inode_cachep, vi);
108}
109
110static bool check_layout_compatibility(struct super_block *sb,
111 struct erofs_super_block *dsb)
112{
113 const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
114
115 EROFS_SB(sb)->feature_incompat = feature;
116
117 /* check if current kernel meets all mandatory requirements */
118 if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
119 erofs_err(sb,
120 "unidentified incompatible feature %x, please upgrade kernel version",
121 feature & ~EROFS_ALL_FEATURE_INCOMPAT);
122 return false;
123 }
124 return true;
125}
126
127#ifdef CONFIG_EROFS_FS_ZIP
128/* read variable-sized metadata, offset will be aligned by 4-byte */
129static void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
130 erofs_off_t *offset, int *lengthp)
131{
132 u8 *buffer, *ptr;
133 int len, i, cnt;
134
135 *offset = round_up(*offset, 4);
136 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset), EROFS_KMAP);
137 if (IS_ERR(ptr))
138 return ptr;
139
140 len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(*offset)]);
141 if (!len)
142 len = U16_MAX + 1;
143 buffer = kmalloc(len, GFP_KERNEL);
144 if (!buffer)
145 return ERR_PTR(-ENOMEM);
146 *offset += sizeof(__le16);
147 *lengthp = len;
148
149 for (i = 0; i < len; i += cnt) {
150 cnt = min(EROFS_BLKSIZ - (int)erofs_blkoff(*offset), len - i);
151 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*offset),
152 EROFS_KMAP);
153 if (IS_ERR(ptr)) {
154 kfree(buffer);
155 return ptr;
156 }
157 memcpy(buffer + i, ptr + erofs_blkoff(*offset), cnt);
158 *offset += cnt;
159 }
160 return buffer;
161}
162
163static int erofs_load_compr_cfgs(struct super_block *sb,
164 struct erofs_super_block *dsb)
165{
166 struct erofs_sb_info *sbi = EROFS_SB(sb);
167 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
168 unsigned int algs, alg;
169 erofs_off_t offset;
170 int size, ret = 0;
171
172 sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
173 if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
174 erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
175 sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
176 return -EINVAL;
177 }
178
179 offset = EROFS_SUPER_OFFSET + sbi->sb_size;
180 alg = 0;
181 for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
182 void *data;
183
184 if (!(algs & 1))
185 continue;
186
187 data = erofs_read_metadata(sb, &buf, &offset, &size);
188 if (IS_ERR(data)) {
189 ret = PTR_ERR(data);
190 break;
191 }
192
193 switch (alg) {
194 case Z_EROFS_COMPRESSION_LZ4:
195 ret = z_erofs_load_lz4_config(sb, dsb, data, size);
196 break;
197 case Z_EROFS_COMPRESSION_LZMA:
198 ret = z_erofs_load_lzma_config(sb, dsb, data, size);
199 break;
200 default:
201 DBG_BUGON(1);
202 ret = -EFAULT;
203 }
204 kfree(data);
205 if (ret)
206 break;
207 }
208 erofs_put_metabuf(&buf);
209 return ret;
210}
211#else
212static int erofs_load_compr_cfgs(struct super_block *sb,
213 struct erofs_super_block *dsb)
214{
215 if (dsb->u1.available_compr_algs) {
216 erofs_err(sb, "try to load compressed fs when compression is disabled");
217 return -EINVAL;
218 }
219 return 0;
220}
221#endif
222
223static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
224 struct erofs_device_info *dif, erofs_off_t *pos)
225{
226 struct erofs_sb_info *sbi = EROFS_SB(sb);
227 struct erofs_fscache *fscache;
228 struct erofs_deviceslot *dis;
229 struct block_device *bdev;
230 void *ptr;
231
232 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(*pos), EROFS_KMAP);
233 if (IS_ERR(ptr))
234 return PTR_ERR(ptr);
235 dis = ptr + erofs_blkoff(*pos);
236
237 if (!dif->path) {
238 if (!dis->tag[0]) {
239 erofs_err(sb, "empty device tag @ pos %llu", *pos);
240 return -EINVAL;
241 }
242 dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
243 if (!dif->path)
244 return -ENOMEM;
245 }
246
247 if (erofs_is_fscache_mode(sb)) {
248 fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
249 if (IS_ERR(fscache))
250 return PTR_ERR(fscache);
251 dif->fscache = fscache;
252 } else {
253 bdev = blkdev_get_by_path(dif->path, FMODE_READ | FMODE_EXCL,
254 sb->s_type);
255 if (IS_ERR(bdev))
256 return PTR_ERR(bdev);
257 dif->bdev = bdev;
258 dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off,
259 NULL, NULL);
260 }
261
262 dif->blocks = le32_to_cpu(dis->blocks);
263 dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
264 sbi->total_blocks += dif->blocks;
265 *pos += EROFS_DEVT_SLOT_SIZE;
266 return 0;
267}
268
269static int erofs_scan_devices(struct super_block *sb,
270 struct erofs_super_block *dsb)
271{
272 struct erofs_sb_info *sbi = EROFS_SB(sb);
273 unsigned int ondisk_extradevs;
274 erofs_off_t pos;
275 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
276 struct erofs_device_info *dif;
277 int id, err = 0;
278
279 sbi->total_blocks = sbi->primarydevice_blocks;
280 if (!erofs_sb_has_device_table(sbi))
281 ondisk_extradevs = 0;
282 else
283 ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
284
285 if (sbi->devs->extra_devices &&
286 ondisk_extradevs != sbi->devs->extra_devices) {
287 erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
288 ondisk_extradevs, sbi->devs->extra_devices);
289 return -EINVAL;
290 }
291 if (!ondisk_extradevs)
292 return 0;
293
294 sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
295 pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
296 down_read(&sbi->devs->rwsem);
297 if (sbi->devs->extra_devices) {
298 idr_for_each_entry(&sbi->devs->tree, dif, id) {
299 err = erofs_init_device(&buf, sb, dif, &pos);
300 if (err)
301 break;
302 }
303 } else {
304 for (id = 0; id < ondisk_extradevs; id++) {
305 dif = kzalloc(sizeof(*dif), GFP_KERNEL);
306 if (!dif) {
307 err = -ENOMEM;
308 break;
309 }
310
311 err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
312 if (err < 0) {
313 kfree(dif);
314 break;
315 }
316 ++sbi->devs->extra_devices;
317
318 err = erofs_init_device(&buf, sb, dif, &pos);
319 if (err)
320 break;
321 }
322 }
323 up_read(&sbi->devs->rwsem);
324 erofs_put_metabuf(&buf);
325 return err;
326}
327
328static int erofs_read_superblock(struct super_block *sb)
329{
330 struct erofs_sb_info *sbi;
331 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
332 struct erofs_super_block *dsb;
333 unsigned int blkszbits;
334 void *data;
335 int ret;
336
337 data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
338 if (IS_ERR(data)) {
339 erofs_err(sb, "cannot read erofs superblock");
340 return PTR_ERR(data);
341 }
342
343 sbi = EROFS_SB(sb);
344 dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
345
346 ret = -EINVAL;
347 if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
348 erofs_err(sb, "cannot find valid erofs superblock");
349 goto out;
350 }
351
352 sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
353 if (erofs_sb_has_sb_chksum(sbi)) {
354 ret = erofs_superblock_csum_verify(sb, data);
355 if (ret)
356 goto out;
357 }
358
359 ret = -EINVAL;
360 blkszbits = dsb->blkszbits;
361 /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
362 if (blkszbits != LOG_BLOCK_SIZE) {
363 erofs_err(sb, "blkszbits %u isn't supported on this platform",
364 blkszbits);
365 goto out;
366 }
367
368 if (!check_layout_compatibility(sb, dsb))
369 goto out;
370
371 sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
372 if (sbi->sb_size > EROFS_BLKSIZ) {
373 erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
374 sbi->sb_size);
375 goto out;
376 }
377 sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
378 sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
379#ifdef CONFIG_EROFS_FS_XATTR
380 sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
381#endif
382 sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
383 sbi->root_nid = le16_to_cpu(dsb->root_nid);
384#ifdef CONFIG_EROFS_FS_ZIP
385 sbi->packed_inode = NULL;
386 if (erofs_sb_has_fragments(sbi) && dsb->packed_nid) {
387 sbi->packed_inode =
388 erofs_iget(sb, le64_to_cpu(dsb->packed_nid));
389 if (IS_ERR(sbi->packed_inode)) {
390 ret = PTR_ERR(sbi->packed_inode);
391 goto out;
392 }
393 }
394#endif
395 sbi->inos = le64_to_cpu(dsb->inos);
396
397 sbi->build_time = le64_to_cpu(dsb->build_time);
398 sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
399
400 memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
401
402 ret = strscpy(sbi->volume_name, dsb->volume_name,
403 sizeof(dsb->volume_name));
404 if (ret < 0) { /* -E2BIG */
405 erofs_err(sb, "bad volume name without NIL terminator");
406 ret = -EFSCORRUPTED;
407 goto out;
408 }
409
410 /* parse on-disk compression configurations */
411 if (erofs_sb_has_compr_cfgs(sbi))
412 ret = erofs_load_compr_cfgs(sb, dsb);
413 else
414 ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
415 if (ret < 0)
416 goto out;
417
418 /* handle multiple devices */
419 ret = erofs_scan_devices(sb, dsb);
420
421 if (erofs_sb_has_ztailpacking(sbi))
422 erofs_info(sb, "EXPERIMENTAL compressed inline data feature in use. Use at your own risk!");
423 if (erofs_is_fscache_mode(sb))
424 erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
425 if (erofs_sb_has_fragments(sbi))
426 erofs_info(sb, "EXPERIMENTAL compressed fragments feature in use. Use at your own risk!");
427 if (erofs_sb_has_dedupe(sbi))
428 erofs_info(sb, "EXPERIMENTAL global deduplication feature in use. Use at your own risk!");
429out:
430 erofs_put_metabuf(&buf);
431 return ret;
432}
433
434/* set up default EROFS parameters */
435static void erofs_default_options(struct erofs_fs_context *ctx)
436{
437#ifdef CONFIG_EROFS_FS_ZIP
438 ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
439 ctx->opt.max_sync_decompress_pages = 3;
440 ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
441#endif
442#ifdef CONFIG_EROFS_FS_XATTR
443 set_opt(&ctx->opt, XATTR_USER);
444#endif
445#ifdef CONFIG_EROFS_FS_POSIX_ACL
446 set_opt(&ctx->opt, POSIX_ACL);
447#endif
448}
449
450enum {
451 Opt_user_xattr,
452 Opt_acl,
453 Opt_cache_strategy,
454 Opt_dax,
455 Opt_dax_enum,
456 Opt_device,
457 Opt_fsid,
458 Opt_domain_id,
459 Opt_err
460};
461
462static const struct constant_table erofs_param_cache_strategy[] = {
463 {"disabled", EROFS_ZIP_CACHE_DISABLED},
464 {"readahead", EROFS_ZIP_CACHE_READAHEAD},
465 {"readaround", EROFS_ZIP_CACHE_READAROUND},
466 {}
467};
468
469static const struct constant_table erofs_dax_param_enums[] = {
470 {"always", EROFS_MOUNT_DAX_ALWAYS},
471 {"never", EROFS_MOUNT_DAX_NEVER},
472 {}
473};
474
475static const struct fs_parameter_spec erofs_fs_parameters[] = {
476 fsparam_flag_no("user_xattr", Opt_user_xattr),
477 fsparam_flag_no("acl", Opt_acl),
478 fsparam_enum("cache_strategy", Opt_cache_strategy,
479 erofs_param_cache_strategy),
480 fsparam_flag("dax", Opt_dax),
481 fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
482 fsparam_string("device", Opt_device),
483 fsparam_string("fsid", Opt_fsid),
484 fsparam_string("domain_id", Opt_domain_id),
485 {}
486};
487
488static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
489{
490#ifdef CONFIG_FS_DAX
491 struct erofs_fs_context *ctx = fc->fs_private;
492
493 switch (mode) {
494 case EROFS_MOUNT_DAX_ALWAYS:
495 warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
496 set_opt(&ctx->opt, DAX_ALWAYS);
497 clear_opt(&ctx->opt, DAX_NEVER);
498 return true;
499 case EROFS_MOUNT_DAX_NEVER:
500 set_opt(&ctx->opt, DAX_NEVER);
501 clear_opt(&ctx->opt, DAX_ALWAYS);
502 return true;
503 default:
504 DBG_BUGON(1);
505 return false;
506 }
507#else
508 errorfc(fc, "dax options not supported");
509 return false;
510#endif
511}
512
513static int erofs_fc_parse_param(struct fs_context *fc,
514 struct fs_parameter *param)
515{
516 struct erofs_fs_context *ctx = fc->fs_private;
517 struct fs_parse_result result;
518 struct erofs_device_info *dif;
519 int opt, ret;
520
521 opt = fs_parse(fc, erofs_fs_parameters, param, &result);
522 if (opt < 0)
523 return opt;
524
525 switch (opt) {
526 case Opt_user_xattr:
527#ifdef CONFIG_EROFS_FS_XATTR
528 if (result.boolean)
529 set_opt(&ctx->opt, XATTR_USER);
530 else
531 clear_opt(&ctx->opt, XATTR_USER);
532#else
533 errorfc(fc, "{,no}user_xattr options not supported");
534#endif
535 break;
536 case Opt_acl:
537#ifdef CONFIG_EROFS_FS_POSIX_ACL
538 if (result.boolean)
539 set_opt(&ctx->opt, POSIX_ACL);
540 else
541 clear_opt(&ctx->opt, POSIX_ACL);
542#else
543 errorfc(fc, "{,no}acl options not supported");
544#endif
545 break;
546 case Opt_cache_strategy:
547#ifdef CONFIG_EROFS_FS_ZIP
548 ctx->opt.cache_strategy = result.uint_32;
549#else
550 errorfc(fc, "compression not supported, cache_strategy ignored");
551#endif
552 break;
553 case Opt_dax:
554 if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
555 return -EINVAL;
556 break;
557 case Opt_dax_enum:
558 if (!erofs_fc_set_dax_mode(fc, result.uint_32))
559 return -EINVAL;
560 break;
561 case Opt_device:
562 dif = kzalloc(sizeof(*dif), GFP_KERNEL);
563 if (!dif)
564 return -ENOMEM;
565 dif->path = kstrdup(param->string, GFP_KERNEL);
566 if (!dif->path) {
567 kfree(dif);
568 return -ENOMEM;
569 }
570 down_write(&ctx->devs->rwsem);
571 ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
572 up_write(&ctx->devs->rwsem);
573 if (ret < 0) {
574 kfree(dif->path);
575 kfree(dif);
576 return ret;
577 }
578 ++ctx->devs->extra_devices;
579 break;
580#ifdef CONFIG_EROFS_FS_ONDEMAND
581 case Opt_fsid:
582 kfree(ctx->fsid);
583 ctx->fsid = kstrdup(param->string, GFP_KERNEL);
584 if (!ctx->fsid)
585 return -ENOMEM;
586 break;
587 case Opt_domain_id:
588 kfree(ctx->domain_id);
589 ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
590 if (!ctx->domain_id)
591 return -ENOMEM;
592 break;
593#else
594 case Opt_fsid:
595 case Opt_domain_id:
596 errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
597 break;
598#endif
599 default:
600 return -ENOPARAM;
601 }
602 return 0;
603}
604
605#ifdef CONFIG_EROFS_FS_ZIP
606static const struct address_space_operations managed_cache_aops;
607
608static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
609{
610 bool ret = true;
611 struct address_space *const mapping = folio->mapping;
612
613 DBG_BUGON(!folio_test_locked(folio));
614 DBG_BUGON(mapping->a_ops != &managed_cache_aops);
615
616 if (folio_test_private(folio))
617 ret = erofs_try_to_free_cached_page(&folio->page);
618
619 return ret;
620}
621
622/*
623 * It will be called only on inode eviction. In case that there are still some
624 * decompression requests in progress, wait with rescheduling for a bit here.
625 * We could introduce an extra locking instead but it seems unnecessary.
626 */
627static void erofs_managed_cache_invalidate_folio(struct folio *folio,
628 size_t offset, size_t length)
629{
630 const size_t stop = length + offset;
631
632 DBG_BUGON(!folio_test_locked(folio));
633
634 /* Check for potential overflow in debug mode */
635 DBG_BUGON(stop > folio_size(folio) || stop < length);
636
637 if (offset == 0 && stop == folio_size(folio))
638 while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
639 cond_resched();
640}
641
642static const struct address_space_operations managed_cache_aops = {
643 .release_folio = erofs_managed_cache_release_folio,
644 .invalidate_folio = erofs_managed_cache_invalidate_folio,
645};
646
647static int erofs_init_managed_cache(struct super_block *sb)
648{
649 struct erofs_sb_info *const sbi = EROFS_SB(sb);
650 struct inode *const inode = new_inode(sb);
651
652 if (!inode)
653 return -ENOMEM;
654
655 set_nlink(inode, 1);
656 inode->i_size = OFFSET_MAX;
657
658 inode->i_mapping->a_ops = &managed_cache_aops;
659 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
660 sbi->managed_cache = inode;
661 return 0;
662}
663#else
664static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
665#endif
666
667static struct inode *erofs_nfs_get_inode(struct super_block *sb,
668 u64 ino, u32 generation)
669{
670 return erofs_iget(sb, ino);
671}
672
673static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
674 struct fid *fid, int fh_len, int fh_type)
675{
676 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
677 erofs_nfs_get_inode);
678}
679
680static struct dentry *erofs_fh_to_parent(struct super_block *sb,
681 struct fid *fid, int fh_len, int fh_type)
682{
683 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
684 erofs_nfs_get_inode);
685}
686
687static struct dentry *erofs_get_parent(struct dentry *child)
688{
689 erofs_nid_t nid;
690 unsigned int d_type;
691 int err;
692
693 err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
694 if (err)
695 return ERR_PTR(err);
696 return d_obtain_alias(erofs_iget(child->d_sb, nid));
697}
698
699static const struct export_operations erofs_export_ops = {
700 .fh_to_dentry = erofs_fh_to_dentry,
701 .fh_to_parent = erofs_fh_to_parent,
702 .get_parent = erofs_get_parent,
703};
704
705static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
706{
707 static const struct tree_descr empty_descr = {""};
708
709 return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
710}
711
712static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
713{
714 struct inode *inode;
715 struct erofs_sb_info *sbi;
716 struct erofs_fs_context *ctx = fc->fs_private;
717 int err;
718
719 sb->s_magic = EROFS_SUPER_MAGIC;
720 sb->s_flags |= SB_RDONLY | SB_NOATIME;
721 sb->s_maxbytes = MAX_LFS_FILESIZE;
722 sb->s_op = &erofs_sops;
723
724 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
725 if (!sbi)
726 return -ENOMEM;
727
728 sb->s_fs_info = sbi;
729 sbi->opt = ctx->opt;
730 sbi->devs = ctx->devs;
731 ctx->devs = NULL;
732 sbi->fsid = ctx->fsid;
733 ctx->fsid = NULL;
734 sbi->domain_id = ctx->domain_id;
735 ctx->domain_id = NULL;
736
737 if (erofs_is_fscache_mode(sb)) {
738 sb->s_blocksize = EROFS_BLKSIZ;
739 sb->s_blocksize_bits = LOG_BLOCK_SIZE;
740
741 err = erofs_fscache_register_fs(sb);
742 if (err)
743 return err;
744
745 err = super_setup_bdi(sb);
746 if (err)
747 return err;
748 } else {
749 if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
750 erofs_err(sb, "failed to set erofs blksize");
751 return -EINVAL;
752 }
753
754 sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
755 &sbi->dax_part_off,
756 NULL, NULL);
757 }
758
759 err = erofs_read_superblock(sb);
760 if (err)
761 return err;
762
763 if (test_opt(&sbi->opt, DAX_ALWAYS)) {
764 BUILD_BUG_ON(EROFS_BLKSIZ != PAGE_SIZE);
765
766 if (!sbi->dax_dev) {
767 errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
768 clear_opt(&sbi->opt, DAX_ALWAYS);
769 }
770 }
771
772 sb->s_time_gran = 1;
773 sb->s_xattr = erofs_xattr_handlers;
774 sb->s_export_op = &erofs_export_ops;
775
776 if (test_opt(&sbi->opt, POSIX_ACL))
777 sb->s_flags |= SB_POSIXACL;
778 else
779 sb->s_flags &= ~SB_POSIXACL;
780
781#ifdef CONFIG_EROFS_FS_ZIP
782 xa_init(&sbi->managed_pslots);
783#endif
784
785 /* get the root inode */
786 inode = erofs_iget(sb, ROOT_NID(sbi));
787 if (IS_ERR(inode))
788 return PTR_ERR(inode);
789
790 if (!S_ISDIR(inode->i_mode)) {
791 erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
792 ROOT_NID(sbi), inode->i_mode);
793 iput(inode);
794 return -EINVAL;
795 }
796
797 sb->s_root = d_make_root(inode);
798 if (!sb->s_root)
799 return -ENOMEM;
800
801 erofs_shrinker_register(sb);
802 /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
803 err = erofs_init_managed_cache(sb);
804 if (err)
805 return err;
806
807 err = erofs_register_sysfs(sb);
808 if (err)
809 return err;
810
811 erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
812 return 0;
813}
814
815static int erofs_fc_anon_get_tree(struct fs_context *fc)
816{
817 return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
818}
819
820static int erofs_fc_get_tree(struct fs_context *fc)
821{
822 struct erofs_fs_context *ctx = fc->fs_private;
823
824 if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
825 return get_tree_nodev(fc, erofs_fc_fill_super);
826
827 return get_tree_bdev(fc, erofs_fc_fill_super);
828}
829
830static int erofs_fc_reconfigure(struct fs_context *fc)
831{
832 struct super_block *sb = fc->root->d_sb;
833 struct erofs_sb_info *sbi = EROFS_SB(sb);
834 struct erofs_fs_context *ctx = fc->fs_private;
835
836 DBG_BUGON(!sb_rdonly(sb));
837
838 if (ctx->fsid || ctx->domain_id)
839 erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
840
841 if (test_opt(&ctx->opt, POSIX_ACL))
842 fc->sb_flags |= SB_POSIXACL;
843 else
844 fc->sb_flags &= ~SB_POSIXACL;
845
846 sbi->opt = ctx->opt;
847
848 fc->sb_flags |= SB_RDONLY;
849 return 0;
850}
851
852static int erofs_release_device_info(int id, void *ptr, void *data)
853{
854 struct erofs_device_info *dif = ptr;
855
856 fs_put_dax(dif->dax_dev, NULL);
857 if (dif->bdev)
858 blkdev_put(dif->bdev, FMODE_READ | FMODE_EXCL);
859 erofs_fscache_unregister_cookie(dif->fscache);
860 dif->fscache = NULL;
861 kfree(dif->path);
862 kfree(dif);
863 return 0;
864}
865
866static void erofs_free_dev_context(struct erofs_dev_context *devs)
867{
868 if (!devs)
869 return;
870 idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
871 idr_destroy(&devs->tree);
872 kfree(devs);
873}
874
875static void erofs_fc_free(struct fs_context *fc)
876{
877 struct erofs_fs_context *ctx = fc->fs_private;
878
879 erofs_free_dev_context(ctx->devs);
880 kfree(ctx->fsid);
881 kfree(ctx->domain_id);
882 kfree(ctx);
883}
884
885static const struct fs_context_operations erofs_context_ops = {
886 .parse_param = erofs_fc_parse_param,
887 .get_tree = erofs_fc_get_tree,
888 .reconfigure = erofs_fc_reconfigure,
889 .free = erofs_fc_free,
890};
891
892static const struct fs_context_operations erofs_anon_context_ops = {
893 .get_tree = erofs_fc_anon_get_tree,
894};
895
896static int erofs_init_fs_context(struct fs_context *fc)
897{
898 struct erofs_fs_context *ctx;
899
900 /* pseudo mount for anon inodes */
901 if (fc->sb_flags & SB_KERNMOUNT) {
902 fc->ops = &erofs_anon_context_ops;
903 return 0;
904 }
905
906 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
907 if (!ctx)
908 return -ENOMEM;
909 ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
910 if (!ctx->devs) {
911 kfree(ctx);
912 return -ENOMEM;
913 }
914 fc->fs_private = ctx;
915
916 idr_init(&ctx->devs->tree);
917 init_rwsem(&ctx->devs->rwsem);
918 erofs_default_options(ctx);
919 fc->ops = &erofs_context_ops;
920 return 0;
921}
922
923/*
924 * could be triggered after deactivate_locked_super()
925 * is called, thus including umount and failed to initialize.
926 */
927static void erofs_kill_sb(struct super_block *sb)
928{
929 struct erofs_sb_info *sbi;
930
931 WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
932
933 /* pseudo mount for anon inodes */
934 if (sb->s_flags & SB_KERNMOUNT) {
935 kill_anon_super(sb);
936 return;
937 }
938
939 if (erofs_is_fscache_mode(sb))
940 kill_anon_super(sb);
941 else
942 kill_block_super(sb);
943
944 sbi = EROFS_SB(sb);
945 if (!sbi)
946 return;
947
948 erofs_free_dev_context(sbi->devs);
949 fs_put_dax(sbi->dax_dev, NULL);
950 erofs_fscache_unregister_fs(sb);
951 kfree(sbi->fsid);
952 kfree(sbi->domain_id);
953 kfree(sbi);
954 sb->s_fs_info = NULL;
955}
956
957/* called when ->s_root is non-NULL */
958static void erofs_put_super(struct super_block *sb)
959{
960 struct erofs_sb_info *const sbi = EROFS_SB(sb);
961
962 DBG_BUGON(!sbi);
963
964 erofs_unregister_sysfs(sb);
965 erofs_shrinker_unregister(sb);
966#ifdef CONFIG_EROFS_FS_ZIP
967 iput(sbi->managed_cache);
968 sbi->managed_cache = NULL;
969 iput(sbi->packed_inode);
970 sbi->packed_inode = NULL;
971#endif
972 erofs_fscache_unregister_fs(sb);
973}
974
975struct file_system_type erofs_fs_type = {
976 .owner = THIS_MODULE,
977 .name = "erofs",
978 .init_fs_context = erofs_init_fs_context,
979 .kill_sb = erofs_kill_sb,
980 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
981};
982MODULE_ALIAS_FS("erofs");
983
984static int __init erofs_module_init(void)
985{
986 int err;
987
988 erofs_check_ondisk_layout_definitions();
989
990 erofs_inode_cachep = kmem_cache_create("erofs_inode",
991 sizeof(struct erofs_inode), 0,
992 SLAB_RECLAIM_ACCOUNT,
993 erofs_inode_init_once);
994 if (!erofs_inode_cachep) {
995 err = -ENOMEM;
996 goto icache_err;
997 }
998
999 err = erofs_init_shrinker();
1000 if (err)
1001 goto shrinker_err;
1002
1003 err = z_erofs_lzma_init();
1004 if (err)
1005 goto lzma_err;
1006
1007 erofs_pcpubuf_init();
1008 err = z_erofs_init_zip_subsystem();
1009 if (err)
1010 goto zip_err;
1011
1012 err = erofs_init_sysfs();
1013 if (err)
1014 goto sysfs_err;
1015
1016 err = register_filesystem(&erofs_fs_type);
1017 if (err)
1018 goto fs_err;
1019
1020 return 0;
1021
1022fs_err:
1023 erofs_exit_sysfs();
1024sysfs_err:
1025 z_erofs_exit_zip_subsystem();
1026zip_err:
1027 z_erofs_lzma_exit();
1028lzma_err:
1029 erofs_exit_shrinker();
1030shrinker_err:
1031 kmem_cache_destroy(erofs_inode_cachep);
1032icache_err:
1033 return err;
1034}
1035
1036static void __exit erofs_module_exit(void)
1037{
1038 unregister_filesystem(&erofs_fs_type);
1039
1040 /* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
1041 rcu_barrier();
1042
1043 erofs_exit_sysfs();
1044 z_erofs_exit_zip_subsystem();
1045 z_erofs_lzma_exit();
1046 erofs_exit_shrinker();
1047 kmem_cache_destroy(erofs_inode_cachep);
1048 erofs_pcpubuf_exit();
1049}
1050
1051/* get filesystem statistics */
1052static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
1053{
1054 struct super_block *sb = dentry->d_sb;
1055 struct erofs_sb_info *sbi = EROFS_SB(sb);
1056 u64 id = 0;
1057
1058 if (!erofs_is_fscache_mode(sb))
1059 id = huge_encode_dev(sb->s_bdev->bd_dev);
1060
1061 buf->f_type = sb->s_magic;
1062 buf->f_bsize = EROFS_BLKSIZ;
1063 buf->f_blocks = sbi->total_blocks;
1064 buf->f_bfree = buf->f_bavail = 0;
1065
1066 buf->f_files = ULLONG_MAX;
1067 buf->f_ffree = ULLONG_MAX - sbi->inos;
1068
1069 buf->f_namelen = EROFS_NAME_LEN;
1070
1071 buf->f_fsid = u64_to_fsid(id);
1072 return 0;
1073}
1074
1075static int erofs_show_options(struct seq_file *seq, struct dentry *root)
1076{
1077 struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
1078 struct erofs_mount_opts *opt = &sbi->opt;
1079
1080#ifdef CONFIG_EROFS_FS_XATTR
1081 if (test_opt(opt, XATTR_USER))
1082 seq_puts(seq, ",user_xattr");
1083 else
1084 seq_puts(seq, ",nouser_xattr");
1085#endif
1086#ifdef CONFIG_EROFS_FS_POSIX_ACL
1087 if (test_opt(opt, POSIX_ACL))
1088 seq_puts(seq, ",acl");
1089 else
1090 seq_puts(seq, ",noacl");
1091#endif
1092#ifdef CONFIG_EROFS_FS_ZIP
1093 if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
1094 seq_puts(seq, ",cache_strategy=disabled");
1095 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
1096 seq_puts(seq, ",cache_strategy=readahead");
1097 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
1098 seq_puts(seq, ",cache_strategy=readaround");
1099#endif
1100 if (test_opt(opt, DAX_ALWAYS))
1101 seq_puts(seq, ",dax=always");
1102 if (test_opt(opt, DAX_NEVER))
1103 seq_puts(seq, ",dax=never");
1104#ifdef CONFIG_EROFS_FS_ONDEMAND
1105 if (sbi->fsid)
1106 seq_printf(seq, ",fsid=%s", sbi->fsid);
1107 if (sbi->domain_id)
1108 seq_printf(seq, ",domain_id=%s", sbi->domain_id);
1109#endif
1110 return 0;
1111}
1112
1113const struct super_operations erofs_sops = {
1114 .put_super = erofs_put_super,
1115 .alloc_inode = erofs_alloc_inode,
1116 .free_inode = erofs_free_inode,
1117 .statfs = erofs_statfs,
1118 .show_options = erofs_show_options,
1119};
1120
1121module_init(erofs_module_init);
1122module_exit(erofs_module_exit);
1123
1124MODULE_DESCRIPTION("Enhanced ROM File System");
1125MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
1126MODULE_LICENSE("GPL");