1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2017-2018 HUAWEI, Inc.
4 * https://www.huawei.com/
5 * Copyright (C) 2021, Alibaba Cloud
6 */
7#include <linux/statfs.h>
8#include <linux/seq_file.h>
9#include <linux/crc32c.h>
10#include <linux/fs_context.h>
11#include <linux/fs_parser.h>
12#include <linux/exportfs.h>
13#include "xattr.h"
14
15#define CREATE_TRACE_POINTS
16#include <trace/events/erofs.h>
17
18static struct kmem_cache *erofs_inode_cachep __read_mostly;
19
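/*
 * Message helpers: prefix output with the block device name (sb->s_id) when a
 * superblock is available, presumably invoked via the erofs_err()/erofs_info()
 * wrappers that pass in __func__.
 */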
20void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...)
21{
22 struct va_format vaf;
23 va_list args;
24
25 va_start(args, fmt);
26
27 vaf.fmt = fmt;
28 vaf.va = &args;
29
30 if (sb)
31 pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf);
32 else
33 pr_err("%s: %pV", func, &vaf);
34 va_end(args);
35}
36
37void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...)
38{
39 struct va_format vaf;
40 va_list args;
41
42 va_start(args, fmt);
43
44 vaf.fmt = fmt;
45 vaf.va = &args;
46
47 if (sb)
48 pr_info("(device %s): %pV", sb->s_id, &vaf);
49 else
50 pr_info("%pV", &vaf);
51 va_end(args);
52}
53
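/*
 * Verify the superblock crc32c checksum: work on a temporary copy starting at
 * EROFS_SUPER_OFFSET, zero the stored checksum field, then recompute the CRC
 * over the remainder of the first filesystem block.
 */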
54static int erofs_superblock_csum_verify(struct super_block *sb, void *sbdata)
55{
56 size_t len = 1 << EROFS_SB(sb)->blkszbits;
57 struct erofs_super_block *dsb;
58 u32 expected_crc, crc;
59
60 if (len > EROFS_SUPER_OFFSET)
61 len -= EROFS_SUPER_OFFSET;
62
63 dsb = kmemdup(sbdata + EROFS_SUPER_OFFSET, len, GFP_KERNEL);
64 if (!dsb)
65 return -ENOMEM;
66
67 expected_crc = le32_to_cpu(dsb->checksum);
68 dsb->checksum = 0;
69	/* the first EROFS_SUPER_OFFSET bytes (x86 boot sectors and other oddities) are not covered */
70 crc = crc32c(~0, dsb, len);
71 kfree(dsb);
72
73 if (crc != expected_crc) {
74 erofs_err(sb, "invalid checksum 0x%08x, 0x%08x expected",
75 crc, expected_crc);
76 return -EBADMSG;
77 }
78 return 0;
79}
80
81static void erofs_inode_init_once(void *ptr)
82{
83 struct erofs_inode *vi = ptr;
84
85 inode_init_once(&vi->vfs_inode);
86}
87
88static struct inode *erofs_alloc_inode(struct super_block *sb)
89{
90 struct erofs_inode *vi =
91 alloc_inode_sb(sb, erofs_inode_cachep, GFP_KERNEL);
92
93 if (!vi)
94 return NULL;
95
96 /* zero out everything except vfs_inode */
97 memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
98 return &vi->vfs_inode;
99}
100
101static void erofs_free_inode(struct inode *inode)
102{
103 struct erofs_inode *vi = EROFS_I(inode);
104
105 if (inode->i_op == &erofs_fast_symlink_iops)
106 kfree(inode->i_link);
107 kfree(vi->xattr_shared_xattrs);
108 kmem_cache_free(erofs_inode_cachep, vi);
109}
110
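/* reject images carrying incompat feature bits unknown to this kernel */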
111static bool check_layout_compatibility(struct super_block *sb,
112 struct erofs_super_block *dsb)
113{
114 const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
115
116 EROFS_SB(sb)->feature_incompat = feature;
117
118 /* check if current kernel meets all mandatory requirements */
119 if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
120 erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel",
121 feature & ~EROFS_ALL_FEATURE_INCOMPAT);
122 return false;
123 }
124 return true;
125}
126
127/* read variable-sized metadata, offset will be aligned to a 4-byte boundary */
128void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
129 erofs_off_t *offset, int *lengthp)
130{
131 u8 *buffer, *ptr;
132 int len, i, cnt;
133
134 *offset = round_up(*offset, 4);
135 ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
136 if (IS_ERR(ptr))
137 return ptr;
138
139 len = le16_to_cpu(*(__le16 *)&ptr[erofs_blkoff(sb, *offset)]);
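	/* an on-disk length of 0 is interpreted as U16_MAX + 1 (65536) bytes */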
140 if (!len)
141 len = U16_MAX + 1;
142 buffer = kmalloc(len, GFP_KERNEL);
143 if (!buffer)
144 return ERR_PTR(-ENOMEM);
145 *offset += sizeof(__le16);
146 *lengthp = len;
147
148 for (i = 0; i < len; i += cnt) {
149 cnt = min_t(int, sb->s_blocksize - erofs_blkoff(sb, *offset),
150 len - i);
151 ptr = erofs_bread(buf, erofs_blknr(sb, *offset), EROFS_KMAP);
152 if (IS_ERR(ptr)) {
153 kfree(buffer);
154 return ptr;
155 }
156 memcpy(buffer + i, ptr + erofs_blkoff(sb, *offset), cnt);
157 *offset += cnt;
158 }
159 return buffer;
160}
161
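/*
 * Fallback used when compression support is compiled out: refuse to mount
 * images that advertise any compression algorithms; the full parser is
 * provided elsewhere when CONFIG_EROFS_FS_ZIP is enabled.
 */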
162#ifndef CONFIG_EROFS_FS_ZIP
163static int z_erofs_parse_cfgs(struct super_block *sb,
164 struct erofs_super_block *dsb)
165{
166 if (!dsb->u1.available_compr_algs)
167 return 0;
168
169 erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
170 return -EOPNOTSUPP;
171}
172#endif
173
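/*
 * Initialize one extra device of a multi-device image from its on-disk device
 * slot: resolve the device path from the slot tag if needed, then either
 * register an fscache cookie (fscache mode) or open the backing block device.
 */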
174static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
175 struct erofs_device_info *dif, erofs_off_t *pos)
176{
177 struct erofs_sb_info *sbi = EROFS_SB(sb);
178 struct erofs_fscache *fscache;
179 struct erofs_deviceslot *dis;
180 struct bdev_handle *bdev_handle;
181 void *ptr;
182
183 ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
184 if (IS_ERR(ptr))
185 return PTR_ERR(ptr);
186 dis = ptr + erofs_blkoff(sb, *pos);
187
188 if (!sbi->devs->flatdev && !dif->path) {
189 if (!dis->tag[0]) {
190 erofs_err(sb, "empty device tag @ pos %llu", *pos);
191 return -EINVAL;
192 }
193 dif->path = kmemdup_nul(dis->tag, sizeof(dis->tag), GFP_KERNEL);
194 if (!dif->path)
195 return -ENOMEM;
196 }
197
198 if (erofs_is_fscache_mode(sb)) {
199 fscache = erofs_fscache_register_cookie(sb, dif->path, 0);
200 if (IS_ERR(fscache))
201 return PTR_ERR(fscache);
202 dif->fscache = fscache;
203 } else if (!sbi->devs->flatdev) {
204 bdev_handle = bdev_open_by_path(dif->path, BLK_OPEN_READ,
205 sb->s_type, NULL);
206 if (IS_ERR(bdev_handle))
207 return PTR_ERR(bdev_handle);
208 dif->bdev_handle = bdev_handle;
209 dif->dax_dev = fs_dax_get_by_bdev(bdev_handle->bdev,
210 &dif->dax_part_off, NULL, NULL);
211 }
212
213 dif->blocks = le32_to_cpu(dis->blocks);
214 dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
215 sbi->total_blocks += dif->blocks;
216 *pos += EROFS_DEVT_SLOT_SIZE;
217 return 0;
218}
219
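/*
 * Walk the on-disk device table: either match it against devices already
 * supplied via the "device=" mount option or allocate fresh entries for each
 * slot found on disk.
 */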
220static int erofs_scan_devices(struct super_block *sb,
221 struct erofs_super_block *dsb)
222{
223 struct erofs_sb_info *sbi = EROFS_SB(sb);
224 unsigned int ondisk_extradevs;
225 erofs_off_t pos;
226 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
227 struct erofs_device_info *dif;
228 int id, err = 0;
229
230 sbi->total_blocks = sbi->primarydevice_blocks;
231 if (!erofs_sb_has_device_table(sbi))
232 ondisk_extradevs = 0;
233 else
234 ondisk_extradevs = le16_to_cpu(dsb->extra_devices);
235
236 if (sbi->devs->extra_devices &&
237 ondisk_extradevs != sbi->devs->extra_devices) {
238 erofs_err(sb, "extra devices don't match (ondisk %u, given %u)",
239 ondisk_extradevs, sbi->devs->extra_devices);
240 return -EINVAL;
241 }
242 if (!ondisk_extradevs)
243 return 0;
244
245 if (!sbi->devs->extra_devices && !erofs_is_fscache_mode(sb))
246 sbi->devs->flatdev = true;
247
248 sbi->device_id_mask = roundup_pow_of_two(ondisk_extradevs + 1) - 1;
249 pos = le16_to_cpu(dsb->devt_slotoff) * EROFS_DEVT_SLOT_SIZE;
250 down_read(&sbi->devs->rwsem);
251 if (sbi->devs->extra_devices) {
252 idr_for_each_entry(&sbi->devs->tree, dif, id) {
253 err = erofs_init_device(&buf, sb, dif, &pos);
254 if (err)
255 break;
256 }
257 } else {
258 for (id = 0; id < ondisk_extradevs; id++) {
259 dif = kzalloc(sizeof(*dif), GFP_KERNEL);
260 if (!dif) {
261 err = -ENOMEM;
262 break;
263 }
264
265 err = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
266 if (err < 0) {
267 kfree(dif);
268 break;
269 }
270 ++sbi->devs->extra_devices;
271
272 err = erofs_init_device(&buf, sb, dif, &pos);
273 if (err)
274 break;
275 }
276 }
277 up_read(&sbi->devs->rwsem);
278 erofs_put_metabuf(&buf);
279 return err;
280}
281
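/*
 * Read the on-disk superblock at EROFS_SUPER_OFFSET and validate magic, block
 * size, optional checksum and incompat features before copying its fields
 * into the in-memory erofs_sb_info.
 */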
282static int erofs_read_superblock(struct super_block *sb)
283{
284 struct erofs_sb_info *sbi;
285 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
286 struct erofs_super_block *dsb;
287 void *data;
288 int ret;
289
290 data = erofs_read_metabuf(&buf, sb, 0, EROFS_KMAP);
291 if (IS_ERR(data)) {
292 erofs_err(sb, "cannot read erofs superblock");
293 return PTR_ERR(data);
294 }
295
296 sbi = EROFS_SB(sb);
297 dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
298
299 ret = -EINVAL;
300 if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
301 erofs_err(sb, "cannot find valid erofs superblock");
302 goto out;
303 }
304
305 sbi->blkszbits = dsb->blkszbits;
306 if (sbi->blkszbits < 9 || sbi->blkszbits > PAGE_SHIFT) {
307 erofs_err(sb, "blkszbits %u isn't supported", sbi->blkszbits);
308 goto out;
309 }
310 if (dsb->dirblkbits) {
311 erofs_err(sb, "dirblkbits %u isn't supported", dsb->dirblkbits);
312 goto out;
313 }
314
315 sbi->feature_compat = le32_to_cpu(dsb->feature_compat);
316 if (erofs_sb_has_sb_chksum(sbi)) {
317 ret = erofs_superblock_csum_verify(sb, data);
318 if (ret)
319 goto out;
320 }
321
322 ret = -EINVAL;
323 if (!check_layout_compatibility(sb, dsb))
324 goto out;
325
326 sbi->sb_size = 128 + dsb->sb_extslots * EROFS_SB_EXTSLOT_SIZE;
327 if (sbi->sb_size > PAGE_SIZE - EROFS_SUPER_OFFSET) {
328 erofs_err(sb, "invalid sb_extslots %u (more than a fs block)",
329 sbi->sb_size);
330 goto out;
331 }
332 sbi->primarydevice_blocks = le32_to_cpu(dsb->blocks);
333 sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
334#ifdef CONFIG_EROFS_FS_XATTR
335 sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
336 sbi->xattr_prefix_start = le32_to_cpu(dsb->xattr_prefix_start);
337 sbi->xattr_prefix_count = dsb->xattr_prefix_count;
338 sbi->xattr_filter_reserved = dsb->xattr_filter_reserved;
339#endif
340 sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
341 sbi->root_nid = le16_to_cpu(dsb->root_nid);
342 sbi->packed_nid = le64_to_cpu(dsb->packed_nid);
343 sbi->inos = le64_to_cpu(dsb->inos);
344
345 sbi->build_time = le64_to_cpu(dsb->build_time);
346 sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
347
348 memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
349
350 ret = strscpy(sbi->volume_name, dsb->volume_name,
351 sizeof(dsb->volume_name));
352 if (ret < 0) { /* -E2BIG */
353 erofs_err(sb, "bad volume name without NIL terminator");
354 ret = -EFSCORRUPTED;
355 goto out;
356 }
357
358 /* parse on-disk compression configurations */
359 ret = z_erofs_parse_cfgs(sb, dsb);
360 if (ret < 0)
361 goto out;
362
363 /* handle multiple devices */
364 ret = erofs_scan_devices(sb, dsb);
365
366 if (erofs_is_fscache_mode(sb))
367 erofs_info(sb, "EXPERIMENTAL fscache-based on-demand read feature in use. Use at your own risk!");
368out:
369 erofs_put_metabuf(&buf);
370 return ret;
371}
372
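/* defaults applied to the mount context before user options are parsed */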
373static void erofs_default_options(struct erofs_fs_context *ctx)
374{
375#ifdef CONFIG_EROFS_FS_ZIP
376 ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
377 ctx->opt.max_sync_decompress_pages = 3;
378 ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
379#endif
380#ifdef CONFIG_EROFS_FS_XATTR
381 set_opt(&ctx->opt, XATTR_USER);
382#endif
383#ifdef CONFIG_EROFS_FS_POSIX_ACL
384 set_opt(&ctx->opt, POSIX_ACL);
385#endif
386}
387
388enum {
389 Opt_user_xattr,
390 Opt_acl,
391 Opt_cache_strategy,
392 Opt_dax,
393 Opt_dax_enum,
394 Opt_device,
395 Opt_fsid,
396 Opt_domain_id,
397 Opt_err
398};
399
400static const struct constant_table erofs_param_cache_strategy[] = {
401 {"disabled", EROFS_ZIP_CACHE_DISABLED},
402 {"readahead", EROFS_ZIP_CACHE_READAHEAD},
403 {"readaround", EROFS_ZIP_CACHE_READAROUND},
404 {}
405};
406
407static const struct constant_table erofs_dax_param_enums[] = {
408 {"always", EROFS_MOUNT_DAX_ALWAYS},
409 {"never", EROFS_MOUNT_DAX_NEVER},
410 {}
411};
412
413static const struct fs_parameter_spec erofs_fs_parameters[] = {
414 fsparam_flag_no("user_xattr", Opt_user_xattr),
415 fsparam_flag_no("acl", Opt_acl),
416 fsparam_enum("cache_strategy", Opt_cache_strategy,
417 erofs_param_cache_strategy),
418 fsparam_flag("dax", Opt_dax),
419 fsparam_enum("dax", Opt_dax_enum, erofs_dax_param_enums),
420 fsparam_string("device", Opt_device),
421 fsparam_string("fsid", Opt_fsid),
422 fsparam_string("domain_id", Opt_domain_id),
423 {}
424};
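/*
 * Illustrative mount invocation using the parameters above (device paths and
 * mount point are hypothetical):
 *   mount -t erofs -o acl,cache_strategy=readaround,device=/dev/sdb1 \
 *         /dev/sda1 /mnt/erofs
 */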
425
426static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
427{
428#ifdef CONFIG_FS_DAX
429 struct erofs_fs_context *ctx = fc->fs_private;
430
431 switch (mode) {
432 case EROFS_MOUNT_DAX_ALWAYS:
433 warnfc(fc, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
434 set_opt(&ctx->opt, DAX_ALWAYS);
435 clear_opt(&ctx->opt, DAX_NEVER);
436 return true;
437 case EROFS_MOUNT_DAX_NEVER:
438 set_opt(&ctx->opt, DAX_NEVER);
439 clear_opt(&ctx->opt, DAX_ALWAYS);
440 return true;
441 default:
442 DBG_BUGON(1);
443 return false;
444 }
445#else
446 errorfc(fc, "dax options not supported");
447 return false;
448#endif
449}
450
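/* parse a single mount parameter into the per-mount context */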
451static int erofs_fc_parse_param(struct fs_context *fc,
452 struct fs_parameter *param)
453{
454 struct erofs_fs_context *ctx = fc->fs_private;
455 struct fs_parse_result result;
456 struct erofs_device_info *dif;
457 int opt, ret;
458
459 opt = fs_parse(fc, erofs_fs_parameters, param, &result);
460 if (opt < 0)
461 return opt;
462
463 switch (opt) {
464 case Opt_user_xattr:
465#ifdef CONFIG_EROFS_FS_XATTR
466 if (result.boolean)
467 set_opt(&ctx->opt, XATTR_USER);
468 else
469 clear_opt(&ctx->opt, XATTR_USER);
470#else
471 errorfc(fc, "{,no}user_xattr options not supported");
472#endif
473 break;
474 case Opt_acl:
475#ifdef CONFIG_EROFS_FS_POSIX_ACL
476 if (result.boolean)
477 set_opt(&ctx->opt, POSIX_ACL);
478 else
479 clear_opt(&ctx->opt, POSIX_ACL);
480#else
481 errorfc(fc, "{,no}acl options not supported");
482#endif
483 break;
484 case Opt_cache_strategy:
485#ifdef CONFIG_EROFS_FS_ZIP
486 ctx->opt.cache_strategy = result.uint_32;
487#else
488 errorfc(fc, "compression not supported, cache_strategy ignored");
489#endif
490 break;
491 case Opt_dax:
492 if (!erofs_fc_set_dax_mode(fc, EROFS_MOUNT_DAX_ALWAYS))
493 return -EINVAL;
494 break;
495 case Opt_dax_enum:
496 if (!erofs_fc_set_dax_mode(fc, result.uint_32))
497 return -EINVAL;
498 break;
499 case Opt_device:
500 dif = kzalloc(sizeof(*dif), GFP_KERNEL);
501 if (!dif)
502 return -ENOMEM;
503 dif->path = kstrdup(param->string, GFP_KERNEL);
504 if (!dif->path) {
505 kfree(dif);
506 return -ENOMEM;
507 }
508 down_write(&ctx->devs->rwsem);
509 ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
510 up_write(&ctx->devs->rwsem);
511 if (ret < 0) {
512 kfree(dif->path);
513 kfree(dif);
514 return ret;
515 }
516 ++ctx->devs->extra_devices;
517 break;
518#ifdef CONFIG_EROFS_FS_ONDEMAND
519 case Opt_fsid:
520 kfree(ctx->fsid);
521 ctx->fsid = kstrdup(param->string, GFP_KERNEL);
522 if (!ctx->fsid)
523 return -ENOMEM;
524 break;
525 case Opt_domain_id:
526 kfree(ctx->domain_id);
527 ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
528 if (!ctx->domain_id)
529 return -ENOMEM;
530 break;
531#else
532 case Opt_fsid:
533 case Opt_domain_id:
534 errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
535 break;
536#endif
537 default:
538 return -ENOPARAM;
539 }
540 return 0;
541}
542
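/*
 * NFS export support: file handles carry a 32-bit inode number which is
 * simply looked up again via erofs_iget() (the generation is ignored), plus a
 * "dotdot" lookup to locate the parent directory.
 */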
543static struct inode *erofs_nfs_get_inode(struct super_block *sb,
544 u64 ino, u32 generation)
545{
546 return erofs_iget(sb, ino);
547}
548
549static struct dentry *erofs_fh_to_dentry(struct super_block *sb,
550 struct fid *fid, int fh_len, int fh_type)
551{
552 return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
553 erofs_nfs_get_inode);
554}
555
556static struct dentry *erofs_fh_to_parent(struct super_block *sb,
557 struct fid *fid, int fh_len, int fh_type)
558{
559 return generic_fh_to_parent(sb, fid, fh_len, fh_type,
560 erofs_nfs_get_inode);
561}
562
563static struct dentry *erofs_get_parent(struct dentry *child)
564{
565 erofs_nid_t nid;
566 unsigned int d_type;
567 int err;
568
569 err = erofs_namei(d_inode(child), &dotdot_name, &nid, &d_type);
570 if (err)
571 return ERR_PTR(err);
572 return d_obtain_alias(erofs_iget(child->d_sb, nid));
573}
574
575static const struct export_operations erofs_export_ops = {
576 .encode_fh = generic_encode_ino32_fh,
577 .fh_to_dentry = erofs_fh_to_dentry,
578 .fh_to_parent = erofs_fh_to_parent,
579 .get_parent = erofs_get_parent,
580};
581
582static int erofs_fc_fill_pseudo_super(struct super_block *sb, struct fs_context *fc)
583{
584 static const struct tree_descr empty_descr = {""};
585
586 return simple_fill_super(sb, EROFS_SUPER_MAGIC, &empty_descr);
587}
588
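/*
 * Fill a new superblock: allocate erofs_sb_info, take over the parsed mount
 * context, read the on-disk superblock, honour DAX/fscache settings and
 * finally instantiate the root (and optional packed) inode.
 */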
589static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
590{
591 struct inode *inode;
592 struct erofs_sb_info *sbi;
593 struct erofs_fs_context *ctx = fc->fs_private;
594 int err;
595
596 sb->s_magic = EROFS_SUPER_MAGIC;
597 sb->s_flags |= SB_RDONLY | SB_NOATIME;
598 sb->s_maxbytes = MAX_LFS_FILESIZE;
599 sb->s_op = &erofs_sops;
600
601 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
602 if (!sbi)
603 return -ENOMEM;
604
605 sb->s_fs_info = sbi;
606 sbi->opt = ctx->opt;
607 sbi->devs = ctx->devs;
608 ctx->devs = NULL;
609 sbi->fsid = ctx->fsid;
610 ctx->fsid = NULL;
611 sbi->domain_id = ctx->domain_id;
612 ctx->domain_id = NULL;
613
614 sbi->blkszbits = PAGE_SHIFT;
615 if (erofs_is_fscache_mode(sb)) {
616 sb->s_blocksize = PAGE_SIZE;
617 sb->s_blocksize_bits = PAGE_SHIFT;
618
619 err = erofs_fscache_register_fs(sb);
620 if (err)
621 return err;
622
623 err = super_setup_bdi(sb);
624 if (err)
625 return err;
626 } else {
627 if (!sb_set_blocksize(sb, PAGE_SIZE)) {
628 errorfc(fc, "failed to set initial blksize");
629 return -EINVAL;
630 }
631
632 sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev,
633 &sbi->dax_part_off,
634 NULL, NULL);
635 }
636
637 err = erofs_read_superblock(sb);
638 if (err)
639 return err;
640
641 if (sb->s_blocksize_bits != sbi->blkszbits) {
642 if (erofs_is_fscache_mode(sb)) {
643 errorfc(fc, "unsupported blksize for fscache mode");
644 return -EINVAL;
645 }
646 if (!sb_set_blocksize(sb, 1 << sbi->blkszbits)) {
647 errorfc(fc, "failed to set erofs blksize");
648 return -EINVAL;
649 }
650 }
651
652 if (test_opt(&sbi->opt, DAX_ALWAYS)) {
653 if (!sbi->dax_dev) {
654 errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
655 clear_opt(&sbi->opt, DAX_ALWAYS);
656 } else if (sbi->blkszbits != PAGE_SHIFT) {
657 errorfc(fc, "unsupported blocksize for DAX");
658 clear_opt(&sbi->opt, DAX_ALWAYS);
659 }
660 }
661
662 sb->s_time_gran = 1;
663 sb->s_xattr = erofs_xattr_handlers;
664 sb->s_export_op = &erofs_export_ops;
665
666 if (test_opt(&sbi->opt, POSIX_ACL))
667 sb->s_flags |= SB_POSIXACL;
668 else
669 sb->s_flags &= ~SB_POSIXACL;
670
671#ifdef CONFIG_EROFS_FS_ZIP
672 xa_init(&sbi->managed_pslots);
673#endif
674
675 inode = erofs_iget(sb, sbi->root_nid);
676 if (IS_ERR(inode))
677 return PTR_ERR(inode);
678
679 if (!S_ISDIR(inode->i_mode)) {
680 erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
681 sbi->root_nid, inode->i_mode);
682 iput(inode);
683 return -EINVAL;
684 }
685
686 sb->s_root = d_make_root(inode);
687 if (!sb->s_root)
688 return -ENOMEM;
689
690 erofs_shrinker_register(sb);
691 if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) {
692 sbi->packed_inode = erofs_iget(sb, sbi->packed_nid);
693 if (IS_ERR(sbi->packed_inode)) {
694 err = PTR_ERR(sbi->packed_inode);
695 sbi->packed_inode = NULL;
696 return err;
697 }
698 }
699 err = erofs_init_managed_cache(sb);
700 if (err)
701 return err;
702
703 err = erofs_xattr_prefixes_init(sb);
704 if (err)
705 return err;
706
707 err = erofs_register_sysfs(sb);
708 if (err)
709 return err;
710
711 erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid);
712 return 0;
713}
714
715static int erofs_fc_anon_get_tree(struct fs_context *fc)
716{
717 return get_tree_nodev(fc, erofs_fc_fill_pseudo_super);
718}
719
720static int erofs_fc_get_tree(struct fs_context *fc)
721{
722 struct erofs_fs_context *ctx = fc->fs_private;
723
724 if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
725 return get_tree_nodev(fc, erofs_fc_fill_super);
726
727 return get_tree_bdev(fc, erofs_fc_fill_super);
728}
729
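/*
 * Remount: the filesystem stays read-only, so only a subset of options (such
 * as POSIX ACL behaviour) is re-applied; fsid/domain_id changes are ignored.
 */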
730static int erofs_fc_reconfigure(struct fs_context *fc)
731{
732 struct super_block *sb = fc->root->d_sb;
733 struct erofs_sb_info *sbi = EROFS_SB(sb);
734 struct erofs_fs_context *ctx = fc->fs_private;
735
736 DBG_BUGON(!sb_rdonly(sb));
737
738 if (ctx->fsid || ctx->domain_id)
739 erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
740
741 if (test_opt(&ctx->opt, POSIX_ACL))
742 fc->sb_flags |= SB_POSIXACL;
743 else
744 fc->sb_flags &= ~SB_POSIXACL;
745
746 sbi->opt = ctx->opt;
747
748 fc->sb_flags |= SB_RDONLY;
749 return 0;
750}
751
752static int erofs_release_device_info(int id, void *ptr, void *data)
753{
754 struct erofs_device_info *dif = ptr;
755
756 fs_put_dax(dif->dax_dev, NULL);
757 if (dif->bdev_handle)
758 bdev_release(dif->bdev_handle);
759 erofs_fscache_unregister_cookie(dif->fscache);
760 dif->fscache = NULL;
761 kfree(dif->path);
762 kfree(dif);
763 return 0;
764}
765
766static void erofs_free_dev_context(struct erofs_dev_context *devs)
767{
768 if (!devs)
769 return;
770 idr_for_each(&devs->tree, &erofs_release_device_info, NULL);
771 idr_destroy(&devs->tree);
772 kfree(devs);
773}
774
775static void erofs_fc_free(struct fs_context *fc)
776{
777 struct erofs_fs_context *ctx = fc->fs_private;
778
779 erofs_free_dev_context(ctx->devs);
780 kfree(ctx->fsid);
781 kfree(ctx->domain_id);
782 kfree(ctx);
783}
784
785static const struct fs_context_operations erofs_context_ops = {
786 .parse_param = erofs_fc_parse_param,
787 .get_tree = erofs_fc_get_tree,
788 .reconfigure = erofs_fc_reconfigure,
789 .free = erofs_fc_free,
790};
791
792static const struct fs_context_operations erofs_anon_context_ops = {
793 .get_tree = erofs_fc_anon_get_tree,
794};
795
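/*
 * Set up the filesystem context: kernel pseudo mounts get the anon ops,
 * regular mounts allocate a fresh option/device parsing context.
 */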
796static int erofs_init_fs_context(struct fs_context *fc)
797{
798 struct erofs_fs_context *ctx;
799
800 /* pseudo mount for anon inodes */
801 if (fc->sb_flags & SB_KERNMOUNT) {
802 fc->ops = &erofs_anon_context_ops;
803 return 0;
804 }
805
806 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
807 if (!ctx)
808 return -ENOMEM;
809 ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
810 if (!ctx->devs) {
811 kfree(ctx);
812 return -ENOMEM;
813 }
814 fc->fs_private = ctx;
815
816 idr_init(&ctx->devs->tree);
817 init_rwsem(&ctx->devs->rwsem);
818 erofs_default_options(ctx);
819 fc->ops = &erofs_context_ops;
820 return 0;
821}
822
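/* tear down the superblock and release all per-mount resources */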
823static void erofs_kill_sb(struct super_block *sb)
824{
825 struct erofs_sb_info *sbi;
826
827 /* pseudo mount for anon inodes */
828 if (sb->s_flags & SB_KERNMOUNT) {
829 kill_anon_super(sb);
830 return;
831 }
832
833 if (erofs_is_fscache_mode(sb))
834 kill_anon_super(sb);
835 else
836 kill_block_super(sb);
837
838 sbi = EROFS_SB(sb);
839 if (!sbi)
840 return;
841
842 erofs_free_dev_context(sbi->devs);
843 fs_put_dax(sbi->dax_dev, NULL);
844 erofs_fscache_unregister_fs(sb);
845 kfree(sbi->fsid);
846 kfree(sbi->domain_id);
847 kfree(sbi);
848 sb->s_fs_info = NULL;
849}
850
851static void erofs_put_super(struct super_block *sb)
852{
853 struct erofs_sb_info *const sbi = EROFS_SB(sb);
854
855 DBG_BUGON(!sbi);
856
857 erofs_unregister_sysfs(sb);
858 erofs_shrinker_unregister(sb);
859 erofs_xattr_prefixes_cleanup(sb);
860#ifdef CONFIG_EROFS_FS_ZIP
861 iput(sbi->managed_cache);
862 sbi->managed_cache = NULL;
863#endif
864 iput(sbi->packed_inode);
865 sbi->packed_inode = NULL;
866 erofs_free_dev_context(sbi->devs);
867 sbi->devs = NULL;
868 erofs_fscache_unregister_fs(sb);
869}
870
871struct file_system_type erofs_fs_type = {
872 .owner = THIS_MODULE,
873 .name = "erofs",
874 .init_fs_context = erofs_init_fs_context,
875 .kill_sb = erofs_kill_sb,
876 .fs_flags = FS_REQUIRES_DEV | FS_ALLOW_IDMAP,
877};
878MODULE_ALIAS_FS("erofs");
879
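/*
 * Module init: create the inode cache, then bring up the shrinker,
 * decompressors, per-CPU buffers, the zip subsystem and sysfs before
 * registering the filesystem; unwind in reverse order on any failure.
 */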
880static int __init erofs_module_init(void)
881{
882 int err;
883
884 erofs_check_ondisk_layout_definitions();
885
886 erofs_inode_cachep = kmem_cache_create("erofs_inode",
887 sizeof(struct erofs_inode), 0,
888 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
889 erofs_inode_init_once);
890 if (!erofs_inode_cachep)
891 return -ENOMEM;
892
893 err = erofs_init_shrinker();
894 if (err)
895 goto shrinker_err;
896
897 err = z_erofs_lzma_init();
898 if (err)
899 goto lzma_err;
900
901 err = z_erofs_deflate_init();
902 if (err)
903 goto deflate_err;
904
905 erofs_pcpubuf_init();
906 err = z_erofs_init_zip_subsystem();
907 if (err)
908 goto zip_err;
909
910 err = erofs_init_sysfs();
911 if (err)
912 goto sysfs_err;
913
914 err = register_filesystem(&erofs_fs_type);
915 if (err)
916 goto fs_err;
917
918 return 0;
919
920fs_err:
921 erofs_exit_sysfs();
922sysfs_err:
923 z_erofs_exit_zip_subsystem();
924zip_err:
925 z_erofs_deflate_exit();
926deflate_err:
927 z_erofs_lzma_exit();
928lzma_err:
929 erofs_exit_shrinker();
930shrinker_err:
931 kmem_cache_destroy(erofs_inode_cachep);
932 return err;
933}
934
935static void __exit erofs_module_exit(void)
936{
937 unregister_filesystem(&erofs_fs_type);
938
939 /* Ensure all RCU free inodes / pclusters are safe to be destroyed. */
940 rcu_barrier();
941
942 erofs_exit_sysfs();
943 z_erofs_exit_zip_subsystem();
944 z_erofs_deflate_exit();
945 z_erofs_lzma_exit();
946 erofs_exit_shrinker();
947 kmem_cache_destroy(erofs_inode_cachep);
948 erofs_pcpubuf_exit();
949}
950
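/* erofs is read-only, so report zero free blocks and synthesized file counts */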
951static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
952{
953 struct super_block *sb = dentry->d_sb;
954 struct erofs_sb_info *sbi = EROFS_SB(sb);
955 u64 id = 0;
956
957 if (!erofs_is_fscache_mode(sb))
958 id = huge_encode_dev(sb->s_bdev->bd_dev);
959
960 buf->f_type = sb->s_magic;
961 buf->f_bsize = sb->s_blocksize;
962 buf->f_blocks = sbi->total_blocks;
963 buf->f_bfree = buf->f_bavail = 0;
964
965 buf->f_files = ULLONG_MAX;
966 buf->f_ffree = ULLONG_MAX - sbi->inos;
967
968 buf->f_namelen = EROFS_NAME_LEN;
969
970 buf->f_fsid = u64_to_fsid(id);
971 return 0;
972}
973
974static int erofs_show_options(struct seq_file *seq, struct dentry *root)
975{
976 struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
977 struct erofs_mount_opts *opt = &sbi->opt;
978
979#ifdef CONFIG_EROFS_FS_XATTR
980 if (test_opt(opt, XATTR_USER))
981 seq_puts(seq, ",user_xattr");
982 else
983 seq_puts(seq, ",nouser_xattr");
984#endif
985#ifdef CONFIG_EROFS_FS_POSIX_ACL
986 if (test_opt(opt, POSIX_ACL))
987 seq_puts(seq, ",acl");
988 else
989 seq_puts(seq, ",noacl");
990#endif
991#ifdef CONFIG_EROFS_FS_ZIP
992 if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
993 seq_puts(seq, ",cache_strategy=disabled");
994 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
995 seq_puts(seq, ",cache_strategy=readahead");
996 else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
997 seq_puts(seq, ",cache_strategy=readaround");
998#endif
999 if (test_opt(opt, DAX_ALWAYS))
1000 seq_puts(seq, ",dax=always");
1001 if (test_opt(opt, DAX_NEVER))
1002 seq_puts(seq, ",dax=never");
1003#ifdef CONFIG_EROFS_FS_ONDEMAND
1004 if (sbi->fsid)
1005 seq_printf(seq, ",fsid=%s", sbi->fsid);
1006 if (sbi->domain_id)
1007 seq_printf(seq, ",domain_id=%s", sbi->domain_id);
1008#endif
1009 return 0;
1010}
1011
1012const struct super_operations erofs_sops = {
1013 .put_super = erofs_put_super,
1014 .alloc_inode = erofs_alloc_inode,
1015 .free_inode = erofs_free_inode,
1016 .statfs = erofs_statfs,
1017 .show_options = erofs_show_options,
1018};
1019
1020module_init(erofs_module_init);
1021module_exit(erofs_module_exit);
1022
1023MODULE_DESCRIPTION("Enhanced ROM File System");
1024MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
1025MODULE_LICENSE("GPL");