Loading...
Note: File does not exist in v3.1.
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/f2fs/data.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/sched/mm.h>
11#include <linux/mpage.h>
12#include <linux/writeback.h>
13#include <linux/pagevec.h>
14#include <linux/blkdev.h>
15#include <linux/bio.h>
16#include <linux/blk-crypto.h>
17#include <linux/swap.h>
18#include <linux/prefetch.h>
19#include <linux/uio.h>
20#include <linux/sched/signal.h>
21#include <linux/fiemap.h>
22#include <linux/iomap.h>
23
24#include "f2fs.h"
25#include "node.h"
26#include "segment.h"
27#include "iostat.h"
28#include <trace/events/f2fs.h>
29
30#define NUM_PREALLOC_POST_READ_CTXS 128
31
32static struct kmem_cache *bio_post_read_ctx_cache;
33static struct kmem_cache *bio_entry_slab;
34static mempool_t *bio_post_read_ctx_pool;
35static struct bio_set f2fs_bioset;
36
37#define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
38
39int __init f2fs_init_bioset(void)
40{
41 return bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
42 0, BIOSET_NEED_BVECS);
43}
44
45void f2fs_destroy_bioset(void)
46{
47 bioset_exit(&f2fs_bioset);
48}
49
50bool f2fs_is_cp_guaranteed(struct page *page)
51{
52 struct address_space *mapping = page->mapping;
53 struct inode *inode;
54 struct f2fs_sb_info *sbi;
55
56 if (!mapping)
57 return false;
58
59 inode = mapping->host;
60 sbi = F2FS_I_SB(inode);
61
62 if (inode->i_ino == F2FS_META_INO(sbi) ||
63 inode->i_ino == F2FS_NODE_INO(sbi) ||
64 S_ISDIR(inode->i_mode))
65 return true;
66
67 if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
68 page_private_gcing(page))
69 return true;
70 return false;
71}
72
73static enum count_type __read_io_type(struct page *page)
74{
75 struct address_space *mapping = page_file_mapping(page);
76
77 if (mapping) {
78 struct inode *inode = mapping->host;
79 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
80
81 if (inode->i_ino == F2FS_META_INO(sbi))
82 return F2FS_RD_META;
83
84 if (inode->i_ino == F2FS_NODE_INO(sbi))
85 return F2FS_RD_NODE;
86 }
87 return F2FS_RD_DATA;
88}
89
90/* postprocessing steps for read bios */
91enum bio_post_read_step {
92#ifdef CONFIG_FS_ENCRYPTION
93 STEP_DECRYPT = BIT(0),
94#else
95 STEP_DECRYPT = 0, /* compile out the decryption-related code */
96#endif
97#ifdef CONFIG_F2FS_FS_COMPRESSION
98 STEP_DECOMPRESS = BIT(1),
99#else
100 STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
101#endif
102#ifdef CONFIG_FS_VERITY
103 STEP_VERITY = BIT(2),
104#else
105 STEP_VERITY = 0, /* compile out the verity-related code */
106#endif
107};
108
109struct bio_post_read_ctx {
110 struct bio *bio;
111 struct f2fs_sb_info *sbi;
112 struct work_struct work;
113 unsigned int enabled_steps;
114 /*
115 * decompression_attempted keeps track of whether
116 * f2fs_end_read_compressed_page() has been called on the pages in the
117 * bio that belong to a compressed cluster yet.
118 */
119 bool decompression_attempted;
120 block_t fs_blkaddr;
121};
122
123/*
124 * Update and unlock a bio's pages, and free the bio.
125 *
126 * This marks pages up-to-date only if there was no error in the bio (I/O error,
127 * decryption error, or verity error), as indicated by bio->bi_status.
128 *
129 * "Compressed pages" (pagecache pages backed by a compressed cluster on-disk)
130 * aren't marked up-to-date here, as decompression is done on a per-compression-
131 * cluster basis rather than a per-bio basis. Instead, we only must do two
132 * things for each compressed page here: call f2fs_end_read_compressed_page()
133 * with failed=true if an error occurred before it would have normally gotten
134 * called (i.e., I/O error or decryption error, but *not* verity error), and
135 * release the bio's reference to the decompress_io_ctx of the page's cluster.
136 */
137static void f2fs_finish_read_bio(struct bio *bio, bool in_task)
138{
139 struct bio_vec *bv;
140 struct bvec_iter_all iter_all;
141 struct bio_post_read_ctx *ctx = bio->bi_private;
142
143 bio_for_each_segment_all(bv, bio, iter_all) {
144 struct page *page = bv->bv_page;
145
146 if (f2fs_is_compressed_page(page)) {
147 if (ctx && !ctx->decompression_attempted)
148 f2fs_end_read_compressed_page(page, true, 0,
149 in_task);
150 f2fs_put_page_dic(page, in_task);
151 continue;
152 }
153
154 if (bio->bi_status)
155 ClearPageUptodate(page);
156 else
157 SetPageUptodate(page);
158 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
159 unlock_page(page);
160 }
161
162 if (ctx)
163 mempool_free(ctx, bio_post_read_ctx_pool);
164 bio_put(bio);
165}
166
167static void f2fs_verify_bio(struct work_struct *work)
168{
169 struct bio_post_read_ctx *ctx =
170 container_of(work, struct bio_post_read_ctx, work);
171 struct bio *bio = ctx->bio;
172 bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
173
174 /*
175 * fsverity_verify_bio() may call readahead() again, and while verity
176 * will be disabled for this, decryption and/or decompression may still
177 * be needed, resulting in another bio_post_read_ctx being allocated.
178 * So to prevent deadlocks we need to release the current ctx to the
179 * mempool first. This assumes that verity is the last post-read step.
180 */
181 mempool_free(ctx, bio_post_read_ctx_pool);
182 bio->bi_private = NULL;
183
184 /*
185 * Verify the bio's pages with fs-verity. Exclude compressed pages,
186 * as those were handled separately by f2fs_end_read_compressed_page().
187 */
188 if (may_have_compressed_pages) {
189 struct bio_vec *bv;
190 struct bvec_iter_all iter_all;
191
192 bio_for_each_segment_all(bv, bio, iter_all) {
193 struct page *page = bv->bv_page;
194
195 if (!f2fs_is_compressed_page(page) &&
196 !fsverity_verify_page(page)) {
197 bio->bi_status = BLK_STS_IOERR;
198 break;
199 }
200 }
201 } else {
202 fsverity_verify_bio(bio);
203 }
204
205 f2fs_finish_read_bio(bio, true);
206}
207
208/*
209 * If the bio's data needs to be verified with fs-verity, then enqueue the
210 * verity work for the bio. Otherwise finish the bio now.
211 *
212 * Note that to avoid deadlocks, the verity work can't be done on the
213 * decryption/decompression workqueue. This is because verifying the data pages
214 * can involve reading verity metadata pages from the file, and these verity
215 * metadata pages may be encrypted and/or compressed.
216 */
217static void f2fs_verify_and_finish_bio(struct bio *bio, bool in_task)
218{
219 struct bio_post_read_ctx *ctx = bio->bi_private;
220
221 if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
222 INIT_WORK(&ctx->work, f2fs_verify_bio);
223 fsverity_enqueue_verify_work(&ctx->work);
224 } else {
225 f2fs_finish_read_bio(bio, in_task);
226 }
227}
228
229/*
230 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
231 * remaining page was read by @ctx->bio.
232 *
233 * Note that a bio may span clusters (even a mix of compressed and uncompressed
234 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
235 * that the bio includes at least one compressed page. The actual decompression
236 * is done on a per-cluster basis, not a per-bio basis.
237 */
238static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx,
239 bool in_task)
240{
241 struct bio_vec *bv;
242 struct bvec_iter_all iter_all;
243 bool all_compressed = true;
244 block_t blkaddr = ctx->fs_blkaddr;
245
246 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
247 struct page *page = bv->bv_page;
248
249 if (f2fs_is_compressed_page(page))
250 f2fs_end_read_compressed_page(page, false, blkaddr,
251 in_task);
252 else
253 all_compressed = false;
254
255 blkaddr++;
256 }
257
258 ctx->decompression_attempted = true;
259
260 /*
261 * Optimization: if all the bio's pages are compressed, then scheduling
262 * the per-bio verity work is unnecessary, as verity will be fully
263 * handled at the compression cluster level.
264 */
265 if (all_compressed)
266 ctx->enabled_steps &= ~STEP_VERITY;
267}
268
269static void f2fs_post_read_work(struct work_struct *work)
270{
271 struct bio_post_read_ctx *ctx =
272 container_of(work, struct bio_post_read_ctx, work);
273 struct bio *bio = ctx->bio;
274
275 if ((ctx->enabled_steps & STEP_DECRYPT) && !fscrypt_decrypt_bio(bio)) {
276 f2fs_finish_read_bio(bio, true);
277 return;
278 }
279
280 if (ctx->enabled_steps & STEP_DECOMPRESS)
281 f2fs_handle_step_decompress(ctx, true);
282
283 f2fs_verify_and_finish_bio(bio, true);
284}
285
286static void f2fs_read_end_io(struct bio *bio)
287{
288 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
289 struct bio_post_read_ctx *ctx;
290 bool intask = in_task();
291
292 iostat_update_and_unbind_ctx(bio);
293 ctx = bio->bi_private;
294
295 if (time_to_inject(sbi, FAULT_READ_IO))
296 bio->bi_status = BLK_STS_IOERR;
297
298 if (bio->bi_status) {
299 f2fs_finish_read_bio(bio, intask);
300 return;
301 }
302
303 if (ctx) {
304 unsigned int enabled_steps = ctx->enabled_steps &
305 (STEP_DECRYPT | STEP_DECOMPRESS);
306
307 /*
308 * If we have only decompression step between decompression and
309 * decrypt, we don't need post processing for this.
310 */
311 if (enabled_steps == STEP_DECOMPRESS &&
312 !f2fs_low_mem_mode(sbi)) {
313 f2fs_handle_step_decompress(ctx, intask);
314 } else if (enabled_steps) {
315 INIT_WORK(&ctx->work, f2fs_post_read_work);
316 queue_work(ctx->sbi->post_read_wq, &ctx->work);
317 return;
318 }
319 }
320
321 f2fs_verify_and_finish_bio(bio, intask);
322}
323
324static void f2fs_write_end_io(struct bio *bio)
325{
326 struct f2fs_sb_info *sbi;
327 struct bio_vec *bvec;
328 struct bvec_iter_all iter_all;
329
330 iostat_update_and_unbind_ctx(bio);
331 sbi = bio->bi_private;
332
333 if (time_to_inject(sbi, FAULT_WRITE_IO))
334 bio->bi_status = BLK_STS_IOERR;
335
336 bio_for_each_segment_all(bvec, bio, iter_all) {
337 struct page *page = bvec->bv_page;
338 enum count_type type = WB_DATA_TYPE(page, false);
339
340 fscrypt_finalize_bounce_page(&page);
341
342#ifdef CONFIG_F2FS_FS_COMPRESSION
343 if (f2fs_is_compressed_page(page)) {
344 f2fs_compress_write_end_io(bio, page);
345 continue;
346 }
347#endif
348
349 if (unlikely(bio->bi_status)) {
350 mapping_set_error(page->mapping, -EIO);
351 if (type == F2FS_WB_CP_DATA)
352 f2fs_stop_checkpoint(sbi, true,
353 STOP_CP_REASON_WRITE_FAIL);
354 }
355
356 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
357 page_folio(page)->index != nid_of_node(page));
358
359 dec_page_count(sbi, type);
360 if (f2fs_in_warm_node_list(sbi, page))
361 f2fs_del_fsync_node_entry(sbi, page);
362 clear_page_private_gcing(page);
363 end_page_writeback(page);
364 }
365 if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
366 wq_has_sleeper(&sbi->cp_wait))
367 wake_up(&sbi->cp_wait);
368
369 bio_put(bio);
370}
371
372#ifdef CONFIG_BLK_DEV_ZONED
373static void f2fs_zone_write_end_io(struct bio *bio)
374{
375 struct f2fs_bio_info *io = (struct f2fs_bio_info *)bio->bi_private;
376
377 bio->bi_private = io->bi_private;
378 complete(&io->zone_wait);
379 f2fs_write_end_io(bio);
380}
381#endif
382
383struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
384 block_t blk_addr, sector_t *sector)
385{
386 struct block_device *bdev = sbi->sb->s_bdev;
387 int i;
388
389 if (f2fs_is_multi_device(sbi)) {
390 for (i = 0; i < sbi->s_ndevs; i++) {
391 if (FDEV(i).start_blk <= blk_addr &&
392 FDEV(i).end_blk >= blk_addr) {
393 blk_addr -= FDEV(i).start_blk;
394 bdev = FDEV(i).bdev;
395 break;
396 }
397 }
398 }
399
400 if (sector)
401 *sector = SECTOR_FROM_BLOCK(blk_addr);
402 return bdev;
403}
404
405int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
406{
407 int i;
408
409 if (!f2fs_is_multi_device(sbi))
410 return 0;
411
412 for (i = 0; i < sbi->s_ndevs; i++)
413 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
414 return i;
415 return 0;
416}
417
418static blk_opf_t f2fs_io_flags(struct f2fs_io_info *fio)
419{
420 unsigned int temp_mask = GENMASK(NR_TEMP_TYPE - 1, 0);
421 unsigned int fua_flag, meta_flag, io_flag;
422 blk_opf_t op_flags = 0;
423
424 if (fio->op != REQ_OP_WRITE)
425 return 0;
426 if (fio->type == DATA)
427 io_flag = fio->sbi->data_io_flag;
428 else if (fio->type == NODE)
429 io_flag = fio->sbi->node_io_flag;
430 else
431 return 0;
432
433 fua_flag = io_flag & temp_mask;
434 meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
435
436 /*
437 * data/node io flag bits per temp:
438 * REQ_META | REQ_FUA |
439 * 5 | 4 | 3 | 2 | 1 | 0 |
440 * Cold | Warm | Hot | Cold | Warm | Hot |
441 */
442 if (BIT(fio->temp) & meta_flag)
443 op_flags |= REQ_META;
444 if (BIT(fio->temp) & fua_flag)
445 op_flags |= REQ_FUA;
446 return op_flags;
447}
448
449static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
450{
451 struct f2fs_sb_info *sbi = fio->sbi;
452 struct block_device *bdev;
453 sector_t sector;
454 struct bio *bio;
455
456 bdev = f2fs_target_device(sbi, fio->new_blkaddr, §or);
457 bio = bio_alloc_bioset(bdev, npages,
458 fio->op | fio->op_flags | f2fs_io_flags(fio),
459 GFP_NOIO, &f2fs_bioset);
460 bio->bi_iter.bi_sector = sector;
461 if (is_read_io(fio->op)) {
462 bio->bi_end_io = f2fs_read_end_io;
463 bio->bi_private = NULL;
464 } else {
465 bio->bi_end_io = f2fs_write_end_io;
466 bio->bi_private = sbi;
467 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
468 fio->type, fio->temp);
469 }
470 iostat_alloc_and_bind_ctx(sbi, bio, NULL);
471
472 if (fio->io_wbc)
473 wbc_init_bio(fio->io_wbc, bio);
474
475 return bio;
476}
477
478static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
479 pgoff_t first_idx,
480 const struct f2fs_io_info *fio,
481 gfp_t gfp_mask)
482{
483 /*
484 * The f2fs garbage collector sets ->encrypted_page when it wants to
485 * read/write raw data without encryption.
486 */
487 if (!fio || !fio->encrypted_page)
488 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
489}
490
491static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
492 pgoff_t next_idx,
493 const struct f2fs_io_info *fio)
494{
495 /*
496 * The f2fs garbage collector sets ->encrypted_page when it wants to
497 * read/write raw data without encryption.
498 */
499 if (fio && fio->encrypted_page)
500 return !bio_has_crypt_ctx(bio);
501
502 return fscrypt_mergeable_bio(bio, inode, next_idx);
503}
504
505void f2fs_submit_read_bio(struct f2fs_sb_info *sbi, struct bio *bio,
506 enum page_type type)
507{
508 WARN_ON_ONCE(!is_read_io(bio_op(bio)));
509 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
510
511 iostat_update_submit_ctx(bio, type);
512 submit_bio(bio);
513}
514
515static void f2fs_submit_write_bio(struct f2fs_sb_info *sbi, struct bio *bio,
516 enum page_type type)
517{
518 WARN_ON_ONCE(is_read_io(bio_op(bio)));
519
520 if (f2fs_lfs_mode(sbi) && current->plug && PAGE_TYPE_ON_MAIN(type))
521 blk_finish_plug(current->plug);
522
523 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
524 iostat_update_submit_ctx(bio, type);
525 submit_bio(bio);
526}
527
528static void __submit_merged_bio(struct f2fs_bio_info *io)
529{
530 struct f2fs_io_info *fio = &io->fio;
531
532 if (!io->bio)
533 return;
534
535 if (is_read_io(fio->op)) {
536 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
537 f2fs_submit_read_bio(io->sbi, io->bio, fio->type);
538 } else {
539 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
540 f2fs_submit_write_bio(io->sbi, io->bio, fio->type);
541 }
542 io->bio = NULL;
543}
544
545static bool __has_merged_page(struct bio *bio, struct inode *inode,
546 struct page *page, nid_t ino)
547{
548 struct bio_vec *bvec;
549 struct bvec_iter_all iter_all;
550
551 if (!bio)
552 return false;
553
554 if (!inode && !page && !ino)
555 return true;
556
557 bio_for_each_segment_all(bvec, bio, iter_all) {
558 struct page *target = bvec->bv_page;
559
560 if (fscrypt_is_bounce_page(target)) {
561 target = fscrypt_pagecache_page(target);
562 if (IS_ERR(target))
563 continue;
564 }
565 if (f2fs_is_compressed_page(target)) {
566 target = f2fs_compress_control_page(target);
567 if (IS_ERR(target))
568 continue;
569 }
570
571 if (inode && inode == target->mapping->host)
572 return true;
573 if (page && page == target)
574 return true;
575 if (ino && ino == ino_of_node(target))
576 return true;
577 }
578
579 return false;
580}
581
582int f2fs_init_write_merge_io(struct f2fs_sb_info *sbi)
583{
584 int i;
585
586 for (i = 0; i < NR_PAGE_TYPE; i++) {
587 int n = (i == META) ? 1 : NR_TEMP_TYPE;
588 int j;
589
590 sbi->write_io[i] = f2fs_kmalloc(sbi,
591 array_size(n, sizeof(struct f2fs_bio_info)),
592 GFP_KERNEL);
593 if (!sbi->write_io[i])
594 return -ENOMEM;
595
596 for (j = HOT; j < n; j++) {
597 struct f2fs_bio_info *io = &sbi->write_io[i][j];
598
599 init_f2fs_rwsem(&io->io_rwsem);
600 io->sbi = sbi;
601 io->bio = NULL;
602 io->last_block_in_bio = 0;
603 spin_lock_init(&io->io_lock);
604 INIT_LIST_HEAD(&io->io_list);
605 INIT_LIST_HEAD(&io->bio_list);
606 init_f2fs_rwsem(&io->bio_list_lock);
607#ifdef CONFIG_BLK_DEV_ZONED
608 init_completion(&io->zone_wait);
609 io->zone_pending_bio = NULL;
610 io->bi_private = NULL;
611#endif
612 }
613 }
614
615 return 0;
616}
617
618static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
619 enum page_type type, enum temp_type temp)
620{
621 enum page_type btype = PAGE_TYPE_OF_BIO(type);
622 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
623
624 f2fs_down_write(&io->io_rwsem);
625
626 if (!io->bio)
627 goto unlock_out;
628
629 /* change META to META_FLUSH in the checkpoint procedure */
630 if (type >= META_FLUSH) {
631 io->fio.type = META_FLUSH;
632 io->bio->bi_opf |= REQ_META | REQ_PRIO | REQ_SYNC;
633 if (!test_opt(sbi, NOBARRIER))
634 io->bio->bi_opf |= REQ_PREFLUSH | REQ_FUA;
635 }
636 __submit_merged_bio(io);
637unlock_out:
638 f2fs_up_write(&io->io_rwsem);
639}
640
641static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
642 struct inode *inode, struct page *page,
643 nid_t ino, enum page_type type, bool force)
644{
645 enum temp_type temp;
646 bool ret = true;
647
648 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
649 if (!force) {
650 enum page_type btype = PAGE_TYPE_OF_BIO(type);
651 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
652
653 f2fs_down_read(&io->io_rwsem);
654 ret = __has_merged_page(io->bio, inode, page, ino);
655 f2fs_up_read(&io->io_rwsem);
656 }
657 if (ret)
658 __f2fs_submit_merged_write(sbi, type, temp);
659
660 /* TODO: use HOT temp only for meta pages now. */
661 if (type >= META)
662 break;
663 }
664}
665
666void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
667{
668 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
669}
670
671void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
672 struct inode *inode, struct page *page,
673 nid_t ino, enum page_type type)
674{
675 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
676}
677
678void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
679{
680 f2fs_submit_merged_write(sbi, DATA);
681 f2fs_submit_merged_write(sbi, NODE);
682 f2fs_submit_merged_write(sbi, META);
683}
684
685/*
686 * Fill the locked page with data located in the block address.
687 * A caller needs to unlock the page on failure.
688 */
689int f2fs_submit_page_bio(struct f2fs_io_info *fio)
690{
691 struct bio *bio;
692 struct page *page = fio->encrypted_page ?
693 fio->encrypted_page : fio->page;
694
695 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
696 fio->is_por ? META_POR : (__is_meta_io(fio) ?
697 META_GENERIC : DATA_GENERIC_ENHANCE)))
698 return -EFSCORRUPTED;
699
700 trace_f2fs_submit_page_bio(page, fio);
701
702 /* Allocate a new bio */
703 bio = __bio_alloc(fio, 1);
704
705 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
706 page_folio(fio->page)->index, fio, GFP_NOIO);
707
708 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
709 bio_put(bio);
710 return -EFAULT;
711 }
712
713 if (fio->io_wbc && !is_read_io(fio->op))
714 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
715 PAGE_SIZE);
716
717 inc_page_count(fio->sbi, is_read_io(fio->op) ?
718 __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
719
720 if (is_read_io(bio_op(bio)))
721 f2fs_submit_read_bio(fio->sbi, bio, fio->type);
722 else
723 f2fs_submit_write_bio(fio->sbi, bio, fio->type);
724 return 0;
725}
726
727static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
728 block_t last_blkaddr, block_t cur_blkaddr)
729{
730 if (unlikely(sbi->max_io_bytes &&
731 bio->bi_iter.bi_size >= sbi->max_io_bytes))
732 return false;
733 if (last_blkaddr + 1 != cur_blkaddr)
734 return false;
735 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
736}
737
738static bool io_type_is_mergeable(struct f2fs_bio_info *io,
739 struct f2fs_io_info *fio)
740{
741 if (io->fio.op != fio->op)
742 return false;
743 return io->fio.op_flags == fio->op_flags;
744}
745
746static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
747 struct f2fs_bio_info *io,
748 struct f2fs_io_info *fio,
749 block_t last_blkaddr,
750 block_t cur_blkaddr)
751{
752 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
753 return false;
754 return io_type_is_mergeable(io, fio);
755}
756
757static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
758 struct page *page, enum temp_type temp)
759{
760 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
761 struct bio_entry *be;
762
763 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS, true, NULL);
764 be->bio = bio;
765 bio_get(bio);
766
767 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
768 f2fs_bug_on(sbi, 1);
769
770 f2fs_down_write(&io->bio_list_lock);
771 list_add_tail(&be->list, &io->bio_list);
772 f2fs_up_write(&io->bio_list_lock);
773}
774
775static void del_bio_entry(struct bio_entry *be)
776{
777 list_del(&be->list);
778 kmem_cache_free(bio_entry_slab, be);
779}
780
781static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
782 struct page *page)
783{
784 struct f2fs_sb_info *sbi = fio->sbi;
785 enum temp_type temp;
786 bool found = false;
787 int ret = -EAGAIN;
788
789 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
790 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
791 struct list_head *head = &io->bio_list;
792 struct bio_entry *be;
793
794 f2fs_down_write(&io->bio_list_lock);
795 list_for_each_entry(be, head, list) {
796 if (be->bio != *bio)
797 continue;
798
799 found = true;
800
801 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
802 *fio->last_block,
803 fio->new_blkaddr));
804 if (f2fs_crypt_mergeable_bio(*bio,
805 fio->page->mapping->host,
806 page_folio(fio->page)->index, fio) &&
807 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
808 PAGE_SIZE) {
809 ret = 0;
810 break;
811 }
812
813 /* page can't be merged into bio; submit the bio */
814 del_bio_entry(be);
815 f2fs_submit_write_bio(sbi, *bio, DATA);
816 break;
817 }
818 f2fs_up_write(&io->bio_list_lock);
819 }
820
821 if (ret) {
822 bio_put(*bio);
823 *bio = NULL;
824 }
825
826 return ret;
827}
828
829void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
830 struct bio **bio, struct page *page)
831{
832 enum temp_type temp;
833 bool found = false;
834 struct bio *target = bio ? *bio : NULL;
835
836 f2fs_bug_on(sbi, !target && !page);
837
838 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
839 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
840 struct list_head *head = &io->bio_list;
841 struct bio_entry *be;
842
843 if (list_empty(head))
844 continue;
845
846 f2fs_down_read(&io->bio_list_lock);
847 list_for_each_entry(be, head, list) {
848 if (target)
849 found = (target == be->bio);
850 else
851 found = __has_merged_page(be->bio, NULL,
852 page, 0);
853 if (found)
854 break;
855 }
856 f2fs_up_read(&io->bio_list_lock);
857
858 if (!found)
859 continue;
860
861 found = false;
862
863 f2fs_down_write(&io->bio_list_lock);
864 list_for_each_entry(be, head, list) {
865 if (target)
866 found = (target == be->bio);
867 else
868 found = __has_merged_page(be->bio, NULL,
869 page, 0);
870 if (found) {
871 target = be->bio;
872 del_bio_entry(be);
873 break;
874 }
875 }
876 f2fs_up_write(&io->bio_list_lock);
877 }
878
879 if (found)
880 f2fs_submit_write_bio(sbi, target, DATA);
881 if (bio && *bio) {
882 bio_put(*bio);
883 *bio = NULL;
884 }
885}
886
887int f2fs_merge_page_bio(struct f2fs_io_info *fio)
888{
889 struct bio *bio = *fio->bio;
890 struct page *page = fio->encrypted_page ?
891 fio->encrypted_page : fio->page;
892
893 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
894 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
895 return -EFSCORRUPTED;
896
897 trace_f2fs_submit_page_bio(page, fio);
898
899 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
900 fio->new_blkaddr))
901 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
902alloc_new:
903 if (!bio) {
904 bio = __bio_alloc(fio, BIO_MAX_VECS);
905 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
906 page_folio(fio->page)->index, fio, GFP_NOIO);
907
908 add_bio_entry(fio->sbi, bio, page, fio->temp);
909 } else {
910 if (add_ipu_page(fio, &bio, page))
911 goto alloc_new;
912 }
913
914 if (fio->io_wbc)
915 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
916 PAGE_SIZE);
917
918 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
919
920 *fio->last_block = fio->new_blkaddr;
921 *fio->bio = bio;
922
923 return 0;
924}
925
926#ifdef CONFIG_BLK_DEV_ZONED
927static bool is_end_zone_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr)
928{
929 struct block_device *bdev = sbi->sb->s_bdev;
930 int devi = 0;
931
932 if (f2fs_is_multi_device(sbi)) {
933 devi = f2fs_target_device_index(sbi, blkaddr);
934 if (blkaddr < FDEV(devi).start_blk ||
935 blkaddr > FDEV(devi).end_blk) {
936 f2fs_err(sbi, "Invalid block %x", blkaddr);
937 return false;
938 }
939 blkaddr -= FDEV(devi).start_blk;
940 bdev = FDEV(devi).bdev;
941 }
942 return bdev_is_zoned(bdev) &&
943 f2fs_blkz_is_seq(sbi, devi, blkaddr) &&
944 (blkaddr % sbi->blocks_per_blkz == sbi->blocks_per_blkz - 1);
945}
946#endif
947
948void f2fs_submit_page_write(struct f2fs_io_info *fio)
949{
950 struct f2fs_sb_info *sbi = fio->sbi;
951 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
952 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
953 struct page *bio_page;
954 enum count_type type;
955
956 f2fs_bug_on(sbi, is_read_io(fio->op));
957
958 f2fs_down_write(&io->io_rwsem);
959next:
960#ifdef CONFIG_BLK_DEV_ZONED
961 if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
962 wait_for_completion_io(&io->zone_wait);
963 bio_put(io->zone_pending_bio);
964 io->zone_pending_bio = NULL;
965 io->bi_private = NULL;
966 }
967#endif
968
969 if (fio->in_list) {
970 spin_lock(&io->io_lock);
971 if (list_empty(&io->io_list)) {
972 spin_unlock(&io->io_lock);
973 goto out;
974 }
975 fio = list_first_entry(&io->io_list,
976 struct f2fs_io_info, list);
977 list_del(&fio->list);
978 spin_unlock(&io->io_lock);
979 }
980
981 verify_fio_blkaddr(fio);
982
983 if (fio->encrypted_page)
984 bio_page = fio->encrypted_page;
985 else if (fio->compressed_page)
986 bio_page = fio->compressed_page;
987 else
988 bio_page = fio->page;
989
990 /* set submitted = true as a return value */
991 fio->submitted = 1;
992
993 type = WB_DATA_TYPE(bio_page, fio->compressed_page);
994 inc_page_count(sbi, type);
995
996 if (io->bio &&
997 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
998 fio->new_blkaddr) ||
999 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
1000 page_folio(bio_page)->index, fio)))
1001 __submit_merged_bio(io);
1002alloc_new:
1003 if (io->bio == NULL) {
1004 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
1005 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1006 page_folio(bio_page)->index, fio, GFP_NOIO);
1007 io->fio = *fio;
1008 }
1009
1010 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
1011 __submit_merged_bio(io);
1012 goto alloc_new;
1013 }
1014
1015 if (fio->io_wbc)
1016 wbc_account_cgroup_owner(fio->io_wbc, page_folio(fio->page),
1017 PAGE_SIZE);
1018
1019 io->last_block_in_bio = fio->new_blkaddr;
1020
1021 trace_f2fs_submit_page_write(fio->page, fio);
1022#ifdef CONFIG_BLK_DEV_ZONED
1023 if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
1024 is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
1025 bio_get(io->bio);
1026 reinit_completion(&io->zone_wait);
1027 io->bi_private = io->bio->bi_private;
1028 io->bio->bi_private = io;
1029 io->bio->bi_end_io = f2fs_zone_write_end_io;
1030 io->zone_pending_bio = io->bio;
1031 __submit_merged_bio(io);
1032 }
1033#endif
1034 if (fio->in_list)
1035 goto next;
1036out:
1037 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1038 !f2fs_is_checkpoint_ready(sbi))
1039 __submit_merged_bio(io);
1040 f2fs_up_write(&io->io_rwsem);
1041}
1042
1043static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
1044 unsigned nr_pages, blk_opf_t op_flag,
1045 pgoff_t first_idx, bool for_write)
1046{
1047 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1048 struct bio *bio;
1049 struct bio_post_read_ctx *ctx = NULL;
1050 unsigned int post_read_steps = 0;
1051 sector_t sector;
1052 struct block_device *bdev = f2fs_target_device(sbi, blkaddr, §or);
1053
1054 bio = bio_alloc_bioset(bdev, bio_max_segs(nr_pages),
1055 REQ_OP_READ | op_flag,
1056 for_write ? GFP_NOIO : GFP_KERNEL, &f2fs_bioset);
1057 if (!bio)
1058 return ERR_PTR(-ENOMEM);
1059 bio->bi_iter.bi_sector = sector;
1060 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
1061 bio->bi_end_io = f2fs_read_end_io;
1062
1063 if (fscrypt_inode_uses_fs_layer_crypto(inode))
1064 post_read_steps |= STEP_DECRYPT;
1065
1066 if (f2fs_need_verity(inode, first_idx))
1067 post_read_steps |= STEP_VERITY;
1068
1069 /*
1070 * STEP_DECOMPRESS is handled specially, since a compressed file might
1071 * contain both compressed and uncompressed clusters. We'll allocate a
1072 * bio_post_read_ctx if the file is compressed, but the caller is
1073 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
1074 */
1075
1076 if (post_read_steps || f2fs_compressed_file(inode)) {
1077 /* Due to the mempool, this never fails. */
1078 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1079 ctx->bio = bio;
1080 ctx->sbi = sbi;
1081 ctx->enabled_steps = post_read_steps;
1082 ctx->fs_blkaddr = blkaddr;
1083 ctx->decompression_attempted = false;
1084 bio->bi_private = ctx;
1085 }
1086 iostat_alloc_and_bind_ctx(sbi, bio, ctx);
1087
1088 return bio;
1089}
1090
1091/* This can handle encryption stuffs */
1092static int f2fs_submit_page_read(struct inode *inode, struct folio *folio,
1093 block_t blkaddr, blk_opf_t op_flags,
1094 bool for_write)
1095{
1096 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1097 struct bio *bio;
1098
1099 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1100 folio->index, for_write);
1101 if (IS_ERR(bio))
1102 return PTR_ERR(bio);
1103
1104 /* wait for GCed page writeback via META_MAPPING */
1105 f2fs_wait_on_block_writeback(inode, blkaddr);
1106
1107 if (!bio_add_folio(bio, folio, PAGE_SIZE, 0)) {
1108 iostat_update_and_unbind_ctx(bio);
1109 if (bio->bi_private)
1110 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
1111 bio_put(bio);
1112 return -EFAULT;
1113 }
1114 inc_page_count(sbi, F2FS_RD_DATA);
1115 f2fs_update_iostat(sbi, NULL, FS_DATA_READ_IO, F2FS_BLKSIZE);
1116 f2fs_submit_read_bio(sbi, bio, DATA);
1117 return 0;
1118}
1119
1120static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1121{
1122 __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
1123
1124 dn->data_blkaddr = blkaddr;
1125 addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
1126}
1127
1128/*
1129 * Lock ordering for the change of data block address:
1130 * ->data_page
1131 * ->node_page
1132 * update block addresses in the node page
1133 */
1134void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1135{
1136 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1137 __set_data_blkaddr(dn, blkaddr);
1138 if (set_page_dirty(dn->node_page))
1139 dn->node_changed = true;
1140}
1141
1142void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1143{
1144 f2fs_set_data_blkaddr(dn, blkaddr);
1145 f2fs_update_read_extent_cache(dn);
1146}
1147
1148/* dn->ofs_in_node will be returned with up-to-date last block pointer */
1149int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1150{
1151 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1152 int err;
1153
1154 if (!count)
1155 return 0;
1156
1157 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1158 return -EPERM;
1159 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1160 if (unlikely(err))
1161 return err;
1162
1163 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1164 dn->ofs_in_node, count);
1165
1166 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1167
1168 for (; count > 0; dn->ofs_in_node++) {
1169 block_t blkaddr = f2fs_data_blkaddr(dn);
1170
1171 if (blkaddr == NULL_ADDR) {
1172 __set_data_blkaddr(dn, NEW_ADDR);
1173 count--;
1174 }
1175 }
1176
1177 if (set_page_dirty(dn->node_page))
1178 dn->node_changed = true;
1179 return 0;
1180}
1181
1182/* Should keep dn->ofs_in_node unchanged */
1183int f2fs_reserve_new_block(struct dnode_of_data *dn)
1184{
1185 unsigned int ofs_in_node = dn->ofs_in_node;
1186 int ret;
1187
1188 ret = f2fs_reserve_new_blocks(dn, 1);
1189 dn->ofs_in_node = ofs_in_node;
1190 return ret;
1191}
1192
1193int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1194{
1195 bool need_put = dn->inode_page ? false : true;
1196 int err;
1197
1198 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1199 if (err)
1200 return err;
1201
1202 if (dn->data_blkaddr == NULL_ADDR)
1203 err = f2fs_reserve_new_block(dn);
1204 if (err || need_put)
1205 f2fs_put_dnode(dn);
1206 return err;
1207}
1208
1209struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1210 blk_opf_t op_flags, bool for_write,
1211 pgoff_t *next_pgofs)
1212{
1213 struct address_space *mapping = inode->i_mapping;
1214 struct dnode_of_data dn;
1215 struct page *page;
1216 int err;
1217
1218 page = f2fs_grab_cache_page(mapping, index, for_write);
1219 if (!page)
1220 return ERR_PTR(-ENOMEM);
1221
1222 if (f2fs_lookup_read_extent_cache_block(inode, index,
1223 &dn.data_blkaddr)) {
1224 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1225 DATA_GENERIC_ENHANCE_READ)) {
1226 err = -EFSCORRUPTED;
1227 goto put_err;
1228 }
1229 goto got_it;
1230 }
1231
1232 set_new_dnode(&dn, inode, NULL, NULL, 0);
1233 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1234 if (err) {
1235 if (err == -ENOENT && next_pgofs)
1236 *next_pgofs = f2fs_get_next_page_offset(&dn, index);
1237 goto put_err;
1238 }
1239 f2fs_put_dnode(&dn);
1240
1241 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1242 err = -ENOENT;
1243 if (next_pgofs)
1244 *next_pgofs = index + 1;
1245 goto put_err;
1246 }
1247 if (dn.data_blkaddr != NEW_ADDR &&
1248 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1249 dn.data_blkaddr,
1250 DATA_GENERIC_ENHANCE)) {
1251 err = -EFSCORRUPTED;
1252 goto put_err;
1253 }
1254got_it:
1255 if (PageUptodate(page)) {
1256 unlock_page(page);
1257 return page;
1258 }
1259
1260 /*
1261 * A new dentry page is allocated but not able to be written, since its
1262 * new inode page couldn't be allocated due to -ENOSPC.
1263 * In such the case, its blkaddr can be remained as NEW_ADDR.
1264 * see, f2fs_add_link -> f2fs_get_new_data_page ->
1265 * f2fs_init_inode_metadata.
1266 */
1267 if (dn.data_blkaddr == NEW_ADDR) {
1268 zero_user_segment(page, 0, PAGE_SIZE);
1269 if (!PageUptodate(page))
1270 SetPageUptodate(page);
1271 unlock_page(page);
1272 return page;
1273 }
1274
1275 err = f2fs_submit_page_read(inode, page_folio(page), dn.data_blkaddr,
1276 op_flags, for_write);
1277 if (err)
1278 goto put_err;
1279 return page;
1280
1281put_err:
1282 f2fs_put_page(page, 1);
1283 return ERR_PTR(err);
1284}
1285
1286struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
1287 pgoff_t *next_pgofs)
1288{
1289 struct address_space *mapping = inode->i_mapping;
1290 struct page *page;
1291
1292 page = find_get_page(mapping, index);
1293 if (page && PageUptodate(page))
1294 return page;
1295 f2fs_put_page(page, 0);
1296
1297 page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
1298 if (IS_ERR(page))
1299 return page;
1300
1301 if (PageUptodate(page))
1302 return page;
1303
1304 wait_on_page_locked(page);
1305 if (unlikely(!PageUptodate(page))) {
1306 f2fs_put_page(page, 0);
1307 return ERR_PTR(-EIO);
1308 }
1309 return page;
1310}
1311
1312/*
1313 * If it tries to access a hole, return an error.
1314 * Because, the callers, functions in dir.c and GC, should be able to know
1315 * whether this page exists or not.
1316 */
1317struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1318 bool for_write)
1319{
1320 struct address_space *mapping = inode->i_mapping;
1321 struct page *page;
1322
1323 page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
1324 if (IS_ERR(page))
1325 return page;
1326
1327 /* wait for read completion */
1328 lock_page(page);
1329 if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
1330 f2fs_put_page(page, 1);
1331 return ERR_PTR(-EIO);
1332 }
1333 return page;
1334}
1335
1336/*
1337 * Caller ensures that this data page is never allocated.
1338 * A new zero-filled data page is allocated in the page cache.
1339 *
1340 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1341 * f2fs_unlock_op().
1342 * Note that, ipage is set only by make_empty_dir, and if any error occur,
1343 * ipage should be released by this function.
1344 */
1345struct page *f2fs_get_new_data_page(struct inode *inode,
1346 struct page *ipage, pgoff_t index, bool new_i_size)
1347{
1348 struct address_space *mapping = inode->i_mapping;
1349 struct page *page;
1350 struct dnode_of_data dn;
1351 int err;
1352
1353 page = f2fs_grab_cache_page(mapping, index, true);
1354 if (!page) {
1355 /*
1356 * before exiting, we should make sure ipage will be released
1357 * if any error occur.
1358 */
1359 f2fs_put_page(ipage, 1);
1360 return ERR_PTR(-ENOMEM);
1361 }
1362
1363 set_new_dnode(&dn, inode, ipage, NULL, 0);
1364 err = f2fs_reserve_block(&dn, index);
1365 if (err) {
1366 f2fs_put_page(page, 1);
1367 return ERR_PTR(err);
1368 }
1369 if (!ipage)
1370 f2fs_put_dnode(&dn);
1371
1372 if (PageUptodate(page))
1373 goto got_it;
1374
1375 if (dn.data_blkaddr == NEW_ADDR) {
1376 zero_user_segment(page, 0, PAGE_SIZE);
1377 if (!PageUptodate(page))
1378 SetPageUptodate(page);
1379 } else {
1380 f2fs_put_page(page, 1);
1381
1382 /* if ipage exists, blkaddr should be NEW_ADDR */
1383 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1384 page = f2fs_get_lock_data_page(inode, index, true);
1385 if (IS_ERR(page))
1386 return page;
1387 }
1388got_it:
1389 if (new_i_size && i_size_read(inode) <
1390 ((loff_t)(index + 1) << PAGE_SHIFT))
1391 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1392 return page;
1393}
1394
1395static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1396{
1397 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1398 struct f2fs_summary sum;
1399 struct node_info ni;
1400 block_t old_blkaddr;
1401 blkcnt_t count = 1;
1402 int err;
1403
1404 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1405 return -EPERM;
1406
1407 err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
1408 if (err)
1409 return err;
1410
1411 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1412 if (dn->data_blkaddr == NULL_ADDR) {
1413 err = inc_valid_block_count(sbi, dn->inode, &count, true);
1414 if (unlikely(err))
1415 return err;
1416 }
1417
1418 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1419 old_blkaddr = dn->data_blkaddr;
1420 err = f2fs_allocate_data_block(sbi, NULL, old_blkaddr,
1421 &dn->data_blkaddr, &sum, seg_type, NULL);
1422 if (err)
1423 return err;
1424
1425 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
1426 f2fs_invalidate_internal_cache(sbi, old_blkaddr);
1427
1428 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1429 return 0;
1430}
1431
1432static void f2fs_map_lock(struct f2fs_sb_info *sbi, int flag)
1433{
1434 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1435 f2fs_down_read(&sbi->node_change);
1436 else
1437 f2fs_lock_op(sbi);
1438}
1439
1440static void f2fs_map_unlock(struct f2fs_sb_info *sbi, int flag)
1441{
1442 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1443 f2fs_up_read(&sbi->node_change);
1444 else
1445 f2fs_unlock_op(sbi);
1446}
1447
1448int f2fs_get_block_locked(struct dnode_of_data *dn, pgoff_t index)
1449{
1450 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1451 int err = 0;
1452
1453 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1454 if (!f2fs_lookup_read_extent_cache_block(dn->inode, index,
1455 &dn->data_blkaddr))
1456 err = f2fs_reserve_block(dn, index);
1457 f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
1458
1459 return err;
1460}
1461
1462static int f2fs_map_no_dnode(struct inode *inode,
1463 struct f2fs_map_blocks *map, struct dnode_of_data *dn,
1464 pgoff_t pgoff)
1465{
1466 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1467
1468 /*
1469 * There is one exceptional case that read_node_page() may return
1470 * -ENOENT due to filesystem has been shutdown or cp_error, return
1471 * -EIO in that case.
1472 */
1473 if (map->m_may_create &&
1474 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || f2fs_cp_error(sbi)))
1475 return -EIO;
1476
1477 if (map->m_next_pgofs)
1478 *map->m_next_pgofs = f2fs_get_next_page_offset(dn, pgoff);
1479 if (map->m_next_extent)
1480 *map->m_next_extent = f2fs_get_next_page_offset(dn, pgoff);
1481 return 0;
1482}
1483
1484static bool f2fs_map_blocks_cached(struct inode *inode,
1485 struct f2fs_map_blocks *map, int flag)
1486{
1487 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1488 unsigned int maxblocks = map->m_len;
1489 pgoff_t pgoff = (pgoff_t)map->m_lblk;
1490 struct extent_info ei = {};
1491
1492 if (!f2fs_lookup_read_extent_cache(inode, pgoff, &ei))
1493 return false;
1494
1495 map->m_pblk = ei.blk + pgoff - ei.fofs;
1496 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgoff);
1497 map->m_flags = F2FS_MAP_MAPPED;
1498 if (map->m_next_extent)
1499 *map->m_next_extent = pgoff + map->m_len;
1500
1501 /* for hardware encryption, but to avoid potential issue in future */
1502 if (flag == F2FS_GET_BLOCK_DIO)
1503 f2fs_wait_on_block_writeback_range(inode,
1504 map->m_pblk, map->m_len);
1505
1506 if (f2fs_allow_multi_device_dio(sbi, flag)) {
1507 int bidx = f2fs_target_device_index(sbi, map->m_pblk);
1508 struct f2fs_dev_info *dev = &sbi->devs[bidx];
1509
1510 map->m_bdev = dev->bdev;
1511 map->m_pblk -= dev->start_blk;
1512 map->m_len = min(map->m_len, dev->end_blk + 1 - map->m_pblk);
1513 } else {
1514 map->m_bdev = inode->i_sb->s_bdev;
1515 }
1516 return true;
1517}
1518
1519static bool map_is_mergeable(struct f2fs_sb_info *sbi,
1520 struct f2fs_map_blocks *map,
1521 block_t blkaddr, int flag, int bidx,
1522 int ofs)
1523{
1524 if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
1525 return false;
1526 if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs))
1527 return true;
1528 if (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR)
1529 return true;
1530 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1531 return true;
1532 if (flag == F2FS_GET_BLOCK_DIO &&
1533 map->m_pblk == NULL_ADDR && blkaddr == NULL_ADDR)
1534 return true;
1535 return false;
1536}
1537
1538/*
1539 * f2fs_map_blocks() tries to find or build mapping relationship which
1540 * maps continuous logical blocks to physical blocks, and return such
1541 * info via f2fs_map_blocks structure.
1542 */
1543int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int flag)
1544{
1545 unsigned int maxblocks = map->m_len;
1546 struct dnode_of_data dn;
1547 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1548 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1549 pgoff_t pgofs, end_offset, end;
1550 int err = 0, ofs = 1;
1551 unsigned int ofs_in_node, last_ofs_in_node;
1552 blkcnt_t prealloc;
1553 block_t blkaddr;
1554 unsigned int start_pgofs;
1555 int bidx = 0;
1556 bool is_hole;
1557
1558 if (!maxblocks)
1559 return 0;
1560
1561 if (!map->m_may_create && f2fs_map_blocks_cached(inode, map, flag))
1562 goto out;
1563
1564 map->m_bdev = inode->i_sb->s_bdev;
1565 map->m_multidev_dio =
1566 f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
1567
1568 map->m_len = 0;
1569 map->m_flags = 0;
1570
1571 /* it only supports block size == page size */
1572 pgofs = (pgoff_t)map->m_lblk;
1573 end = pgofs + maxblocks;
1574
1575next_dnode:
1576 if (map->m_may_create)
1577 f2fs_map_lock(sbi, flag);
1578
1579 /* When reading holes, we need its node page */
1580 set_new_dnode(&dn, inode, NULL, NULL, 0);
1581 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1582 if (err) {
1583 if (flag == F2FS_GET_BLOCK_BMAP)
1584 map->m_pblk = 0;
1585 if (err == -ENOENT)
1586 err = f2fs_map_no_dnode(inode, map, &dn, pgofs);
1587 goto unlock_out;
1588 }
1589
1590 start_pgofs = pgofs;
1591 prealloc = 0;
1592 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1593 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1594
1595next_block:
1596 blkaddr = f2fs_data_blkaddr(&dn);
1597 is_hole = !__is_valid_data_blkaddr(blkaddr);
1598 if (!is_hole &&
1599 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1600 err = -EFSCORRUPTED;
1601 goto sync_out;
1602 }
1603
1604 /* use out-place-update for direct IO under LFS mode */
1605 if (map->m_may_create && (is_hole ||
1606 (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
1607 !f2fs_is_pinned_file(inode)))) {
1608 if (unlikely(f2fs_cp_error(sbi))) {
1609 err = -EIO;
1610 goto sync_out;
1611 }
1612
1613 switch (flag) {
1614 case F2FS_GET_BLOCK_PRE_AIO:
1615 if (blkaddr == NULL_ADDR) {
1616 prealloc++;
1617 last_ofs_in_node = dn.ofs_in_node;
1618 }
1619 break;
1620 case F2FS_GET_BLOCK_PRE_DIO:
1621 case F2FS_GET_BLOCK_DIO:
1622 err = __allocate_data_block(&dn, map->m_seg_type);
1623 if (err)
1624 goto sync_out;
1625 if (flag == F2FS_GET_BLOCK_PRE_DIO)
1626 file_need_truncate(inode);
1627 set_inode_flag(inode, FI_APPEND_WRITE);
1628 break;
1629 default:
1630 WARN_ON_ONCE(1);
1631 err = -EIO;
1632 goto sync_out;
1633 }
1634
1635 blkaddr = dn.data_blkaddr;
1636 if (is_hole)
1637 map->m_flags |= F2FS_MAP_NEW;
1638 } else if (is_hole) {
1639 if (f2fs_compressed_file(inode) &&
1640 f2fs_sanity_check_cluster(&dn)) {
1641 err = -EFSCORRUPTED;
1642 f2fs_handle_error(sbi,
1643 ERROR_CORRUPTED_CLUSTER);
1644 goto sync_out;
1645 }
1646
1647 switch (flag) {
1648 case F2FS_GET_BLOCK_PRECACHE:
1649 goto sync_out;
1650 case F2FS_GET_BLOCK_BMAP:
1651 map->m_pblk = 0;
1652 goto sync_out;
1653 case F2FS_GET_BLOCK_FIEMAP:
1654 if (blkaddr == NULL_ADDR) {
1655 if (map->m_next_pgofs)
1656 *map->m_next_pgofs = pgofs + 1;
1657 goto sync_out;
1658 }
1659 break;
1660 case F2FS_GET_BLOCK_DIO:
1661 if (map->m_next_pgofs)
1662 *map->m_next_pgofs = pgofs + 1;
1663 break;
1664 default:
1665 /* for defragment case */
1666 if (map->m_next_pgofs)
1667 *map->m_next_pgofs = pgofs + 1;
1668 goto sync_out;
1669 }
1670 }
1671
1672 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1673 goto skip;
1674
1675 if (map->m_multidev_dio)
1676 bidx = f2fs_target_device_index(sbi, blkaddr);
1677
1678 if (map->m_len == 0) {
1679 /* reserved delalloc block should be mapped for fiemap. */
1680 if (blkaddr == NEW_ADDR)
1681 map->m_flags |= F2FS_MAP_DELALLOC;
1682 /* DIO READ and hole case, should not map the blocks. */
1683 if (!(flag == F2FS_GET_BLOCK_DIO && is_hole && !map->m_may_create))
1684 map->m_flags |= F2FS_MAP_MAPPED;
1685
1686 map->m_pblk = blkaddr;
1687 map->m_len = 1;
1688
1689 if (map->m_multidev_dio)
1690 map->m_bdev = FDEV(bidx).bdev;
1691 } else if (map_is_mergeable(sbi, map, blkaddr, flag, bidx, ofs)) {
1692 ofs++;
1693 map->m_len++;
1694 } else {
1695 goto sync_out;
1696 }
1697
1698skip:
1699 dn.ofs_in_node++;
1700 pgofs++;
1701
1702 /* preallocate blocks in batch for one dnode page */
1703 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1704 (pgofs == end || dn.ofs_in_node == end_offset)) {
1705
1706 dn.ofs_in_node = ofs_in_node;
1707 err = f2fs_reserve_new_blocks(&dn, prealloc);
1708 if (err)
1709 goto sync_out;
1710
1711 map->m_len += dn.ofs_in_node - ofs_in_node;
1712 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1713 err = -ENOSPC;
1714 goto sync_out;
1715 }
1716 dn.ofs_in_node = end_offset;
1717 }
1718
1719 if (flag == F2FS_GET_BLOCK_DIO && f2fs_lfs_mode(sbi) &&
1720 map->m_may_create) {
1721 /* the next block to be allocated may not be contiguous. */
1722 if (GET_SEGOFF_FROM_SEG0(sbi, blkaddr) % BLKS_PER_SEC(sbi) ==
1723 CAP_BLKS_PER_SEC(sbi) - 1)
1724 goto sync_out;
1725 }
1726
1727 if (pgofs >= end)
1728 goto sync_out;
1729 else if (dn.ofs_in_node < end_offset)
1730 goto next_block;
1731
1732 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1733 if (map->m_flags & F2FS_MAP_MAPPED) {
1734 unsigned int ofs = start_pgofs - map->m_lblk;
1735
1736 f2fs_update_read_extent_cache_range(&dn,
1737 start_pgofs, map->m_pblk + ofs,
1738 map->m_len - ofs);
1739 }
1740 }
1741
1742 f2fs_put_dnode(&dn);
1743
1744 if (map->m_may_create) {
1745 f2fs_map_unlock(sbi, flag);
1746 f2fs_balance_fs(sbi, dn.node_changed);
1747 }
1748 goto next_dnode;
1749
1750sync_out:
1751
1752 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
1753 /*
1754 * for hardware encryption, but to avoid potential issue
1755 * in future
1756 */
1757 f2fs_wait_on_block_writeback_range(inode,
1758 map->m_pblk, map->m_len);
1759
1760 if (map->m_multidev_dio) {
1761 block_t blk_addr = map->m_pblk;
1762
1763 bidx = f2fs_target_device_index(sbi, map->m_pblk);
1764
1765 map->m_bdev = FDEV(bidx).bdev;
1766 map->m_pblk -= FDEV(bidx).start_blk;
1767
1768 if (map->m_may_create)
1769 f2fs_update_device_state(sbi, inode->i_ino,
1770 blk_addr, map->m_len);
1771
1772 f2fs_bug_on(sbi, blk_addr + map->m_len >
1773 FDEV(bidx).end_blk + 1);
1774 }
1775 }
1776
1777 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1778 if (map->m_flags & F2FS_MAP_MAPPED) {
1779 unsigned int ofs = start_pgofs - map->m_lblk;
1780
1781 f2fs_update_read_extent_cache_range(&dn,
1782 start_pgofs, map->m_pblk + ofs,
1783 map->m_len - ofs);
1784 }
1785 if (map->m_next_extent)
1786 *map->m_next_extent = pgofs + 1;
1787 }
1788 f2fs_put_dnode(&dn);
1789unlock_out:
1790 if (map->m_may_create) {
1791 f2fs_map_unlock(sbi, flag);
1792 f2fs_balance_fs(sbi, dn.node_changed);
1793 }
1794out:
1795 trace_f2fs_map_blocks(inode, map, flag, err);
1796 return err;
1797}
1798
1799bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1800{
1801 struct f2fs_map_blocks map;
1802 block_t last_lblk;
1803 int err;
1804
1805 if (pos + len > i_size_read(inode))
1806 return false;
1807
1808 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1809 map.m_next_pgofs = NULL;
1810 map.m_next_extent = NULL;
1811 map.m_seg_type = NO_CHECK_TYPE;
1812 map.m_may_create = false;
1813 last_lblk = F2FS_BLK_ALIGN(pos + len);
1814
1815 while (map.m_lblk < last_lblk) {
1816 map.m_len = last_lblk - map.m_lblk;
1817 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DEFAULT);
1818 if (err || map.m_len == 0)
1819 return false;
1820 map.m_lblk += map.m_len;
1821 }
1822 return true;
1823}
1824
1825static int f2fs_xattr_fiemap(struct inode *inode,
1826 struct fiemap_extent_info *fieinfo)
1827{
1828 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1829 struct page *page;
1830 struct node_info ni;
1831 __u64 phys = 0, len;
1832 __u32 flags;
1833 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1834 int err = 0;
1835
1836 if (f2fs_has_inline_xattr(inode)) {
1837 int offset;
1838
1839 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1840 inode->i_ino, false);
1841 if (!page)
1842 return -ENOMEM;
1843
1844 err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
1845 if (err) {
1846 f2fs_put_page(page, 1);
1847 return err;
1848 }
1849
1850 phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
1851 offset = offsetof(struct f2fs_inode, i_addr) +
1852 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1853 get_inline_xattr_addrs(inode));
1854
1855 phys += offset;
1856 len = inline_xattr_size(inode);
1857
1858 f2fs_put_page(page, 1);
1859
1860 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1861
1862 if (!xnid)
1863 flags |= FIEMAP_EXTENT_LAST;
1864
1865 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1866 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1867 if (err)
1868 return err;
1869 }
1870
1871 if (xnid) {
1872 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1873 if (!page)
1874 return -ENOMEM;
1875
1876 err = f2fs_get_node_info(sbi, xnid, &ni, false);
1877 if (err) {
1878 f2fs_put_page(page, 1);
1879 return err;
1880 }
1881
1882 phys = F2FS_BLK_TO_BYTES(ni.blk_addr);
1883 len = inode->i_sb->s_blocksize;
1884
1885 f2fs_put_page(page, 1);
1886
1887 flags = FIEMAP_EXTENT_LAST;
1888 }
1889
1890 if (phys) {
1891 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1892 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1893 }
1894
1895 return (err < 0 ? err : 0);
1896}
1897
1898int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1899 u64 start, u64 len)
1900{
1901 struct f2fs_map_blocks map;
1902 sector_t start_blk, last_blk, blk_len, max_len;
1903 pgoff_t next_pgofs;
1904 u64 logical = 0, phys = 0, size = 0;
1905 u32 flags = 0;
1906 int ret = 0;
1907 bool compr_cluster = false, compr_appended;
1908 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1909 unsigned int count_in_cluster = 0;
1910 loff_t maxbytes;
1911
1912 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1913 ret = f2fs_precache_extents(inode);
1914 if (ret)
1915 return ret;
1916 }
1917
1918 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1919 if (ret)
1920 return ret;
1921
1922 inode_lock_shared(inode);
1923
1924 maxbytes = F2FS_BLK_TO_BYTES(max_file_blocks(inode));
1925 if (start > maxbytes) {
1926 ret = -EFBIG;
1927 goto out;
1928 }
1929
1930 if (len > maxbytes || (maxbytes - len) < start)
1931 len = maxbytes - start;
1932
1933 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1934 ret = f2fs_xattr_fiemap(inode, fieinfo);
1935 goto out;
1936 }
1937
1938 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1939 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1940 if (ret != -EAGAIN)
1941 goto out;
1942 }
1943
1944 start_blk = F2FS_BYTES_TO_BLK(start);
1945 last_blk = F2FS_BYTES_TO_BLK(start + len - 1);
1946 blk_len = last_blk - start_blk + 1;
1947 max_len = F2FS_BYTES_TO_BLK(maxbytes) - start_blk;
1948
1949next:
1950 memset(&map, 0, sizeof(map));
1951 map.m_lblk = start_blk;
1952 map.m_len = blk_len;
1953 map.m_next_pgofs = &next_pgofs;
1954 map.m_seg_type = NO_CHECK_TYPE;
1955
1956 if (compr_cluster) {
1957 map.m_lblk += 1;
1958 map.m_len = cluster_size - count_in_cluster;
1959 }
1960
1961 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
1962 if (ret)
1963 goto out;
1964
1965 /* HOLE */
1966 if (!compr_cluster && !(map.m_flags & F2FS_MAP_FLAGS)) {
1967 start_blk = next_pgofs;
1968
1969 if (F2FS_BLK_TO_BYTES(start_blk) < maxbytes)
1970 goto prep_next;
1971
1972 flags |= FIEMAP_EXTENT_LAST;
1973 }
1974
1975 /*
1976 * current extent may cross boundary of inquiry, increase len to
1977 * requery.
1978 */
1979 if (!compr_cluster && (map.m_flags & F2FS_MAP_MAPPED) &&
1980 map.m_lblk + map.m_len - 1 == last_blk &&
1981 blk_len != max_len) {
1982 blk_len = max_len;
1983 goto next;
1984 }
1985
1986 compr_appended = false;
1987 /* In a case of compressed cluster, append this to the last extent */
1988 if (compr_cluster && ((map.m_flags & F2FS_MAP_DELALLOC) ||
1989 !(map.m_flags & F2FS_MAP_FLAGS))) {
1990 compr_appended = true;
1991 goto skip_fill;
1992 }
1993
1994 if (size) {
1995 flags |= FIEMAP_EXTENT_MERGED;
1996 if (IS_ENCRYPTED(inode))
1997 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1998
1999 ret = fiemap_fill_next_extent(fieinfo, logical,
2000 phys, size, flags);
2001 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
2002 if (ret)
2003 goto out;
2004 size = 0;
2005 }
2006
2007 if (start_blk > last_blk)
2008 goto out;
2009
2010skip_fill:
2011 if (map.m_pblk == COMPRESS_ADDR) {
2012 compr_cluster = true;
2013 count_in_cluster = 1;
2014 } else if (compr_appended) {
2015 unsigned int appended_blks = cluster_size -
2016 count_in_cluster + 1;
2017 size += F2FS_BLK_TO_BYTES(appended_blks);
2018 start_blk += appended_blks;
2019 compr_cluster = false;
2020 } else {
2021 logical = F2FS_BLK_TO_BYTES(start_blk);
2022 phys = __is_valid_data_blkaddr(map.m_pblk) ?
2023 F2FS_BLK_TO_BYTES(map.m_pblk) : 0;
2024 size = F2FS_BLK_TO_BYTES(map.m_len);
2025 flags = 0;
2026
2027 if (compr_cluster) {
2028 flags = FIEMAP_EXTENT_ENCODED;
2029 count_in_cluster += map.m_len;
2030 if (count_in_cluster == cluster_size) {
2031 compr_cluster = false;
2032 size += F2FS_BLKSIZE;
2033 }
2034 } else if (map.m_flags & F2FS_MAP_DELALLOC) {
2035 flags = FIEMAP_EXTENT_UNWRITTEN;
2036 }
2037
2038 start_blk += F2FS_BYTES_TO_BLK(size);
2039 }
2040
2041prep_next:
2042 cond_resched();
2043 if (fatal_signal_pending(current))
2044 ret = -EINTR;
2045 else
2046 goto next;
2047out:
2048 if (ret == 1)
2049 ret = 0;
2050
2051 inode_unlock_shared(inode);
2052 return ret;
2053}
2054
2055static inline loff_t f2fs_readpage_limit(struct inode *inode)
2056{
2057 if (IS_ENABLED(CONFIG_FS_VERITY) && IS_VERITY(inode))
2058 return F2FS_BLK_TO_BYTES(max_file_blocks(inode));
2059
2060 return i_size_read(inode);
2061}
2062
2063static inline blk_opf_t f2fs_ra_op_flags(struct readahead_control *rac)
2064{
2065 return rac ? REQ_RAHEAD : 0;
2066}
2067
2068static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
2069 unsigned nr_pages,
2070 struct f2fs_map_blocks *map,
2071 struct bio **bio_ret,
2072 sector_t *last_block_in_bio,
2073 struct readahead_control *rac)
2074{
2075 struct bio *bio = *bio_ret;
2076 const unsigned int blocksize = F2FS_BLKSIZE;
2077 sector_t block_in_file;
2078 sector_t last_block;
2079 sector_t last_block_in_file;
2080 sector_t block_nr;
2081 pgoff_t index = folio_index(folio);
2082 int ret = 0;
2083
2084 block_in_file = (sector_t)index;
2085 last_block = block_in_file + nr_pages;
2086 last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
2087 blocksize - 1);
2088 if (last_block > last_block_in_file)
2089 last_block = last_block_in_file;
2090
2091 /* just zeroing out page which is beyond EOF */
2092 if (block_in_file >= last_block)
2093 goto zero_out;
2094 /*
2095 * Map blocks using the previous result first.
2096 */
2097 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2098 block_in_file > map->m_lblk &&
2099 block_in_file < (map->m_lblk + map->m_len))
2100 goto got_it;
2101
2102 /*
2103 * Then do more f2fs_map_blocks() calls until we are
2104 * done with this page.
2105 */
2106 map->m_lblk = block_in_file;
2107 map->m_len = last_block - block_in_file;
2108
2109 ret = f2fs_map_blocks(inode, map, F2FS_GET_BLOCK_DEFAULT);
2110 if (ret)
2111 goto out;
2112got_it:
2113 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2114 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2115 folio_set_mappedtodisk(folio);
2116
2117 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2118 DATA_GENERIC_ENHANCE_READ)) {
2119 ret = -EFSCORRUPTED;
2120 goto out;
2121 }
2122 } else {
2123zero_out:
2124 folio_zero_segment(folio, 0, folio_size(folio));
2125 if (f2fs_need_verity(inode, index) &&
2126 !fsverity_verify_folio(folio)) {
2127 ret = -EIO;
2128 goto out;
2129 }
2130 if (!folio_test_uptodate(folio))
2131 folio_mark_uptodate(folio);
2132 folio_unlock(folio);
2133 goto out;
2134 }
2135
2136 /*
2137 * This page will go to BIO. Do we need to send this
2138 * BIO off first?
2139 */
2140 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2141 *last_block_in_bio, block_nr) ||
2142 !f2fs_crypt_mergeable_bio(bio, inode, index, NULL))) {
2143submit_and_realloc:
2144 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2145 bio = NULL;
2146 }
2147 if (bio == NULL) {
2148 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2149 f2fs_ra_op_flags(rac), index,
2150 false);
2151 if (IS_ERR(bio)) {
2152 ret = PTR_ERR(bio);
2153 bio = NULL;
2154 goto out;
2155 }
2156 }
2157
2158 /*
2159 * If the page is under writeback, we need to wait for
2160 * its completion to see the correct decrypted data.
2161 */
2162 f2fs_wait_on_block_writeback(inode, block_nr);
2163
2164 if (!bio_add_folio(bio, folio, blocksize, 0))
2165 goto submit_and_realloc;
2166
2167 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2168 f2fs_update_iostat(F2FS_I_SB(inode), NULL, FS_DATA_READ_IO,
2169 F2FS_BLKSIZE);
2170 *last_block_in_bio = block_nr;
2171out:
2172 *bio_ret = bio;
2173 return ret;
2174}
2175
2176#ifdef CONFIG_F2FS_FS_COMPRESSION
2177int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2178 unsigned nr_pages, sector_t *last_block_in_bio,
2179 struct readahead_control *rac, bool for_write)
2180{
2181 struct dnode_of_data dn;
2182 struct inode *inode = cc->inode;
2183 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2184 struct bio *bio = *bio_ret;
2185 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2186 sector_t last_block_in_file;
2187 const unsigned int blocksize = F2FS_BLKSIZE;
2188 struct decompress_io_ctx *dic = NULL;
2189 struct extent_info ei = {};
2190 bool from_dnode = true;
2191 int i;
2192 int ret = 0;
2193
2194 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2195
2196 last_block_in_file = F2FS_BYTES_TO_BLK(f2fs_readpage_limit(inode) +
2197 blocksize - 1);
2198
2199 /* get rid of pages beyond EOF */
2200 for (i = 0; i < cc->cluster_size; i++) {
2201 struct page *page = cc->rpages[i];
2202 struct folio *folio;
2203
2204 if (!page)
2205 continue;
2206
2207 folio = page_folio(page);
2208 if ((sector_t)folio->index >= last_block_in_file) {
2209 folio_zero_segment(folio, 0, folio_size(folio));
2210 if (!folio_test_uptodate(folio))
2211 folio_mark_uptodate(folio);
2212 } else if (!folio_test_uptodate(folio)) {
2213 continue;
2214 }
2215 folio_unlock(folio);
2216 if (for_write)
2217 folio_put(folio);
2218 cc->rpages[i] = NULL;
2219 cc->nr_rpages--;
2220 }
2221
2222 /* we are done since all pages are beyond EOF */
2223 if (f2fs_cluster_is_empty(cc))
2224 goto out;
2225
2226 if (f2fs_lookup_read_extent_cache(inode, start_idx, &ei))
2227 from_dnode = false;
2228
2229 if (!from_dnode)
2230 goto skip_reading_dnode;
2231
2232 set_new_dnode(&dn, inode, NULL, NULL, 0);
2233 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2234 if (ret)
2235 goto out;
2236
2237 if (unlikely(f2fs_cp_error(sbi))) {
2238 ret = -EIO;
2239 goto out_put_dnode;
2240 }
2241 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2242
2243skip_reading_dnode:
2244 for (i = 1; i < cc->cluster_size; i++) {
2245 block_t blkaddr;
2246
2247 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2248 dn.ofs_in_node + i) :
2249 ei.blk + i - 1;
2250
2251 if (!__is_valid_data_blkaddr(blkaddr))
2252 break;
2253
2254 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2255 ret = -EFAULT;
2256 goto out_put_dnode;
2257 }
2258 cc->nr_cpages++;
2259
2260 if (!from_dnode && i >= ei.c_len)
2261 break;
2262 }
2263
2264 /* nothing to decompress */
2265 if (cc->nr_cpages == 0) {
2266 ret = 0;
2267 goto out_put_dnode;
2268 }
2269
2270 dic = f2fs_alloc_dic(cc);
2271 if (IS_ERR(dic)) {
2272 ret = PTR_ERR(dic);
2273 goto out_put_dnode;
2274 }
2275
2276 for (i = 0; i < cc->nr_cpages; i++) {
2277 struct folio *folio = page_folio(dic->cpages[i]);
2278 block_t blkaddr;
2279 struct bio_post_read_ctx *ctx;
2280
2281 blkaddr = from_dnode ? data_blkaddr(dn.inode, dn.node_page,
2282 dn.ofs_in_node + i + 1) :
2283 ei.blk + i;
2284
2285 f2fs_wait_on_block_writeback(inode, blkaddr);
2286
2287 if (f2fs_load_compressed_page(sbi, folio_page(folio, 0),
2288 blkaddr)) {
2289 if (atomic_dec_and_test(&dic->remaining_pages)) {
2290 f2fs_decompress_cluster(dic, true);
2291 break;
2292 }
2293 continue;
2294 }
2295
2296 if (bio && (!page_is_mergeable(sbi, bio,
2297 *last_block_in_bio, blkaddr) ||
2298 !f2fs_crypt_mergeable_bio(bio, inode, folio->index, NULL))) {
2299submit_and_realloc:
2300 f2fs_submit_read_bio(sbi, bio, DATA);
2301 bio = NULL;
2302 }
2303
2304 if (!bio) {
2305 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2306 f2fs_ra_op_flags(rac),
2307 folio->index, for_write);
2308 if (IS_ERR(bio)) {
2309 ret = PTR_ERR(bio);
2310 f2fs_decompress_end_io(dic, ret, true);
2311 f2fs_put_dnode(&dn);
2312 *bio_ret = NULL;
2313 return ret;
2314 }
2315 }
2316
2317 if (!bio_add_folio(bio, folio, blocksize, 0))
2318 goto submit_and_realloc;
2319
2320 ctx = get_post_read_ctx(bio);
2321 ctx->enabled_steps |= STEP_DECOMPRESS;
2322 refcount_inc(&dic->refcnt);
2323
2324 inc_page_count(sbi, F2FS_RD_DATA);
2325 f2fs_update_iostat(sbi, inode, FS_DATA_READ_IO, F2FS_BLKSIZE);
2326 *last_block_in_bio = blkaddr;
2327 }
2328
2329 if (from_dnode)
2330 f2fs_put_dnode(&dn);
2331
2332 *bio_ret = bio;
2333 return 0;
2334
2335out_put_dnode:
2336 if (from_dnode)
2337 f2fs_put_dnode(&dn);
2338out:
2339 for (i = 0; i < cc->cluster_size; i++) {
2340 if (cc->rpages[i]) {
2341 ClearPageUptodate(cc->rpages[i]);
2342 unlock_page(cc->rpages[i]);
2343 }
2344 }
2345 *bio_ret = bio;
2346 return ret;
2347}
2348#endif
2349
2350/*
2351 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2352 * Major change was from block_size == page_size in f2fs by default.
2353 */
2354static int f2fs_mpage_readpages(struct inode *inode,
2355 struct readahead_control *rac, struct folio *folio)
2356{
2357 struct bio *bio = NULL;
2358 sector_t last_block_in_bio = 0;
2359 struct f2fs_map_blocks map;
2360#ifdef CONFIG_F2FS_FS_COMPRESSION
2361 struct compress_ctx cc = {
2362 .inode = inode,
2363 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2364 .cluster_size = F2FS_I(inode)->i_cluster_size,
2365 .cluster_idx = NULL_CLUSTER,
2366 .rpages = NULL,
2367 .cpages = NULL,
2368 .nr_rpages = 0,
2369 .nr_cpages = 0,
2370 };
2371 pgoff_t nc_cluster_idx = NULL_CLUSTER;
2372 pgoff_t index;
2373#endif
2374 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2375 unsigned max_nr_pages = nr_pages;
2376 int ret = 0;
2377
2378 map.m_pblk = 0;
2379 map.m_lblk = 0;
2380 map.m_len = 0;
2381 map.m_flags = 0;
2382 map.m_next_pgofs = NULL;
2383 map.m_next_extent = NULL;
2384 map.m_seg_type = NO_CHECK_TYPE;
2385 map.m_may_create = false;
2386
2387 for (; nr_pages; nr_pages--) {
2388 if (rac) {
2389 folio = readahead_folio(rac);
2390 prefetchw(&folio->flags);
2391 }
2392
2393#ifdef CONFIG_F2FS_FS_COMPRESSION
2394 index = folio_index(folio);
2395
2396 if (!f2fs_compressed_file(inode))
2397 goto read_single_page;
2398
2399 /* there are remained compressed pages, submit them */
2400 if (!f2fs_cluster_can_merge_page(&cc, index)) {
2401 ret = f2fs_read_multi_pages(&cc, &bio,
2402 max_nr_pages,
2403 &last_block_in_bio,
2404 rac, false);
2405 f2fs_destroy_compress_ctx(&cc, false);
2406 if (ret)
2407 goto set_error_page;
2408 }
2409 if (cc.cluster_idx == NULL_CLUSTER) {
2410 if (nc_cluster_idx == index >> cc.log_cluster_size)
2411 goto read_single_page;
2412
2413 ret = f2fs_is_compressed_cluster(inode, index);
2414 if (ret < 0)
2415 goto set_error_page;
2416 else if (!ret) {
2417 nc_cluster_idx =
2418 index >> cc.log_cluster_size;
2419 goto read_single_page;
2420 }
2421
2422 nc_cluster_idx = NULL_CLUSTER;
2423 }
2424 ret = f2fs_init_compress_ctx(&cc);
2425 if (ret)
2426 goto set_error_page;
2427
2428 f2fs_compress_ctx_add_page(&cc, folio);
2429
2430 goto next_page;
2431read_single_page:
2432#endif
2433
2434 ret = f2fs_read_single_page(inode, folio, max_nr_pages, &map,
2435 &bio, &last_block_in_bio, rac);
2436 if (ret) {
2437#ifdef CONFIG_F2FS_FS_COMPRESSION
2438set_error_page:
2439#endif
2440 folio_zero_segment(folio, 0, folio_size(folio));
2441 folio_unlock(folio);
2442 }
2443#ifdef CONFIG_F2FS_FS_COMPRESSION
2444next_page:
2445#endif
2446
2447#ifdef CONFIG_F2FS_FS_COMPRESSION
2448 if (f2fs_compressed_file(inode)) {
2449 /* last page */
2450 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2451 ret = f2fs_read_multi_pages(&cc, &bio,
2452 max_nr_pages,
2453 &last_block_in_bio,
2454 rac, false);
2455 f2fs_destroy_compress_ctx(&cc, false);
2456 }
2457 }
2458#endif
2459 }
2460 if (bio)
2461 f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
2462 return ret;
2463}
2464
2465static int f2fs_read_data_folio(struct file *file, struct folio *folio)
2466{
2467 struct inode *inode = folio_file_mapping(folio)->host;
2468 int ret = -EAGAIN;
2469
2470 trace_f2fs_readpage(folio, DATA);
2471
2472 if (!f2fs_is_compress_backend_ready(inode)) {
2473 folio_unlock(folio);
2474 return -EOPNOTSUPP;
2475 }
2476
2477 /* If the file has inline data, try to read it directly */
2478 if (f2fs_has_inline_data(inode))
2479 ret = f2fs_read_inline_data(inode, folio);
2480 if (ret == -EAGAIN)
2481 ret = f2fs_mpage_readpages(inode, NULL, folio);
2482 return ret;
2483}
2484
2485static void f2fs_readahead(struct readahead_control *rac)
2486{
2487 struct inode *inode = rac->mapping->host;
2488
2489 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2490
2491 if (!f2fs_is_compress_backend_ready(inode))
2492 return;
2493
2494 /* If the file has inline data, skip readahead */
2495 if (f2fs_has_inline_data(inode))
2496 return;
2497
2498 f2fs_mpage_readpages(inode, rac, NULL);
2499}
2500
2501int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2502{
2503 struct inode *inode = fio->page->mapping->host;
2504 struct page *mpage, *page;
2505 gfp_t gfp_flags = GFP_NOFS;
2506
2507 if (!f2fs_encrypted_file(inode))
2508 return 0;
2509
2510 page = fio->compressed_page ? fio->compressed_page : fio->page;
2511
2512 if (fscrypt_inode_uses_inline_crypto(inode))
2513 return 0;
2514
2515retry_encrypt:
2516 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2517 PAGE_SIZE, 0, gfp_flags);
2518 if (IS_ERR(fio->encrypted_page)) {
2519 /* flush pending IOs and wait for a while in the ENOMEM case */
2520 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2521 f2fs_flush_merged_writes(fio->sbi);
2522 memalloc_retry_wait(GFP_NOFS);
2523 gfp_flags |= __GFP_NOFAIL;
2524 goto retry_encrypt;
2525 }
2526 return PTR_ERR(fio->encrypted_page);
2527 }
2528
2529 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2530 if (mpage) {
2531 if (PageUptodate(mpage))
2532 memcpy(page_address(mpage),
2533 page_address(fio->encrypted_page), PAGE_SIZE);
2534 f2fs_put_page(mpage, 1);
2535 }
2536 return 0;
2537}
2538
2539static inline bool check_inplace_update_policy(struct inode *inode,
2540 struct f2fs_io_info *fio)
2541{
2542 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2543
2544 if (IS_F2FS_IPU_HONOR_OPU_WRITE(sbi) &&
2545 is_inode_flag_set(inode, FI_OPU_WRITE))
2546 return false;
2547 if (IS_F2FS_IPU_FORCE(sbi))
2548 return true;
2549 if (IS_F2FS_IPU_SSR(sbi) && f2fs_need_SSR(sbi))
2550 return true;
2551 if (IS_F2FS_IPU_UTIL(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
2552 return true;
2553 if (IS_F2FS_IPU_SSR_UTIL(sbi) && f2fs_need_SSR(sbi) &&
2554 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2555 return true;
2556
2557 /*
2558 * IPU for rewrite async pages
2559 */
2560 if (IS_F2FS_IPU_ASYNC(sbi) && fio && fio->op == REQ_OP_WRITE &&
2561 !(fio->op_flags & REQ_SYNC) && !IS_ENCRYPTED(inode))
2562 return true;
2563
2564 /* this is only set during fdatasync */
2565 if (IS_F2FS_IPU_FSYNC(sbi) && is_inode_flag_set(inode, FI_NEED_IPU))
2566 return true;
2567
2568 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2569 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2570 return true;
2571
2572 return false;
2573}
2574
2575bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2576{
2577 /* swap file is migrating in aligned write mode */
2578 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2579 return false;
2580
2581 if (f2fs_is_pinned_file(inode))
2582 return true;
2583
2584 /* if this is cold file, we should overwrite to avoid fragmentation */
2585 if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
2586 return true;
2587
2588 return check_inplace_update_policy(inode, fio);
2589}
2590
2591bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2592{
2593 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2594
2595 /* The below cases were checked when setting it. */
2596 if (f2fs_is_pinned_file(inode))
2597 return false;
2598 if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2599 return true;
2600 if (f2fs_lfs_mode(sbi))
2601 return true;
2602 if (S_ISDIR(inode->i_mode))
2603 return true;
2604 if (IS_NOQUOTA(inode))
2605 return true;
2606 if (f2fs_used_in_atomic_write(inode))
2607 return true;
2608 /* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */
2609 if (f2fs_compressed_file(inode) &&
2610 F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER &&
2611 is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
2612 return true;
2613
2614 /* swap file is migrating in aligned write mode */
2615 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2616 return true;
2617
2618 if (is_inode_flag_set(inode, FI_OPU_WRITE))
2619 return true;
2620
2621 if (fio) {
2622 if (page_private_gcing(fio->page))
2623 return true;
2624 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2625 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2626 return true;
2627 }
2628 return false;
2629}
2630
2631static inline bool need_inplace_update(struct f2fs_io_info *fio)
2632{
2633 struct inode *inode = fio->page->mapping->host;
2634
2635 if (f2fs_should_update_outplace(inode, fio))
2636 return false;
2637
2638 return f2fs_should_update_inplace(inode, fio);
2639}
2640
2641int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2642{
2643 struct folio *folio = page_folio(fio->page);
2644 struct inode *inode = folio->mapping->host;
2645 struct dnode_of_data dn;
2646 struct node_info ni;
2647 bool ipu_force = false;
2648 bool atomic_commit;
2649 int err = 0;
2650
2651 /* Use COW inode to make dnode_of_data for atomic write */
2652 atomic_commit = f2fs_is_atomic_file(inode) &&
2653 page_private_atomic(folio_page(folio, 0));
2654 if (atomic_commit)
2655 set_new_dnode(&dn, F2FS_I(inode)->cow_inode, NULL, NULL, 0);
2656 else
2657 set_new_dnode(&dn, inode, NULL, NULL, 0);
2658
2659 if (need_inplace_update(fio) &&
2660 f2fs_lookup_read_extent_cache_block(inode, folio->index,
2661 &fio->old_blkaddr)) {
2662 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2663 DATA_GENERIC_ENHANCE))
2664 return -EFSCORRUPTED;
2665
2666 ipu_force = true;
2667 fio->need_lock = LOCK_DONE;
2668 goto got_it;
2669 }
2670
2671 /* Deadlock due to between page->lock and f2fs_lock_op */
2672 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2673 return -EAGAIN;
2674
2675 err = f2fs_get_dnode_of_data(&dn, folio->index, LOOKUP_NODE);
2676 if (err)
2677 goto out;
2678
2679 fio->old_blkaddr = dn.data_blkaddr;
2680
2681 /* This page is already truncated */
2682 if (fio->old_blkaddr == NULL_ADDR) {
2683 folio_clear_uptodate(folio);
2684 clear_page_private_gcing(folio_page(folio, 0));
2685 goto out_writepage;
2686 }
2687got_it:
2688 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2689 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2690 DATA_GENERIC_ENHANCE)) {
2691 err = -EFSCORRUPTED;
2692 goto out_writepage;
2693 }
2694
2695 /* wait for GCed page writeback via META_MAPPING */
2696 if (fio->meta_gc)
2697 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2698
2699 /*
2700 * If current allocation needs SSR,
2701 * it had better in-place writes for updated data.
2702 */
2703 if (ipu_force ||
2704 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2705 need_inplace_update(fio))) {
2706 err = f2fs_encrypt_one_page(fio);
2707 if (err)
2708 goto out_writepage;
2709
2710 folio_start_writeback(folio);
2711 f2fs_put_dnode(&dn);
2712 if (fio->need_lock == LOCK_REQ)
2713 f2fs_unlock_op(fio->sbi);
2714 err = f2fs_inplace_write_data(fio);
2715 if (err) {
2716 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2717 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2718 folio_end_writeback(folio);
2719 } else {
2720 set_inode_flag(inode, FI_UPDATE_WRITE);
2721 }
2722 trace_f2fs_do_write_data_page(folio, IPU);
2723 return err;
2724 }
2725
2726 if (fio->need_lock == LOCK_RETRY) {
2727 if (!f2fs_trylock_op(fio->sbi)) {
2728 err = -EAGAIN;
2729 goto out_writepage;
2730 }
2731 fio->need_lock = LOCK_REQ;
2732 }
2733
2734 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
2735 if (err)
2736 goto out_writepage;
2737
2738 fio->version = ni.version;
2739
2740 err = f2fs_encrypt_one_page(fio);
2741 if (err)
2742 goto out_writepage;
2743
2744 folio_start_writeback(folio);
2745
2746 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2747 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2748
2749 /* LFS mode write path */
2750 f2fs_outplace_write_data(&dn, fio);
2751 trace_f2fs_do_write_data_page(folio, OPU);
2752 set_inode_flag(inode, FI_APPEND_WRITE);
2753 if (atomic_commit)
2754 clear_page_private_atomic(folio_page(folio, 0));
2755out_writepage:
2756 f2fs_put_dnode(&dn);
2757out:
2758 if (fio->need_lock == LOCK_REQ)
2759 f2fs_unlock_op(fio->sbi);
2760 return err;
2761}
2762
2763int f2fs_write_single_data_page(struct folio *folio, int *submitted,
2764 struct bio **bio,
2765 sector_t *last_block,
2766 struct writeback_control *wbc,
2767 enum iostat_type io_type,
2768 int compr_blocks,
2769 bool allow_balance)
2770{
2771 struct inode *inode = folio->mapping->host;
2772 struct page *page = folio_page(folio, 0);
2773 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2774 loff_t i_size = i_size_read(inode);
2775 const pgoff_t end_index = ((unsigned long long)i_size)
2776 >> PAGE_SHIFT;
2777 loff_t psize = (loff_t)(folio->index + 1) << PAGE_SHIFT;
2778 unsigned offset = 0;
2779 bool need_balance_fs = false;
2780 bool quota_inode = IS_NOQUOTA(inode);
2781 int err = 0;
2782 struct f2fs_io_info fio = {
2783 .sbi = sbi,
2784 .ino = inode->i_ino,
2785 .type = DATA,
2786 .op = REQ_OP_WRITE,
2787 .op_flags = wbc_to_write_flags(wbc),
2788 .old_blkaddr = NULL_ADDR,
2789 .page = page,
2790 .encrypted_page = NULL,
2791 .submitted = 0,
2792 .compr_blocks = compr_blocks,
2793 .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
2794 .meta_gc = f2fs_meta_inode_gc_required(inode) ? 1 : 0,
2795 .io_type = io_type,
2796 .io_wbc = wbc,
2797 .bio = bio,
2798 .last_block = last_block,
2799 };
2800
2801 trace_f2fs_writepage(folio, DATA);
2802
2803 /* we should bypass data pages to proceed the kworker jobs */
2804 if (unlikely(f2fs_cp_error(sbi))) {
2805 mapping_set_error(folio->mapping, -EIO);
2806 /*
2807 * don't drop any dirty dentry pages for keeping lastest
2808 * directory structure.
2809 */
2810 if (S_ISDIR(inode->i_mode) &&
2811 !is_sbi_flag_set(sbi, SBI_IS_CLOSE))
2812 goto redirty_out;
2813
2814 /* keep data pages in remount-ro mode */
2815 if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
2816 goto redirty_out;
2817 goto out;
2818 }
2819
2820 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2821 goto redirty_out;
2822
2823 if (folio->index < end_index ||
2824 f2fs_verity_in_progress(inode) ||
2825 compr_blocks)
2826 goto write;
2827
2828 /*
2829 * If the offset is out-of-range of file size,
2830 * this page does not have to be written to disk.
2831 */
2832 offset = i_size & (PAGE_SIZE - 1);
2833 if ((folio->index >= end_index + 1) || !offset)
2834 goto out;
2835
2836 folio_zero_segment(folio, offset, folio_size(folio));
2837write:
2838 /* Dentry/quota blocks are controlled by checkpoint */
2839 if (S_ISDIR(inode->i_mode) || quota_inode) {
2840 /*
2841 * We need to wait for node_write to avoid block allocation during
2842 * checkpoint. This can only happen to quota writes which can cause
2843 * the below discard race condition.
2844 */
2845 if (quota_inode)
2846 f2fs_down_read(&sbi->node_write);
2847
2848 fio.need_lock = LOCK_DONE;
2849 err = f2fs_do_write_data_page(&fio);
2850
2851 if (quota_inode)
2852 f2fs_up_read(&sbi->node_write);
2853
2854 goto done;
2855 }
2856
2857 if (!wbc->for_reclaim)
2858 need_balance_fs = true;
2859 else if (has_not_enough_free_secs(sbi, 0, 0))
2860 goto redirty_out;
2861 else
2862 set_inode_flag(inode, FI_HOT_DATA);
2863
2864 err = -EAGAIN;
2865 if (f2fs_has_inline_data(inode)) {
2866 err = f2fs_write_inline_data(inode, folio);
2867 if (!err)
2868 goto out;
2869 }
2870
2871 if (err == -EAGAIN) {
2872 err = f2fs_do_write_data_page(&fio);
2873 if (err == -EAGAIN) {
2874 f2fs_bug_on(sbi, compr_blocks);
2875 fio.need_lock = LOCK_REQ;
2876 err = f2fs_do_write_data_page(&fio);
2877 }
2878 }
2879
2880 if (err) {
2881 file_set_keep_isize(inode);
2882 } else {
2883 spin_lock(&F2FS_I(inode)->i_size_lock);
2884 if (F2FS_I(inode)->last_disk_size < psize)
2885 F2FS_I(inode)->last_disk_size = psize;
2886 spin_unlock(&F2FS_I(inode)->i_size_lock);
2887 }
2888
2889done:
2890 if (err && err != -ENOENT)
2891 goto redirty_out;
2892
2893out:
2894 inode_dec_dirty_pages(inode);
2895 if (err) {
2896 folio_clear_uptodate(folio);
2897 clear_page_private_gcing(page);
2898 }
2899
2900 if (wbc->for_reclaim) {
2901 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2902 clear_inode_flag(inode, FI_HOT_DATA);
2903 f2fs_remove_dirty_inode(inode);
2904 submitted = NULL;
2905 }
2906 folio_unlock(folio);
2907 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2908 !F2FS_I(inode)->wb_task && allow_balance)
2909 f2fs_balance_fs(sbi, need_balance_fs);
2910
2911 if (unlikely(f2fs_cp_error(sbi))) {
2912 f2fs_submit_merged_write(sbi, DATA);
2913 if (bio && *bio)
2914 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2915 submitted = NULL;
2916 }
2917
2918 if (submitted)
2919 *submitted = fio.submitted;
2920
2921 return 0;
2922
2923redirty_out:
2924 folio_redirty_for_writepage(wbc, folio);
2925 /*
2926 * pageout() in MM translates EAGAIN, so calls handle_write_error()
2927 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2928 * file_write_and_wait_range() will see EIO error, which is critical
2929 * to return value of fsync() followed by atomic_write failure to user.
2930 */
2931 if (!err || wbc->for_reclaim)
2932 return AOP_WRITEPAGE_ACTIVATE;
2933 folio_unlock(folio);
2934 return err;
2935}
2936
2937static int f2fs_write_data_page(struct page *page,
2938 struct writeback_control *wbc)
2939{
2940 struct folio *folio = page_folio(page);
2941#ifdef CONFIG_F2FS_FS_COMPRESSION
2942 struct inode *inode = folio->mapping->host;
2943
2944 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2945 goto out;
2946
2947 if (f2fs_compressed_file(inode)) {
2948 if (f2fs_is_compressed_cluster(inode, folio->index)) {
2949 folio_redirty_for_writepage(wbc, folio);
2950 return AOP_WRITEPAGE_ACTIVATE;
2951 }
2952 }
2953out:
2954#endif
2955
2956 return f2fs_write_single_data_page(folio, NULL, NULL, NULL,
2957 wbc, FS_DATA_IO, 0, true);
2958}
2959
2960/*
2961 * This function was copied from write_cache_pages from mm/page-writeback.c.
2962 * The major change is making write step of cold data page separately from
2963 * warm/hot data page.
2964 */
2965static int f2fs_write_cache_pages(struct address_space *mapping,
2966 struct writeback_control *wbc,
2967 enum iostat_type io_type)
2968{
2969 int ret = 0;
2970 int done = 0, retry = 0;
2971 struct page *pages_local[F2FS_ONSTACK_PAGES];
2972 struct page **pages = pages_local;
2973 struct folio_batch fbatch;
2974 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2975 struct bio *bio = NULL;
2976 sector_t last_block;
2977#ifdef CONFIG_F2FS_FS_COMPRESSION
2978 struct inode *inode = mapping->host;
2979 struct compress_ctx cc = {
2980 .inode = inode,
2981 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2982 .cluster_size = F2FS_I(inode)->i_cluster_size,
2983 .cluster_idx = NULL_CLUSTER,
2984 .rpages = NULL,
2985 .nr_rpages = 0,
2986 .cpages = NULL,
2987 .valid_nr_cpages = 0,
2988 .rbuf = NULL,
2989 .cbuf = NULL,
2990 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2991 .private = NULL,
2992 };
2993#endif
2994 int nr_folios, p, idx;
2995 int nr_pages;
2996 unsigned int max_pages = F2FS_ONSTACK_PAGES;
2997 pgoff_t index;
2998 pgoff_t end; /* Inclusive */
2999 pgoff_t done_index;
3000 int range_whole = 0;
3001 xa_mark_t tag;
3002 int nwritten = 0;
3003 int submitted = 0;
3004 int i;
3005
3006#ifdef CONFIG_F2FS_FS_COMPRESSION
3007 if (f2fs_compressed_file(inode) &&
3008 1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
3009 pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
3010 cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
3011 max_pages = 1 << cc.log_cluster_size;
3012 }
3013#endif
3014
3015 folio_batch_init(&fbatch);
3016
3017 if (get_dirty_pages(mapping->host) <=
3018 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
3019 set_inode_flag(mapping->host, FI_HOT_DATA);
3020 else
3021 clear_inode_flag(mapping->host, FI_HOT_DATA);
3022
3023 if (wbc->range_cyclic) {
3024 index = mapping->writeback_index; /* prev offset */
3025 end = -1;
3026 } else {
3027 index = wbc->range_start >> PAGE_SHIFT;
3028 end = wbc->range_end >> PAGE_SHIFT;
3029 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
3030 range_whole = 1;
3031 }
3032 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3033 tag = PAGECACHE_TAG_TOWRITE;
3034 else
3035 tag = PAGECACHE_TAG_DIRTY;
3036retry:
3037 retry = 0;
3038 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
3039 tag_pages_for_writeback(mapping, index, end);
3040 done_index = index;
3041 while (!done && !retry && (index <= end)) {
3042 nr_pages = 0;
3043again:
3044 nr_folios = filemap_get_folios_tag(mapping, &index, end,
3045 tag, &fbatch);
3046 if (nr_folios == 0) {
3047 if (nr_pages)
3048 goto write;
3049 break;
3050 }
3051
3052 for (i = 0; i < nr_folios; i++) {
3053 struct folio *folio = fbatch.folios[i];
3054
3055 idx = 0;
3056 p = folio_nr_pages(folio);
3057add_more:
3058 pages[nr_pages] = folio_page(folio, idx);
3059 folio_get(folio);
3060 if (++nr_pages == max_pages) {
3061 index = folio->index + idx + 1;
3062 folio_batch_release(&fbatch);
3063 goto write;
3064 }
3065 if (++idx < p)
3066 goto add_more;
3067 }
3068 folio_batch_release(&fbatch);
3069 goto again;
3070write:
3071 for (i = 0; i < nr_pages; i++) {
3072 struct page *page = pages[i];
3073 struct folio *folio = page_folio(page);
3074 bool need_readd;
3075readd:
3076 need_readd = false;
3077#ifdef CONFIG_F2FS_FS_COMPRESSION
3078 if (f2fs_compressed_file(inode)) {
3079 void *fsdata = NULL;
3080 struct page *pagep;
3081 int ret2;
3082
3083 ret = f2fs_init_compress_ctx(&cc);
3084 if (ret) {
3085 done = 1;
3086 break;
3087 }
3088
3089 if (!f2fs_cluster_can_merge_page(&cc,
3090 folio->index)) {
3091 ret = f2fs_write_multi_pages(&cc,
3092 &submitted, wbc, io_type);
3093 if (!ret)
3094 need_readd = true;
3095 goto result;
3096 }
3097
3098 if (unlikely(f2fs_cp_error(sbi)))
3099 goto lock_folio;
3100
3101 if (!f2fs_cluster_is_empty(&cc))
3102 goto lock_folio;
3103
3104 if (f2fs_all_cluster_page_ready(&cc,
3105 pages, i, nr_pages, true))
3106 goto lock_folio;
3107
3108 ret2 = f2fs_prepare_compress_overwrite(
3109 inode, &pagep,
3110 folio->index, &fsdata);
3111 if (ret2 < 0) {
3112 ret = ret2;
3113 done = 1;
3114 break;
3115 } else if (ret2 &&
3116 (!f2fs_compress_write_end(inode,
3117 fsdata, folio->index, 1) ||
3118 !f2fs_all_cluster_page_ready(&cc,
3119 pages, i, nr_pages,
3120 false))) {
3121 retry = 1;
3122 break;
3123 }
3124 }
3125#endif
3126 /* give a priority to WB_SYNC threads */
3127 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
3128 wbc->sync_mode == WB_SYNC_NONE) {
3129 done = 1;
3130 break;
3131 }
3132#ifdef CONFIG_F2FS_FS_COMPRESSION
3133lock_folio:
3134#endif
3135 done_index = folio->index;
3136retry_write:
3137 folio_lock(folio);
3138
3139 if (unlikely(folio->mapping != mapping)) {
3140continue_unlock:
3141 folio_unlock(folio);
3142 continue;
3143 }
3144
3145 if (!folio_test_dirty(folio)) {
3146 /* someone wrote it for us */
3147 goto continue_unlock;
3148 }
3149
3150 if (folio_test_writeback(folio)) {
3151 if (wbc->sync_mode == WB_SYNC_NONE)
3152 goto continue_unlock;
3153 f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
3154 }
3155
3156 if (!folio_clear_dirty_for_io(folio))
3157 goto continue_unlock;
3158
3159#ifdef CONFIG_F2FS_FS_COMPRESSION
3160 if (f2fs_compressed_file(inode)) {
3161 folio_get(folio);
3162 f2fs_compress_ctx_add_page(&cc, folio);
3163 continue;
3164 }
3165#endif
3166 ret = f2fs_write_single_data_page(folio,
3167 &submitted, &bio, &last_block,
3168 wbc, io_type, 0, true);
3169 if (ret == AOP_WRITEPAGE_ACTIVATE)
3170 folio_unlock(folio);
3171#ifdef CONFIG_F2FS_FS_COMPRESSION
3172result:
3173#endif
3174 nwritten += submitted;
3175 wbc->nr_to_write -= submitted;
3176
3177 if (unlikely(ret)) {
3178 /*
3179 * keep nr_to_write, since vfs uses this to
3180 * get # of written pages.
3181 */
3182 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3183 ret = 0;
3184 goto next;
3185 } else if (ret == -EAGAIN) {
3186 ret = 0;
3187 if (wbc->sync_mode == WB_SYNC_ALL) {
3188 f2fs_io_schedule_timeout(
3189 DEFAULT_IO_TIMEOUT);
3190 goto retry_write;
3191 }
3192 goto next;
3193 }
3194 done_index = folio_next_index(folio);
3195 done = 1;
3196 break;
3197 }
3198
3199 if (wbc->nr_to_write <= 0 &&
3200 wbc->sync_mode == WB_SYNC_NONE) {
3201 done = 1;
3202 break;
3203 }
3204next:
3205 if (need_readd)
3206 goto readd;
3207 }
3208 release_pages(pages, nr_pages);
3209 cond_resched();
3210 }
3211#ifdef CONFIG_F2FS_FS_COMPRESSION
3212 /* flush remained pages in compress cluster */
3213 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3214 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3215 nwritten += submitted;
3216 wbc->nr_to_write -= submitted;
3217 if (ret) {
3218 done = 1;
3219 retry = 0;
3220 }
3221 }
3222 if (f2fs_compressed_file(inode))
3223 f2fs_destroy_compress_ctx(&cc, false);
3224#endif
3225 if (retry) {
3226 index = 0;
3227 end = -1;
3228 goto retry;
3229 }
3230 if (wbc->range_cyclic && !done)
3231 done_index = 0;
3232 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3233 mapping->writeback_index = done_index;
3234
3235 if (nwritten)
3236 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3237 NULL, 0, DATA);
3238 /* submit cached bio of IPU write */
3239 if (bio)
3240 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3241
3242#ifdef CONFIG_F2FS_FS_COMPRESSION
3243 if (pages != pages_local)
3244 kfree(pages);
3245#endif
3246
3247 return ret;
3248}
3249
3250static inline bool __should_serialize_io(struct inode *inode,
3251 struct writeback_control *wbc)
3252{
3253 /* to avoid deadlock in path of data flush */
3254 if (F2FS_I(inode)->wb_task)
3255 return false;
3256
3257 if (!S_ISREG(inode->i_mode))
3258 return false;
3259 if (IS_NOQUOTA(inode))
3260 return false;
3261
3262 if (f2fs_need_compress_data(inode))
3263 return true;
3264 if (wbc->sync_mode != WB_SYNC_ALL)
3265 return true;
3266 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3267 return true;
3268 return false;
3269}
3270
3271static int __f2fs_write_data_pages(struct address_space *mapping,
3272 struct writeback_control *wbc,
3273 enum iostat_type io_type)
3274{
3275 struct inode *inode = mapping->host;
3276 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3277 struct blk_plug plug;
3278 int ret;
3279 bool locked = false;
3280
3281 /* deal with chardevs and other special file */
3282 if (!mapping->a_ops->writepage)
3283 return 0;
3284
3285 /* skip writing if there is no dirty page in this inode */
3286 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3287 return 0;
3288
3289 /* during POR, we don't need to trigger writepage at all. */
3290 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3291 goto skip_write;
3292
3293 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3294 wbc->sync_mode == WB_SYNC_NONE &&
3295 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3296 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3297 goto skip_write;
3298
3299 /* skip writing in file defragment preparing stage */
3300 if (is_inode_flag_set(inode, FI_SKIP_WRITES))
3301 goto skip_write;
3302
3303 trace_f2fs_writepages(mapping->host, wbc, DATA);
3304
3305 /* to avoid spliting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3306 if (wbc->sync_mode == WB_SYNC_ALL)
3307 atomic_inc(&sbi->wb_sync_req[DATA]);
3308 else if (atomic_read(&sbi->wb_sync_req[DATA])) {
3309 /* to avoid potential deadlock */
3310 if (current->plug)
3311 blk_finish_plug(current->plug);
3312 goto skip_write;
3313 }
3314
3315 if (__should_serialize_io(inode, wbc)) {
3316 mutex_lock(&sbi->writepages);
3317 locked = true;
3318 }
3319
3320 blk_start_plug(&plug);
3321 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3322 blk_finish_plug(&plug);
3323
3324 if (locked)
3325 mutex_unlock(&sbi->writepages);
3326
3327 if (wbc->sync_mode == WB_SYNC_ALL)
3328 atomic_dec(&sbi->wb_sync_req[DATA]);
3329 /*
3330 * if some pages were truncated, we cannot guarantee its mapping->host
3331 * to detect pending bios.
3332 */
3333
3334 f2fs_remove_dirty_inode(inode);
3335 return ret;
3336
3337skip_write:
3338 wbc->pages_skipped += get_dirty_pages(inode);
3339 trace_f2fs_writepages(mapping->host, wbc, DATA);
3340 return 0;
3341}
3342
3343static int f2fs_write_data_pages(struct address_space *mapping,
3344 struct writeback_control *wbc)
3345{
3346 struct inode *inode = mapping->host;
3347
3348 return __f2fs_write_data_pages(mapping, wbc,
3349 F2FS_I(inode)->cp_task == current ?
3350 FS_CP_DATA_IO : FS_DATA_IO);
3351}
3352
3353void f2fs_write_failed(struct inode *inode, loff_t to)
3354{
3355 loff_t i_size = i_size_read(inode);
3356
3357 if (IS_NOQUOTA(inode))
3358 return;
3359
3360 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3361 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3362 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3363 filemap_invalidate_lock(inode->i_mapping);
3364
3365 truncate_pagecache(inode, i_size);
3366 f2fs_truncate_blocks(inode, i_size, true);
3367
3368 filemap_invalidate_unlock(inode->i_mapping);
3369 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3370 }
3371}
3372
3373static int prepare_write_begin(struct f2fs_sb_info *sbi,
3374 struct folio *folio, loff_t pos, unsigned int len,
3375 block_t *blk_addr, bool *node_changed)
3376{
3377 struct inode *inode = folio->mapping->host;
3378 pgoff_t index = folio->index;
3379 struct dnode_of_data dn;
3380 struct page *ipage;
3381 bool locked = false;
3382 int flag = F2FS_GET_BLOCK_PRE_AIO;
3383 int err = 0;
3384
3385 /*
3386 * If a whole page is being written and we already preallocated all the
3387 * blocks, then there is no need to get a block address now.
3388 */
3389 if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
3390 return 0;
3391
3392 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3393 if (f2fs_has_inline_data(inode)) {
3394 if (pos + len > MAX_INLINE_DATA(inode))
3395 flag = F2FS_GET_BLOCK_DEFAULT;
3396 f2fs_map_lock(sbi, flag);
3397 locked = true;
3398 } else if ((pos & PAGE_MASK) >= i_size_read(inode)) {
3399 f2fs_map_lock(sbi, flag);
3400 locked = true;
3401 }
3402
3403restart:
3404 /* check inline_data */
3405 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3406 if (IS_ERR(ipage)) {
3407 err = PTR_ERR(ipage);
3408 goto unlock_out;
3409 }
3410
3411 set_new_dnode(&dn, inode, ipage, ipage, 0);
3412
3413 if (f2fs_has_inline_data(inode)) {
3414 if (pos + len <= MAX_INLINE_DATA(inode)) {
3415 f2fs_do_read_inline_data(folio, ipage);
3416 set_inode_flag(inode, FI_DATA_EXIST);
3417 if (inode->i_nlink)
3418 set_page_private_inline(ipage);
3419 goto out;
3420 }
3421 err = f2fs_convert_inline_page(&dn, folio_page(folio, 0));
3422 if (err || dn.data_blkaddr != NULL_ADDR)
3423 goto out;
3424 }
3425
3426 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3427 &dn.data_blkaddr)) {
3428 if (IS_DEVICE_ALIASING(inode)) {
3429 err = -ENODATA;
3430 goto out;
3431 }
3432
3433 if (locked) {
3434 err = f2fs_reserve_block(&dn, index);
3435 goto out;
3436 }
3437
3438 /* hole case */
3439 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3440 if (!err && dn.data_blkaddr != NULL_ADDR)
3441 goto out;
3442 f2fs_put_dnode(&dn);
3443 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3444 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3445 locked = true;
3446 goto restart;
3447 }
3448out:
3449 if (!err) {
3450 /* convert_inline_page can make node_changed */
3451 *blk_addr = dn.data_blkaddr;
3452 *node_changed = dn.node_changed;
3453 }
3454 f2fs_put_dnode(&dn);
3455unlock_out:
3456 if (locked)
3457 f2fs_map_unlock(sbi, flag);
3458 return err;
3459}
3460
3461static int __find_data_block(struct inode *inode, pgoff_t index,
3462 block_t *blk_addr)
3463{
3464 struct dnode_of_data dn;
3465 struct page *ipage;
3466 int err = 0;
3467
3468 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
3469 if (IS_ERR(ipage))
3470 return PTR_ERR(ipage);
3471
3472 set_new_dnode(&dn, inode, ipage, ipage, 0);
3473
3474 if (!f2fs_lookup_read_extent_cache_block(inode, index,
3475 &dn.data_blkaddr)) {
3476 /* hole case */
3477 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3478 if (err) {
3479 dn.data_blkaddr = NULL_ADDR;
3480 err = 0;
3481 }
3482 }
3483 *blk_addr = dn.data_blkaddr;
3484 f2fs_put_dnode(&dn);
3485 return err;
3486}
3487
3488static int __reserve_data_block(struct inode *inode, pgoff_t index,
3489 block_t *blk_addr, bool *node_changed)
3490{
3491 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3492 struct dnode_of_data dn;
3493 struct page *ipage;
3494 int err = 0;
3495
3496 f2fs_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3497
3498 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3499 if (IS_ERR(ipage)) {
3500 err = PTR_ERR(ipage);
3501 goto unlock_out;
3502 }
3503 set_new_dnode(&dn, inode, ipage, ipage, 0);
3504
3505 if (!f2fs_lookup_read_extent_cache_block(dn.inode, index,
3506 &dn.data_blkaddr))
3507 err = f2fs_reserve_block(&dn, index);
3508
3509 *blk_addr = dn.data_blkaddr;
3510 *node_changed = dn.node_changed;
3511 f2fs_put_dnode(&dn);
3512
3513unlock_out:
3514 f2fs_map_unlock(sbi, F2FS_GET_BLOCK_PRE_AIO);
3515 return err;
3516}
3517
3518static int prepare_atomic_write_begin(struct f2fs_sb_info *sbi,
3519 struct folio *folio, loff_t pos, unsigned int len,
3520 block_t *blk_addr, bool *node_changed, bool *use_cow)
3521{
3522 struct inode *inode = folio->mapping->host;
3523 struct inode *cow_inode = F2FS_I(inode)->cow_inode;
3524 pgoff_t index = folio->index;
3525 int err = 0;
3526 block_t ori_blk_addr = NULL_ADDR;
3527
3528 /* If pos is beyond the end of file, reserve a new block in COW inode */
3529 if ((pos & PAGE_MASK) >= i_size_read(inode))
3530 goto reserve_block;
3531
3532 /* Look for the block in COW inode first */
3533 err = __find_data_block(cow_inode, index, blk_addr);
3534 if (err) {
3535 return err;
3536 } else if (*blk_addr != NULL_ADDR) {
3537 *use_cow = true;
3538 return 0;
3539 }
3540
3541 if (is_inode_flag_set(inode, FI_ATOMIC_REPLACE))
3542 goto reserve_block;
3543
3544 /* Look for the block in the original inode */
3545 err = __find_data_block(inode, index, &ori_blk_addr);
3546 if (err)
3547 return err;
3548
3549reserve_block:
3550 /* Finally, we should reserve a new block in COW inode for the update */
3551 err = __reserve_data_block(cow_inode, index, blk_addr, node_changed);
3552 if (err)
3553 return err;
3554 inc_atomic_write_cnt(inode);
3555
3556 if (ori_blk_addr != NULL_ADDR)
3557 *blk_addr = ori_blk_addr;
3558 return 0;
3559}
3560
3561static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3562 loff_t pos, unsigned len, struct folio **foliop, void **fsdata)
3563{
3564 struct inode *inode = mapping->host;
3565 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3566 struct folio *folio;
3567 pgoff_t index = pos >> PAGE_SHIFT;
3568 bool need_balance = false;
3569 bool use_cow = false;
3570 block_t blkaddr = NULL_ADDR;
3571 int err = 0;
3572
3573 trace_f2fs_write_begin(inode, pos, len);
3574
3575 if (!f2fs_is_checkpoint_ready(sbi)) {
3576 err = -ENOSPC;
3577 goto fail;
3578 }
3579
3580 /*
3581 * We should check this at this moment to avoid deadlock on inode page
3582 * and #0 page. The locking rule for inline_data conversion should be:
3583 * folio_lock(folio #0) -> folio_lock(inode_page)
3584 */
3585 if (index != 0) {
3586 err = f2fs_convert_inline_inode(inode);
3587 if (err)
3588 goto fail;
3589 }
3590
3591#ifdef CONFIG_F2FS_FS_COMPRESSION
3592 if (f2fs_compressed_file(inode)) {
3593 int ret;
3594 struct page *page;
3595
3596 *fsdata = NULL;
3597
3598 if (len == PAGE_SIZE && !(f2fs_is_atomic_file(inode)))
3599 goto repeat;
3600
3601 ret = f2fs_prepare_compress_overwrite(inode, &page,
3602 index, fsdata);
3603 if (ret < 0) {
3604 err = ret;
3605 goto fail;
3606 } else if (ret) {
3607 *foliop = page_folio(page);
3608 return 0;
3609 }
3610 }
3611#endif
3612
3613repeat:
3614 /*
3615 * Do not use FGP_STABLE to avoid deadlock.
3616 * Will wait that below with our IO control.
3617 */
3618 folio = __filemap_get_folio(mapping, index,
3619 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3620 if (IS_ERR(folio)) {
3621 err = PTR_ERR(folio);
3622 goto fail;
3623 }
3624
3625 /* TODO: cluster can be compressed due to race with .writepage */
3626
3627 *foliop = folio;
3628
3629 if (f2fs_is_atomic_file(inode))
3630 err = prepare_atomic_write_begin(sbi, folio, pos, len,
3631 &blkaddr, &need_balance, &use_cow);
3632 else
3633 err = prepare_write_begin(sbi, folio, pos, len,
3634 &blkaddr, &need_balance);
3635 if (err)
3636 goto put_folio;
3637
3638 if (need_balance && !IS_NOQUOTA(inode) &&
3639 has_not_enough_free_secs(sbi, 0, 0)) {
3640 folio_unlock(folio);
3641 f2fs_balance_fs(sbi, true);
3642 folio_lock(folio);
3643 if (folio->mapping != mapping) {
3644 /* The folio got truncated from under us */
3645 folio_unlock(folio);
3646 folio_put(folio);
3647 goto repeat;
3648 }
3649 }
3650
3651 f2fs_wait_on_page_writeback(&folio->page, DATA, false, true);
3652
3653 if (len == folio_size(folio) || folio_test_uptodate(folio))
3654 return 0;
3655
3656 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3657 !f2fs_verity_in_progress(inode)) {
3658 folio_zero_segment(folio, len, folio_size(folio));
3659 return 0;
3660 }
3661
3662 if (blkaddr == NEW_ADDR) {
3663 folio_zero_segment(folio, 0, folio_size(folio));
3664 folio_mark_uptodate(folio);
3665 } else {
3666 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3667 DATA_GENERIC_ENHANCE_READ)) {
3668 err = -EFSCORRUPTED;
3669 goto put_folio;
3670 }
3671 err = f2fs_submit_page_read(use_cow ?
3672 F2FS_I(inode)->cow_inode : inode,
3673 folio, blkaddr, 0, true);
3674 if (err)
3675 goto put_folio;
3676
3677 folio_lock(folio);
3678 if (unlikely(folio->mapping != mapping)) {
3679 folio_unlock(folio);
3680 folio_put(folio);
3681 goto repeat;
3682 }
3683 if (unlikely(!folio_test_uptodate(folio))) {
3684 err = -EIO;
3685 goto put_folio;
3686 }
3687 }
3688 return 0;
3689
3690put_folio:
3691 folio_unlock(folio);
3692 folio_put(folio);
3693fail:
3694 f2fs_write_failed(inode, pos + len);
3695 return err;
3696}
3697
3698static int f2fs_write_end(struct file *file,
3699 struct address_space *mapping,
3700 loff_t pos, unsigned len, unsigned copied,
3701 struct folio *folio, void *fsdata)
3702{
3703 struct inode *inode = folio->mapping->host;
3704
3705 trace_f2fs_write_end(inode, pos, len, copied);
3706
3707 /*
3708 * This should be come from len == PAGE_SIZE, and we expect copied
3709 * should be PAGE_SIZE. Otherwise, we treat it with zero copied and
3710 * let generic_perform_write() try to copy data again through copied=0.
3711 */
3712 if (!folio_test_uptodate(folio)) {
3713 if (unlikely(copied != len))
3714 copied = 0;
3715 else
3716 folio_mark_uptodate(folio);
3717 }
3718
3719#ifdef CONFIG_F2FS_FS_COMPRESSION
3720 /* overwrite compressed file */
3721 if (f2fs_compressed_file(inode) && fsdata) {
3722 f2fs_compress_write_end(inode, fsdata, folio->index, copied);
3723 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3724
3725 if (pos + copied > i_size_read(inode) &&
3726 !f2fs_verity_in_progress(inode))
3727 f2fs_i_size_write(inode, pos + copied);
3728 return copied;
3729 }
3730#endif
3731
3732 if (!copied)
3733 goto unlock_out;
3734
3735 folio_mark_dirty(folio);
3736
3737 if (f2fs_is_atomic_file(inode))
3738 set_page_private_atomic(folio_page(folio, 0));
3739
3740 if (pos + copied > i_size_read(inode) &&
3741 !f2fs_verity_in_progress(inode)) {
3742 f2fs_i_size_write(inode, pos + copied);
3743 if (f2fs_is_atomic_file(inode))
3744 f2fs_i_size_write(F2FS_I(inode)->cow_inode,
3745 pos + copied);
3746 }
3747unlock_out:
3748 folio_unlock(folio);
3749 folio_put(folio);
3750 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3751 return copied;
3752}
3753
3754void f2fs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
3755{
3756 struct inode *inode = folio->mapping->host;
3757 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3758
3759 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3760 (offset || length != folio_size(folio)))
3761 return;
3762
3763 if (folio_test_dirty(folio)) {
3764 if (inode->i_ino == F2FS_META_INO(sbi)) {
3765 dec_page_count(sbi, F2FS_DIRTY_META);
3766 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3767 dec_page_count(sbi, F2FS_DIRTY_NODES);
3768 } else {
3769 inode_dec_dirty_pages(inode);
3770 f2fs_remove_dirty_inode(inode);
3771 }
3772 }
3773 clear_page_private_all(&folio->page);
3774}
3775
3776bool f2fs_release_folio(struct folio *folio, gfp_t wait)
3777{
3778 /* If this is dirty folio, keep private data */
3779 if (folio_test_dirty(folio))
3780 return false;
3781
3782 clear_page_private_all(&folio->page);
3783 return true;
3784}
3785
3786static bool f2fs_dirty_data_folio(struct address_space *mapping,
3787 struct folio *folio)
3788{
3789 struct inode *inode = mapping->host;
3790
3791 trace_f2fs_set_page_dirty(folio, DATA);
3792
3793 if (!folio_test_uptodate(folio))
3794 folio_mark_uptodate(folio);
3795 BUG_ON(folio_test_swapcache(folio));
3796
3797 if (filemap_dirty_folio(mapping, folio)) {
3798 f2fs_update_dirty_folio(inode, folio);
3799 return true;
3800 }
3801 return false;
3802}
3803
3804
3805static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3806{
3807#ifdef CONFIG_F2FS_FS_COMPRESSION
3808 struct dnode_of_data dn;
3809 sector_t start_idx, blknr = 0;
3810 int ret;
3811
3812 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3813
3814 set_new_dnode(&dn, inode, NULL, NULL, 0);
3815 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3816 if (ret)
3817 return 0;
3818
3819 if (dn.data_blkaddr != COMPRESS_ADDR) {
3820 dn.ofs_in_node += block - start_idx;
3821 blknr = f2fs_data_blkaddr(&dn);
3822 if (!__is_valid_data_blkaddr(blknr))
3823 blknr = 0;
3824 }
3825
3826 f2fs_put_dnode(&dn);
3827 return blknr;
3828#else
3829 return 0;
3830#endif
3831}
3832
3833
3834static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3835{
3836 struct inode *inode = mapping->host;
3837 sector_t blknr = 0;
3838
3839 if (f2fs_has_inline_data(inode))
3840 goto out;
3841
3842 /* make sure allocating whole blocks */
3843 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3844 filemap_write_and_wait(mapping);
3845
3846 /* Block number less than F2FS MAX BLOCKS */
3847 if (unlikely(block >= max_file_blocks(inode)))
3848 goto out;
3849
3850 if (f2fs_compressed_file(inode)) {
3851 blknr = f2fs_bmap_compress(inode, block);
3852 } else {
3853 struct f2fs_map_blocks map;
3854
3855 memset(&map, 0, sizeof(map));
3856 map.m_lblk = block;
3857 map.m_len = 1;
3858 map.m_next_pgofs = NULL;
3859 map.m_seg_type = NO_CHECK_TYPE;
3860
3861 if (!f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_BMAP))
3862 blknr = map.m_pblk;
3863 }
3864out:
3865 trace_f2fs_bmap(inode, block, blknr);
3866 return blknr;
3867}
3868
3869#ifdef CONFIG_SWAP
3870static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3871 unsigned int blkcnt)
3872{
3873 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3874 unsigned int blkofs;
3875 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3876 unsigned int end_blk = start_blk + blkcnt - 1;
3877 unsigned int secidx = start_blk / blk_per_sec;
3878 unsigned int end_sec;
3879 int ret = 0;
3880
3881 if (!blkcnt)
3882 return 0;
3883 end_sec = end_blk / blk_per_sec;
3884
3885 f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3886 filemap_invalidate_lock(inode->i_mapping);
3887
3888 set_inode_flag(inode, FI_ALIGNED_WRITE);
3889 set_inode_flag(inode, FI_OPU_WRITE);
3890
3891 for (; secidx <= end_sec; secidx++) {
3892 unsigned int blkofs_end = secidx == end_sec ?
3893 end_blk % blk_per_sec : blk_per_sec - 1;
3894
3895 f2fs_down_write(&sbi->pin_sem);
3896
3897 ret = f2fs_allocate_pinning_section(sbi);
3898 if (ret) {
3899 f2fs_up_write(&sbi->pin_sem);
3900 break;
3901 }
3902
3903 set_inode_flag(inode, FI_SKIP_WRITES);
3904
3905 for (blkofs = 0; blkofs <= blkofs_end; blkofs++) {
3906 struct page *page;
3907 unsigned int blkidx = secidx * blk_per_sec + blkofs;
3908
3909 page = f2fs_get_lock_data_page(inode, blkidx, true);
3910 if (IS_ERR(page)) {
3911 f2fs_up_write(&sbi->pin_sem);
3912 ret = PTR_ERR(page);
3913 goto done;
3914 }
3915
3916 set_page_dirty(page);
3917 f2fs_put_page(page, 1);
3918 }
3919
3920 clear_inode_flag(inode, FI_SKIP_WRITES);
3921
3922 ret = filemap_fdatawrite(inode->i_mapping);
3923
3924 f2fs_up_write(&sbi->pin_sem);
3925
3926 if (ret)
3927 break;
3928 }
3929
3930done:
3931 clear_inode_flag(inode, FI_SKIP_WRITES);
3932 clear_inode_flag(inode, FI_OPU_WRITE);
3933 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3934
3935 filemap_invalidate_unlock(inode->i_mapping);
3936 f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3937
3938 return ret;
3939}
3940
3941static int check_swap_activate(struct swap_info_struct *sis,
3942 struct file *swap_file, sector_t *span)
3943{
3944 struct address_space *mapping = swap_file->f_mapping;
3945 struct inode *inode = mapping->host;
3946 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3947 block_t cur_lblock;
3948 block_t last_lblock;
3949 block_t pblock;
3950 block_t lowest_pblock = -1;
3951 block_t highest_pblock = 0;
3952 int nr_extents = 0;
3953 unsigned int nr_pblocks;
3954 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3955 unsigned int not_aligned = 0;
3956 int ret = 0;
3957
3958 /*
3959 * Map all the blocks into the extent list. This code doesn't try
3960 * to be very smart.
3961 */
3962 cur_lblock = 0;
3963 last_lblock = F2FS_BYTES_TO_BLK(i_size_read(inode));
3964
3965 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
3966 struct f2fs_map_blocks map;
3967retry:
3968 cond_resched();
3969
3970 memset(&map, 0, sizeof(map));
3971 map.m_lblk = cur_lblock;
3972 map.m_len = last_lblock - cur_lblock;
3973 map.m_next_pgofs = NULL;
3974 map.m_next_extent = NULL;
3975 map.m_seg_type = NO_CHECK_TYPE;
3976 map.m_may_create = false;
3977
3978 ret = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_FIEMAP);
3979 if (ret)
3980 goto out;
3981
3982 /* hole */
3983 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3984 f2fs_err(sbi, "Swapfile has holes");
3985 ret = -EINVAL;
3986 goto out;
3987 }
3988
3989 pblock = map.m_pblk;
3990 nr_pblocks = map.m_len;
3991
3992 if ((pblock - SM_I(sbi)->main_blkaddr) % blks_per_sec ||
3993 nr_pblocks % blks_per_sec ||
3994 !f2fs_valid_pinned_area(sbi, pblock)) {
3995 bool last_extent = false;
3996
3997 not_aligned++;
3998
3999 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
4000 if (cur_lblock + nr_pblocks > sis->max)
4001 nr_pblocks -= blks_per_sec;
4002
4003 /* this extent is last one */
4004 if (!nr_pblocks) {
4005 nr_pblocks = last_lblock - cur_lblock;
4006 last_extent = true;
4007 }
4008
4009 ret = f2fs_migrate_blocks(inode, cur_lblock,
4010 nr_pblocks);
4011 if (ret) {
4012 if (ret == -ENOENT)
4013 ret = -EINVAL;
4014 goto out;
4015 }
4016
4017 if (!last_extent)
4018 goto retry;
4019 }
4020
4021 if (cur_lblock + nr_pblocks >= sis->max)
4022 nr_pblocks = sis->max - cur_lblock;
4023
4024 if (cur_lblock) { /* exclude the header page */
4025 if (pblock < lowest_pblock)
4026 lowest_pblock = pblock;
4027 if (pblock + nr_pblocks - 1 > highest_pblock)
4028 highest_pblock = pblock + nr_pblocks - 1;
4029 }
4030
4031 /*
4032 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
4033 */
4034 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4035 if (ret < 0)
4036 goto out;
4037 nr_extents += ret;
4038 cur_lblock += nr_pblocks;
4039 }
4040 ret = nr_extents;
4041 *span = 1 + highest_pblock - lowest_pblock;
4042 if (cur_lblock == 0)
4043 cur_lblock = 1; /* force Empty message */
4044 sis->max = cur_lblock;
4045 sis->pages = cur_lblock - 1;
4046 sis->highest_bit = cur_lblock - 1;
4047out:
4048 if (not_aligned)
4049 f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
4050 not_aligned, blks_per_sec * F2FS_BLKSIZE);
4051 return ret;
4052}
4053
4054static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4055 sector_t *span)
4056{
4057 struct inode *inode = file_inode(file);
4058 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4059 int ret;
4060
4061 if (!S_ISREG(inode->i_mode))
4062 return -EINVAL;
4063
4064 if (f2fs_readonly(sbi->sb))
4065 return -EROFS;
4066
4067 if (f2fs_lfs_mode(sbi) && !f2fs_sb_has_blkzoned(sbi)) {
4068 f2fs_err(sbi, "Swapfile not supported in LFS mode");
4069 return -EINVAL;
4070 }
4071
4072 ret = f2fs_convert_inline_inode(inode);
4073 if (ret)
4074 return ret;
4075
4076 if (!f2fs_disable_compressed_file(inode))
4077 return -EINVAL;
4078
4079 ret = filemap_fdatawrite(inode->i_mapping);
4080 if (ret < 0)
4081 return ret;
4082
4083 f2fs_precache_extents(inode);
4084
4085 ret = check_swap_activate(sis, file, span);
4086 if (ret < 0)
4087 return ret;
4088
4089 stat_inc_swapfile_inode(inode);
4090 set_inode_flag(inode, FI_PIN_FILE);
4091 f2fs_update_time(sbi, REQ_TIME);
4092 return ret;
4093}
4094
4095static void f2fs_swap_deactivate(struct file *file)
4096{
4097 struct inode *inode = file_inode(file);
4098
4099 stat_dec_swapfile_inode(inode);
4100 clear_inode_flag(inode, FI_PIN_FILE);
4101}
4102#else
4103static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4104 sector_t *span)
4105{
4106 return -EOPNOTSUPP;
4107}
4108
4109static void f2fs_swap_deactivate(struct file *file)
4110{
4111}
4112#endif
4113
4114const struct address_space_operations f2fs_dblock_aops = {
4115 .read_folio = f2fs_read_data_folio,
4116 .readahead = f2fs_readahead,
4117 .writepage = f2fs_write_data_page,
4118 .writepages = f2fs_write_data_pages,
4119 .write_begin = f2fs_write_begin,
4120 .write_end = f2fs_write_end,
4121 .dirty_folio = f2fs_dirty_data_folio,
4122 .migrate_folio = filemap_migrate_folio,
4123 .invalidate_folio = f2fs_invalidate_folio,
4124 .release_folio = f2fs_release_folio,
4125 .bmap = f2fs_bmap,
4126 .swap_activate = f2fs_swap_activate,
4127 .swap_deactivate = f2fs_swap_deactivate,
4128};
4129
4130void f2fs_clear_page_cache_dirty_tag(struct folio *folio)
4131{
4132 struct address_space *mapping = folio->mapping;
4133 unsigned long flags;
4134
4135 xa_lock_irqsave(&mapping->i_pages, flags);
4136 __xa_clear_mark(&mapping->i_pages, folio->index,
4137 PAGECACHE_TAG_DIRTY);
4138 xa_unlock_irqrestore(&mapping->i_pages, flags);
4139}
4140
4141int __init f2fs_init_post_read_processing(void)
4142{
4143 bio_post_read_ctx_cache =
4144 kmem_cache_create("f2fs_bio_post_read_ctx",
4145 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4146 if (!bio_post_read_ctx_cache)
4147 goto fail;
4148 bio_post_read_ctx_pool =
4149 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4150 bio_post_read_ctx_cache);
4151 if (!bio_post_read_ctx_pool)
4152 goto fail_free_cache;
4153 return 0;
4154
4155fail_free_cache:
4156 kmem_cache_destroy(bio_post_read_ctx_cache);
4157fail:
4158 return -ENOMEM;
4159}
4160
4161void f2fs_destroy_post_read_processing(void)
4162{
4163 mempool_destroy(bio_post_read_ctx_pool);
4164 kmem_cache_destroy(bio_post_read_ctx_cache);
4165}
4166
4167int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4168{
4169 if (!f2fs_sb_has_encrypt(sbi) &&
4170 !f2fs_sb_has_verity(sbi) &&
4171 !f2fs_sb_has_compression(sbi))
4172 return 0;
4173
4174 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4175 WQ_UNBOUND | WQ_HIGHPRI,
4176 num_online_cpus());
4177 return sbi->post_read_wq ? 0 : -ENOMEM;
4178}
4179
4180void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4181{
4182 if (sbi->post_read_wq)
4183 destroy_workqueue(sbi->post_read_wq);
4184}
4185
4186int __init f2fs_init_bio_entry_cache(void)
4187{
4188 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4189 sizeof(struct bio_entry));
4190 return bio_entry_slab ? 0 : -ENOMEM;
4191}
4192
4193void f2fs_destroy_bio_entry_cache(void)
4194{
4195 kmem_cache_destroy(bio_entry_slab);
4196}
4197
4198static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
4199 unsigned int flags, struct iomap *iomap,
4200 struct iomap *srcmap)
4201{
4202 struct f2fs_map_blocks map = {};
4203 pgoff_t next_pgofs = 0;
4204 int err;
4205
4206 map.m_lblk = F2FS_BYTES_TO_BLK(offset);
4207 map.m_len = F2FS_BYTES_TO_BLK(offset + length - 1) - map.m_lblk + 1;
4208 map.m_next_pgofs = &next_pgofs;
4209 map.m_seg_type = f2fs_rw_hint_to_seg_type(F2FS_I_SB(inode),
4210 inode->i_write_hint);
4211 if (flags & IOMAP_WRITE)
4212 map.m_may_create = true;
4213
4214 err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_DIO);
4215 if (err)
4216 return err;
4217
4218 iomap->offset = F2FS_BLK_TO_BYTES(map.m_lblk);
4219
4220 /*
4221 * When inline encryption is enabled, sometimes I/O to an encrypted file
4222 * has to be broken up to guarantee DUN contiguity. Handle this by
4223 * limiting the length of the mapping returned.
4224 */
4225 map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
4226
4227 /*
4228 * We should never see delalloc or compressed extents here based on
4229 * prior flushing and checks.
4230 */
4231 if (WARN_ON_ONCE(map.m_pblk == COMPRESS_ADDR))
4232 return -EINVAL;
4233
4234 if (map.m_flags & F2FS_MAP_MAPPED) {
4235 if (WARN_ON_ONCE(map.m_pblk == NEW_ADDR))
4236 return -EINVAL;
4237
4238 iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
4239 iomap->type = IOMAP_MAPPED;
4240 iomap->flags |= IOMAP_F_MERGED;
4241 iomap->bdev = map.m_bdev;
4242 iomap->addr = F2FS_BLK_TO_BYTES(map.m_pblk);
4243 } else {
4244 if (flags & IOMAP_WRITE)
4245 return -ENOTBLK;
4246
4247 if (map.m_pblk == NULL_ADDR) {
4248 iomap->length = F2FS_BLK_TO_BYTES(next_pgofs) -
4249 iomap->offset;
4250 iomap->type = IOMAP_HOLE;
4251 } else if (map.m_pblk == NEW_ADDR) {
4252 iomap->length = F2FS_BLK_TO_BYTES(map.m_len);
4253 iomap->type = IOMAP_UNWRITTEN;
4254 } else {
4255 f2fs_bug_on(F2FS_I_SB(inode), 1);
4256 }
4257 iomap->addr = IOMAP_NULL_ADDR;
4258 }
4259
4260 if (map.m_flags & F2FS_MAP_NEW)
4261 iomap->flags |= IOMAP_F_NEW;
4262 if ((inode->i_state & I_DIRTY_DATASYNC) ||
4263 offset + length > i_size_read(inode))
4264 iomap->flags |= IOMAP_F_DIRTY;
4265
4266 return 0;
4267}
4268
4269const struct iomap_ops f2fs_iomap_ops = {
4270 .iomap_begin = f2fs_iomap_begin,
4271};