1// SPDX-License-Identifier: GPL-2.0
2/*
3 * fs/f2fs/data.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/buffer_head.h>
11#include <linux/mpage.h>
12#include <linux/writeback.h>
13#include <linux/backing-dev.h>
14#include <linux/pagevec.h>
15#include <linux/blkdev.h>
16#include <linux/bio.h>
17#include <linux/blk-crypto.h>
18#include <linux/swap.h>
19#include <linux/prefetch.h>
20#include <linux/uio.h>
21#include <linux/cleancache.h>
22#include <linux/sched/signal.h>
23#include <linux/fiemap.h>
24
25#include "f2fs.h"
26#include "node.h"
27#include "segment.h"
28#include <trace/events/f2fs.h>
29
30#define NUM_PREALLOC_POST_READ_CTXS 128
31
32static struct kmem_cache *bio_post_read_ctx_cache;
33static struct kmem_cache *bio_entry_slab;
34static mempool_t *bio_post_read_ctx_pool;
35static struct bio_set f2fs_bioset;
36
37#define F2FS_BIO_POOL_SIZE NR_CURSEG_TYPE
38
39int __init f2fs_init_bioset(void)
40{
41 if (bioset_init(&f2fs_bioset, F2FS_BIO_POOL_SIZE,
42 0, BIOSET_NEED_BVECS))
43 return -ENOMEM;
44 return 0;
45}
46
47void f2fs_destroy_bioset(void)
48{
49 bioset_exit(&f2fs_bioset);
50}
51
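/*
 * Return true if writeback of @page is guaranteed to complete by the next
 * checkpoint: meta and node pages, directory data, and data of atomic or
 * quota files, as well as pages tagged for GC. Compressed pages are
 * tracked separately and are excluded here.
 */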
52static bool __is_cp_guaranteed(struct page *page)
53{
54 struct address_space *mapping = page->mapping;
55 struct inode *inode;
56 struct f2fs_sb_info *sbi;
57
58 if (!mapping)
59 return false;
60
61 inode = mapping->host;
62 sbi = F2FS_I_SB(inode);
63
64 if (inode->i_ino == F2FS_META_INO(sbi) ||
65 inode->i_ino == F2FS_NODE_INO(sbi) ||
66 S_ISDIR(inode->i_mode))
67 return true;
68
69 if (f2fs_is_compressed_page(page))
70 return false;
71 if ((S_ISREG(inode->i_mode) &&
72 (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) ||
73 page_private_gcing(page))
74 return true;
75 return false;
76}
77
78static enum count_type __read_io_type(struct page *page)
79{
80 struct address_space *mapping = page_file_mapping(page);
81
82 if (mapping) {
83 struct inode *inode = mapping->host;
84 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
85
86 if (inode->i_ino == F2FS_META_INO(sbi))
87 return F2FS_RD_META;
88
89 if (inode->i_ino == F2FS_NODE_INO(sbi))
90 return F2FS_RD_NODE;
91 }
92 return F2FS_RD_DATA;
93}
94
95/* postprocessing steps for read bios */
96enum bio_post_read_step {
97#ifdef CONFIG_FS_ENCRYPTION
98 STEP_DECRYPT = 1 << 0,
99#else
100 STEP_DECRYPT = 0, /* compile out the decryption-related code */
101#endif
102#ifdef CONFIG_F2FS_FS_COMPRESSION
103 STEP_DECOMPRESS = 1 << 1,
104#else
105 STEP_DECOMPRESS = 0, /* compile out the decompression-related code */
106#endif
107#ifdef CONFIG_FS_VERITY
108 STEP_VERITY = 1 << 2,
109#else
110 STEP_VERITY = 0, /* compile out the verity-related code */
111#endif
112};
113
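/*
 * Per-bio context for read postprocessing. @enabled_steps is a mask of the
 * STEP_* values above; the steps run in order decrypt -> decompress ->
 * verity (see f2fs_post_read_work() and f2fs_verify_and_finish_bio()).
 * @fs_blkaddr is the file system block address of the first page in the
 * bio, used to locate the on-disk compressed cluster.
 */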
114struct bio_post_read_ctx {
115 struct bio *bio;
116 struct f2fs_sb_info *sbi;
117 struct work_struct work;
118 unsigned int enabled_steps;
119 block_t fs_blkaddr;
120};
121
122static void f2fs_finish_read_bio(struct bio *bio)
123{
124 struct bio_vec *bv;
125 struct bvec_iter_all iter_all;
126
127 /*
128 * Update and unlock the bio's pagecache pages, and put the
129 * decompression context for any compressed pages.
130 */
131 bio_for_each_segment_all(bv, bio, iter_all) {
132 struct page *page = bv->bv_page;
133
134 if (f2fs_is_compressed_page(page)) {
135 if (bio->bi_status)
136 f2fs_end_read_compressed_page(page, true, 0);
137 f2fs_put_page_dic(page);
138 continue;
139 }
140
141 /* PG_error was set if decryption or verity failed. */
142 if (bio->bi_status || PageError(page)) {
143 ClearPageUptodate(page);
144 /* will re-read again later */
145 ClearPageError(page);
146 } else {
147 SetPageUptodate(page);
148 }
149 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
150 unlock_page(page);
151 }
152
153 if (bio->bi_private)
154 mempool_free(bio->bi_private, bio_post_read_ctx_pool);
155 bio_put(bio);
156}
157
158static void f2fs_verify_bio(struct work_struct *work)
159{
160 struct bio_post_read_ctx *ctx =
161 container_of(work, struct bio_post_read_ctx, work);
162 struct bio *bio = ctx->bio;
163 bool may_have_compressed_pages = (ctx->enabled_steps & STEP_DECOMPRESS);
164
165 /*
166 * fsverity_verify_bio() may call readpages() again, and while verity
167 * will be disabled for this, decryption and/or decompression may still
168 * be needed, resulting in another bio_post_read_ctx being allocated.
169 * So to prevent deadlocks we need to release the current ctx to the
170 * mempool first. This assumes that verity is the last post-read step.
171 */
172 mempool_free(ctx, bio_post_read_ctx_pool);
173 bio->bi_private = NULL;
174
175 /*
176 * Verify the bio's pages with fs-verity. Exclude compressed pages,
177 * as those were handled separately by f2fs_end_read_compressed_page().
178 */
179 if (may_have_compressed_pages) {
180 struct bio_vec *bv;
181 struct bvec_iter_all iter_all;
182
183 bio_for_each_segment_all(bv, bio, iter_all) {
184 struct page *page = bv->bv_page;
185
186 if (!f2fs_is_compressed_page(page) &&
187 !PageError(page) && !fsverity_verify_page(page))
188 SetPageError(page);
189 }
190 } else {
191 fsverity_verify_bio(bio);
192 }
193
194 f2fs_finish_read_bio(bio);
195}
196
197/*
198 * If the bio's data needs to be verified with fs-verity, then enqueue the
199 * verity work for the bio. Otherwise finish the bio now.
200 *
201 * Note that to avoid deadlocks, the verity work can't be done on the
202 * decryption/decompression workqueue. This is because verifying the data pages
203 * can involve reading verity metadata pages from the file, and these verity
204 * metadata pages may be encrypted and/or compressed.
205 */
206static void f2fs_verify_and_finish_bio(struct bio *bio)
207{
208 struct bio_post_read_ctx *ctx = bio->bi_private;
209
210 if (ctx && (ctx->enabled_steps & STEP_VERITY)) {
211 INIT_WORK(&ctx->work, f2fs_verify_bio);
212 fsverity_enqueue_verify_work(&ctx->work);
213 } else {
214 f2fs_finish_read_bio(bio);
215 }
216}
217
218/*
219 * Handle STEP_DECOMPRESS by decompressing any compressed clusters whose last
220 * remaining page was read by @ctx->bio.
221 *
222 * Note that a bio may span clusters (even a mix of compressed and uncompressed
223 * clusters) or be for just part of a cluster. STEP_DECOMPRESS just indicates
224 * that the bio includes at least one compressed page. The actual decompression
225 * is done on a per-cluster basis, not a per-bio basis.
226 */
227static void f2fs_handle_step_decompress(struct bio_post_read_ctx *ctx)
228{
229 struct bio_vec *bv;
230 struct bvec_iter_all iter_all;
231 bool all_compressed = true;
232 block_t blkaddr = ctx->fs_blkaddr;
233
234 bio_for_each_segment_all(bv, ctx->bio, iter_all) {
235 struct page *page = bv->bv_page;
236
237 /* PG_error was set if decryption failed. */
238 if (f2fs_is_compressed_page(page))
239 f2fs_end_read_compressed_page(page, PageError(page),
240 blkaddr);
241 else
242 all_compressed = false;
243
244 blkaddr++;
245 }
246
247 /*
248 * Optimization: if all the bio's pages are compressed, then scheduling
249 * the per-bio verity work is unnecessary, as verity will be fully
250 * handled at the compression cluster level.
251 */
252 if (all_compressed)
253 ctx->enabled_steps &= ~STEP_VERITY;
254}
255
256static void f2fs_post_read_work(struct work_struct *work)
257{
258 struct bio_post_read_ctx *ctx =
259 container_of(work, struct bio_post_read_ctx, work);
260
261 if (ctx->enabled_steps & STEP_DECRYPT)
262 fscrypt_decrypt_bio(ctx->bio);
263
264 if (ctx->enabled_steps & STEP_DECOMPRESS)
265 f2fs_handle_step_decompress(ctx);
266
267 f2fs_verify_and_finish_bio(ctx->bio);
268}
269
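/*
 * Completion handler for read bios. A failed bio is finished immediately;
 * otherwise, decryption/decompression work is punted to sbi->post_read_wq,
 * since those steps may sleep and cannot run in bio completion context.
 */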
270static void f2fs_read_end_io(struct bio *bio)
271{
272 struct f2fs_sb_info *sbi = F2FS_P_SB(bio_first_page_all(bio));
273 struct bio_post_read_ctx *ctx = bio->bi_private;
274
275 if (time_to_inject(sbi, FAULT_READ_IO)) {
276 f2fs_show_injection_info(sbi, FAULT_READ_IO);
277 bio->bi_status = BLK_STS_IOERR;
278 }
279
280 if (bio->bi_status) {
281 f2fs_finish_read_bio(bio);
282 return;
283 }
284
285 if (ctx && (ctx->enabled_steps & (STEP_DECRYPT | STEP_DECOMPRESS))) {
286 INIT_WORK(&ctx->work, f2fs_post_read_work);
287 queue_work(ctx->sbi->post_read_wq, &ctx->work);
288 } else {
289 f2fs_verify_and_finish_bio(bio);
290 }
291}
292
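/*
 * Completion handler for write bios: frees the dummy pages used for IO
 * alignment, finalizes fscrypt bounce pages, hands compressed pages back
 * to the compression layer, propagates IO errors to the mapping (stopping
 * checkpointing when checkpoint-guaranteed data fails), and wakes up a
 * checkpoint waiter once the last F2FS_WB_CP_DATA page completes.
 */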
293static void f2fs_write_end_io(struct bio *bio)
294{
295 struct f2fs_sb_info *sbi = bio->bi_private;
296 struct bio_vec *bvec;
297 struct bvec_iter_all iter_all;
298
299 if (time_to_inject(sbi, FAULT_WRITE_IO)) {
300 f2fs_show_injection_info(sbi, FAULT_WRITE_IO);
301 bio->bi_status = BLK_STS_IOERR;
302 }
303
304 bio_for_each_segment_all(bvec, bio, iter_all) {
305 struct page *page = bvec->bv_page;
306 enum count_type type = WB_DATA_TYPE(page);
307
308 if (page_private_dummy(page)) {
309 clear_page_private_dummy(page);
310 unlock_page(page);
311 mempool_free(page, sbi->write_io_dummy);
312
313 if (unlikely(bio->bi_status))
314 f2fs_stop_checkpoint(sbi, true);
315 continue;
316 }
317
318 fscrypt_finalize_bounce_page(&page);
319
320#ifdef CONFIG_F2FS_FS_COMPRESSION
321 if (f2fs_is_compressed_page(page)) {
322 f2fs_compress_write_end_io(bio, page);
323 continue;
324 }
325#endif
326
327 if (unlikely(bio->bi_status)) {
328 mapping_set_error(page->mapping, -EIO);
329 if (type == F2FS_WB_CP_DATA)
330 f2fs_stop_checkpoint(sbi, true);
331 }
332
333 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
334 page->index != nid_of_node(page));
335
336 dec_page_count(sbi, type);
337 if (f2fs_in_warm_node_list(sbi, page))
338 f2fs_del_fsync_node_entry(sbi, page);
339 clear_page_private_gcing(page);
340 end_page_writeback(page);
341 }
342 if (!get_pages(sbi, F2FS_WB_CP_DATA) &&
343 wq_has_sleeper(&sbi->cp_wait))
344 wake_up(&sbi->cp_wait);
345
346 bio_put(bio);
347}
348
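/*
 * Map a filesystem-global block address to the backing device holding it.
 * On multi-device setups each FDEV(i) covers [start_blk, end_blk]; the
 * address is rebased to be device-relative, and @bio (if given) is pointed
 * at the resulting device and sector.
 */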
349struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
350 block_t blk_addr, struct bio *bio)
351{
352 struct block_device *bdev = sbi->sb->s_bdev;
353 int i;
354
355 if (f2fs_is_multi_device(sbi)) {
356 for (i = 0; i < sbi->s_ndevs; i++) {
357 if (FDEV(i).start_blk <= blk_addr &&
358 FDEV(i).end_blk >= blk_addr) {
359 blk_addr -= FDEV(i).start_blk;
360 bdev = FDEV(i).bdev;
361 break;
362 }
363 }
364 }
365 if (bio) {
366 bio_set_dev(bio, bdev);
367 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
368 }
369 return bdev;
370}
371
372int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
373{
374 int i;
375
376 if (!f2fs_is_multi_device(sbi))
377 return 0;
378
379 for (i = 0; i < sbi->s_ndevs; i++)
380 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
381 return i;
382 return 0;
383}
384
385static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages)
386{
387 struct f2fs_sb_info *sbi = fio->sbi;
388 struct bio *bio;
389
390 bio = bio_alloc_bioset(GFP_NOIO, npages, &f2fs_bioset);
391
392 f2fs_target_device(sbi, fio->new_blkaddr, bio);
393 if (is_read_io(fio->op)) {
394 bio->bi_end_io = f2fs_read_end_io;
395 bio->bi_private = NULL;
396 } else {
397 bio->bi_end_io = f2fs_write_end_io;
398 bio->bi_private = sbi;
399 bio->bi_write_hint = f2fs_io_type_to_rw_hint(sbi,
400 fio->type, fio->temp);
401 }
402 if (fio->io_wbc)
403 wbc_init_bio(fio->io_wbc, bio);
404
405 return bio;
406}
407
408static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
409 pgoff_t first_idx,
410 const struct f2fs_io_info *fio,
411 gfp_t gfp_mask)
412{
413 /*
414 * The f2fs garbage collector sets ->encrypted_page when it wants to
415 * read/write raw data without encryption.
416 */
417 if (!fio || !fio->encrypted_page)
418 fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
419}
420
421static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
422 pgoff_t next_idx,
423 const struct f2fs_io_info *fio)
424{
425 /*
426 * The f2fs garbage collector sets ->encrypted_page when it wants to
427 * read/write raw data without encryption.
428 */
429 if (fio && fio->encrypted_page)
430 return !bio_has_crypt_ctx(bio);
431
432 return fscrypt_mergeable_bio(bio, inode, next_idx);
433}
434
435static inline void __submit_bio(struct f2fs_sb_info *sbi,
436 struct bio *bio, enum page_type type)
437{
438 if (!is_read_io(bio_op(bio))) {
439 unsigned int start;
440
441 if (type != DATA && type != NODE)
442 goto submit_io;
443
444 if (f2fs_lfs_mode(sbi) && current->plug)
445 blk_finish_plug(current->plug);
446
447 if (!F2FS_IO_ALIGNED(sbi))
448 goto submit_io;
449
450 start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
451 start %= F2FS_IO_SIZE(sbi);
452
453 if (start == 0)
454 goto submit_io;
455
456 /* fill dummy pages */
457 for (; start < F2FS_IO_SIZE(sbi); start++) {
458 struct page *page =
459 mempool_alloc(sbi->write_io_dummy,
460 GFP_NOIO | __GFP_NOFAIL);
461 f2fs_bug_on(sbi, !page);
462
463 lock_page(page);
464
465 zero_user_segment(page, 0, PAGE_SIZE);
466 set_page_private_dummy(page);
467
468 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
469 f2fs_bug_on(sbi, 1);
470 }
471 /*
472	 * In the NODE case, we lose the next block address chain, so we
473	 * need to do a checkpoint in f2fs_sync_file.
474 */
475 if (type == NODE)
476 set_sbi_flag(sbi, SBI_NEED_CP);
477 }
478submit_io:
479 if (is_read_io(bio_op(bio)))
480 trace_f2fs_submit_read_bio(sbi->sb, type, bio);
481 else
482 trace_f2fs_submit_write_bio(sbi->sb, type, bio);
483 submit_bio(bio);
484}
485
486void f2fs_submit_bio(struct f2fs_sb_info *sbi,
487 struct bio *bio, enum page_type type)
488{
489 __submit_bio(sbi, bio, type);
490}
491
492static void __attach_io_flag(struct f2fs_io_info *fio)
493{
494 struct f2fs_sb_info *sbi = fio->sbi;
495 unsigned int temp_mask = (1 << NR_TEMP_TYPE) - 1;
496 unsigned int io_flag, fua_flag, meta_flag;
497
498 if (fio->type == DATA)
499 io_flag = sbi->data_io_flag;
500 else if (fio->type == NODE)
501 io_flag = sbi->node_io_flag;
502 else
503 return;
504
505 fua_flag = io_flag & temp_mask;
506 meta_flag = (io_flag >> NR_TEMP_TYPE) & temp_mask;
507
508 /*
509 * data/node io flag bits per temp:
510 * REQ_META | REQ_FUA |
511 * 5 | 4 | 3 | 2 | 1 | 0 |
512 * Cold | Warm | Hot | Cold | Warm | Hot |
513 */
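	/*
	 * Worked example (assuming the layout above): io_flag == 0x09 sets
	 * bit 0 (FUA for hot IO) and bit 3 (META for hot IO), so a HOT fio
	 * gets REQ_META | REQ_FUA while WARM and COLD fios get neither.
	 */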
514 if ((1 << fio->temp) & meta_flag)
515 fio->op_flags |= REQ_META;
516 if ((1 << fio->temp) & fua_flag)
517 fio->op_flags |= REQ_FUA;
518}
519
520static void __submit_merged_bio(struct f2fs_bio_info *io)
521{
522 struct f2fs_io_info *fio = &io->fio;
523
524 if (!io->bio)
525 return;
526
527 __attach_io_flag(fio);
528 bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
529
530 if (is_read_io(fio->op))
531 trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
532 else
533 trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
534
535 __submit_bio(io->sbi, io->bio, fio->type);
536 io->bio = NULL;
537}
538
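/*
 * Check whether @bio already carries a page matching @inode, @page or @ino.
 * fscrypt bounce pages and compress control pages are translated back to
 * the pagecache page they stand for before the comparison.
 */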
539static bool __has_merged_page(struct bio *bio, struct inode *inode,
540 struct page *page, nid_t ino)
541{
542 struct bio_vec *bvec;
543 struct bvec_iter_all iter_all;
544
545 if (!bio)
546 return false;
547
548 if (!inode && !page && !ino)
549 return true;
550
551 bio_for_each_segment_all(bvec, bio, iter_all) {
552 struct page *target = bvec->bv_page;
553
554 if (fscrypt_is_bounce_page(target)) {
555 target = fscrypt_pagecache_page(target);
556 if (IS_ERR(target))
557 continue;
558 }
559 if (f2fs_is_compressed_page(target)) {
560 target = f2fs_compress_control_page(target);
561 if (IS_ERR(target))
562 continue;
563 }
564
565 if (inode && inode == target->mapping->host)
566 return true;
567 if (page && page == target)
568 return true;
569 if (ino && ino == ino_of_node(target))
570 return true;
571 }
572
573 return false;
574}
575
576static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi,
577 enum page_type type, enum temp_type temp)
578{
579 enum page_type btype = PAGE_TYPE_OF_BIO(type);
580 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
581
582 down_write(&io->io_rwsem);
583
584 /* change META to META_FLUSH in the checkpoint procedure */
585 if (type >= META_FLUSH) {
586 io->fio.type = META_FLUSH;
587 io->fio.op = REQ_OP_WRITE;
588 io->fio.op_flags = REQ_META | REQ_PRIO | REQ_SYNC;
589 if (!test_opt(sbi, NOBARRIER))
590 io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
591 }
592 __submit_merged_bio(io);
593 up_write(&io->io_rwsem);
594}
595
596static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
597 struct inode *inode, struct page *page,
598 nid_t ino, enum page_type type, bool force)
599{
600 enum temp_type temp;
601 bool ret = true;
602
603 for (temp = HOT; temp < NR_TEMP_TYPE; temp++) {
604 if (!force) {
605 enum page_type btype = PAGE_TYPE_OF_BIO(type);
606 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
607
608 down_read(&io->io_rwsem);
609 ret = __has_merged_page(io->bio, inode, page, ino);
610 up_read(&io->io_rwsem);
611 }
612 if (ret)
613 __f2fs_submit_merged_write(sbi, type, temp);
614
615		/* TODO: for now, only the HOT temp is used for meta pages. */
616 if (type >= META)
617 break;
618 }
619}
620
621void f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type type)
622{
623 __submit_merged_write_cond(sbi, NULL, NULL, 0, type, true);
624}
625
626void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
627 struct inode *inode, struct page *page,
628 nid_t ino, enum page_type type)
629{
630 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
631}
632
633void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi)
634{
635 f2fs_submit_merged_write(sbi, DATA);
636 f2fs_submit_merged_write(sbi, NODE);
637 f2fs_submit_merged_write(sbi, META);
638}
639
640/*
641 * Fill the locked page with the data located at the given block address.
642 * A caller needs to unlock the page on failure.
643 */
644int f2fs_submit_page_bio(struct f2fs_io_info *fio)
645{
646 struct bio *bio;
647 struct page *page = fio->encrypted_page ?
648 fio->encrypted_page : fio->page;
649
650 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
651 fio->is_por ? META_POR : (__is_meta_io(fio) ?
652 META_GENERIC : DATA_GENERIC_ENHANCE)))
653 return -EFSCORRUPTED;
654
655 trace_f2fs_submit_page_bio(page, fio);
656
657 /* Allocate a new bio */
658 bio = __bio_alloc(fio, 1);
659
660 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
661 fio->page->index, fio, GFP_NOIO);
662
663 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
664 bio_put(bio);
665 return -EFAULT;
666 }
667
668 if (fio->io_wbc && !is_read_io(fio->op))
669 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
670
671 __attach_io_flag(fio);
672 bio_set_op_attrs(bio, fio->op, fio->op_flags);
673
674 inc_page_count(fio->sbi, is_read_io(fio->op) ?
675			__read_io_type(page) : WB_DATA_TYPE(fio->page));
676
677 __submit_bio(fio->sbi, bio, fio->type);
678 return 0;
679}
680
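/*
 * A page can be merged into the pending bio only if the bio is still under
 * sbi->max_io_bytes, the new block is physically contiguous with the last
 * one, and both blocks live on the same backing device.
 */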
681static bool page_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
682 block_t last_blkaddr, block_t cur_blkaddr)
683{
684 if (unlikely(sbi->max_io_bytes &&
685 bio->bi_iter.bi_size >= sbi->max_io_bytes))
686 return false;
687 if (last_blkaddr + 1 != cur_blkaddr)
688 return false;
689 return bio->bi_bdev == f2fs_target_device(sbi, cur_blkaddr, NULL);
690}
691
692static bool io_type_is_mergeable(struct f2fs_bio_info *io,
693 struct f2fs_io_info *fio)
694{
695 if (io->fio.op != fio->op)
696 return false;
697 return io->fio.op_flags == fio->op_flags;
698}
699
700static bool io_is_mergeable(struct f2fs_sb_info *sbi, struct bio *bio,
701 struct f2fs_bio_info *io,
702 struct f2fs_io_info *fio,
703 block_t last_blkaddr,
704 block_t cur_blkaddr)
705{
706 if (F2FS_IO_ALIGNED(sbi) && (fio->type == DATA || fio->type == NODE)) {
707 unsigned int filled_blocks =
708 F2FS_BYTES_TO_BLK(bio->bi_iter.bi_size);
709 unsigned int io_size = F2FS_IO_SIZE(sbi);
710 unsigned int left_vecs = bio->bi_max_vecs - bio->bi_vcnt;
711
712		/* IOs in the bio are aligned and the remaining vector slots are not enough */
713 if (!(filled_blocks % io_size) && left_vecs < io_size)
714 return false;
715 }
716 if (!page_is_mergeable(sbi, bio, last_blkaddr, cur_blkaddr))
717 return false;
718 return io_type_is_mergeable(io, fio);
719}
720
721static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio,
722 struct page *page, enum temp_type temp)
723{
724 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
725 struct bio_entry *be;
726
727 be = f2fs_kmem_cache_alloc(bio_entry_slab, GFP_NOFS);
728 be->bio = bio;
729 bio_get(bio);
730
731 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
732 f2fs_bug_on(sbi, 1);
733
734 down_write(&io->bio_list_lock);
735 list_add_tail(&be->list, &io->bio_list);
736 up_write(&io->bio_list_lock);
737}
738
739static void del_bio_entry(struct bio_entry *be)
740{
741 list_del(&be->list);
742 kmem_cache_free(bio_entry_slab, be);
743}
744
745static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio,
746 struct page *page)
747{
748 struct f2fs_sb_info *sbi = fio->sbi;
749 enum temp_type temp;
750 bool found = false;
751 int ret = -EAGAIN;
752
753 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
754 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
755 struct list_head *head = &io->bio_list;
756 struct bio_entry *be;
757
758 down_write(&io->bio_list_lock);
759 list_for_each_entry(be, head, list) {
760 if (be->bio != *bio)
761 continue;
762
763 found = true;
764
765 f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio,
766 *fio->last_block,
767 fio->new_blkaddr));
768 if (f2fs_crypt_mergeable_bio(*bio,
769 fio->page->mapping->host,
770 fio->page->index, fio) &&
771 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
772 PAGE_SIZE) {
773 ret = 0;
774 break;
775 }
776
777 /* page can't be merged into bio; submit the bio */
778 del_bio_entry(be);
779 __submit_bio(sbi, *bio, DATA);
780 break;
781 }
782 up_write(&io->bio_list_lock);
783 }
784
785 if (ret) {
786 bio_put(*bio);
787 *bio = NULL;
788 }
789
790 return ret;
791}
792
793void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi,
794 struct bio **bio, struct page *page)
795{
796 enum temp_type temp;
797 bool found = false;
798 struct bio *target = bio ? *bio : NULL;
799
800 for (temp = HOT; temp < NR_TEMP_TYPE && !found; temp++) {
801 struct f2fs_bio_info *io = sbi->write_io[DATA] + temp;
802 struct list_head *head = &io->bio_list;
803 struct bio_entry *be;
804
805 if (list_empty(head))
806 continue;
807
808 down_read(&io->bio_list_lock);
809 list_for_each_entry(be, head, list) {
810 if (target)
811 found = (target == be->bio);
812 else
813 found = __has_merged_page(be->bio, NULL,
814 page, 0);
815 if (found)
816 break;
817 }
818 up_read(&io->bio_list_lock);
819
820 if (!found)
821 continue;
822
823 found = false;
824
825 down_write(&io->bio_list_lock);
826 list_for_each_entry(be, head, list) {
827 if (target)
828 found = (target == be->bio);
829 else
830 found = __has_merged_page(be->bio, NULL,
831 page, 0);
832 if (found) {
833 target = be->bio;
834 del_bio_entry(be);
835 break;
836 }
837 }
838 up_write(&io->bio_list_lock);
839 }
840
841 if (found)
842 __submit_bio(sbi, target, DATA);
843 if (bio && *bio) {
844 bio_put(*bio);
845 *bio = NULL;
846 }
847}
848
849int f2fs_merge_page_bio(struct f2fs_io_info *fio)
850{
851 struct bio *bio = *fio->bio;
852 struct page *page = fio->encrypted_page ?
853 fio->encrypted_page : fio->page;
854
855 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
856 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
857 return -EFSCORRUPTED;
858
859 trace_f2fs_submit_page_bio(page, fio);
860
861 if (bio && !page_is_mergeable(fio->sbi, bio, *fio->last_block,
862 fio->new_blkaddr))
863 f2fs_submit_merged_ipu_write(fio->sbi, &bio, NULL);
864alloc_new:
865 if (!bio) {
866 bio = __bio_alloc(fio, BIO_MAX_VECS);
867 __attach_io_flag(fio);
868 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
869 fio->page->index, fio, GFP_NOIO);
870 bio_set_op_attrs(bio, fio->op, fio->op_flags);
871
872 add_bio_entry(fio->sbi, bio, page, fio->temp);
873 } else {
874 if (add_ipu_page(fio, &bio, page))
875 goto alloc_new;
876 }
877
878 if (fio->io_wbc)
879 wbc_account_cgroup_owner(fio->io_wbc, page, PAGE_SIZE);
880
881 inc_page_count(fio->sbi, WB_DATA_TYPE(page));
882
883 *fio->last_block = fio->new_blkaddr;
884 *fio->bio = bio;
885
886 return 0;
887}
888
889void f2fs_submit_page_write(struct f2fs_io_info *fio)
890{
891 struct f2fs_sb_info *sbi = fio->sbi;
892 enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
893 struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
894 struct page *bio_page;
895
896 f2fs_bug_on(sbi, is_read_io(fio->op));
897
898 down_write(&io->io_rwsem);
899next:
900 if (fio->in_list) {
901 spin_lock(&io->io_lock);
902 if (list_empty(&io->io_list)) {
903 spin_unlock(&io->io_lock);
904 goto out;
905 }
906 fio = list_first_entry(&io->io_list,
907 struct f2fs_io_info, list);
908 list_del(&fio->list);
909 spin_unlock(&io->io_lock);
910 }
911
912 verify_fio_blkaddr(fio);
913
914 if (fio->encrypted_page)
915 bio_page = fio->encrypted_page;
916 else if (fio->compressed_page)
917 bio_page = fio->compressed_page;
918 else
919 bio_page = fio->page;
920
921 /* set submitted = true as a return value */
922 fio->submitted = true;
923
924 inc_page_count(sbi, WB_DATA_TYPE(bio_page));
925
926 if (io->bio &&
927 (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
928 fio->new_blkaddr) ||
929 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
930 bio_page->index, fio)))
931 __submit_merged_bio(io);
932alloc_new:
933 if (io->bio == NULL) {
934 if (F2FS_IO_ALIGNED(sbi) &&
935 (fio->type == DATA || fio->type == NODE) &&
936 fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
937 dec_page_count(sbi, WB_DATA_TYPE(bio_page));
938 fio->retry = true;
939 goto skip;
940 }
941 io->bio = __bio_alloc(fio, BIO_MAX_VECS);
942 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
943 bio_page->index, fio, GFP_NOIO);
944 io->fio = *fio;
945 }
946
947 if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < PAGE_SIZE) {
948 __submit_merged_bio(io);
949 goto alloc_new;
950 }
951
952 if (fio->io_wbc)
953 wbc_account_cgroup_owner(fio->io_wbc, bio_page, PAGE_SIZE);
954
955 io->last_block_in_bio = fio->new_blkaddr;
956
957 trace_f2fs_submit_page_write(fio->page, fio);
958skip:
959 if (fio->in_list)
960 goto next;
961out:
962 if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
963 !f2fs_is_checkpoint_ready(sbi))
964 __submit_merged_bio(io);
965 up_write(&io->io_rwsem);
966}
967
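/*
 * Allocate a read bio targeting @blkaddr. An inline-crypto context is
 * attached when the inode uses hardware encryption; a bio_post_read_ctx is
 * allocated whenever software decryption or verity is needed, or when the
 * file is compressed (the caller enables STEP_DECOMPRESS itself, per the
 * comment below).
 */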
968static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
969 unsigned nr_pages, unsigned op_flag,
970 pgoff_t first_idx, bool for_write)
971{
972 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
973 struct bio *bio;
974 struct bio_post_read_ctx *ctx;
975 unsigned int post_read_steps = 0;
976
977 bio = bio_alloc_bioset(for_write ? GFP_NOIO : GFP_KERNEL,
978 bio_max_segs(nr_pages), &f2fs_bioset);
979 if (!bio)
980 return ERR_PTR(-ENOMEM);
981
982 f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS);
983
984 f2fs_target_device(sbi, blkaddr, bio);
985 bio->bi_end_io = f2fs_read_end_io;
986 bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
987
988 if (fscrypt_inode_uses_fs_layer_crypto(inode))
989 post_read_steps |= STEP_DECRYPT;
990
991 if (f2fs_need_verity(inode, first_idx))
992 post_read_steps |= STEP_VERITY;
993
994 /*
995 * STEP_DECOMPRESS is handled specially, since a compressed file might
996 * contain both compressed and uncompressed clusters. We'll allocate a
997 * bio_post_read_ctx if the file is compressed, but the caller is
998 * responsible for enabling STEP_DECOMPRESS if it's actually needed.
999 */
1000
1001 if (post_read_steps || f2fs_compressed_file(inode)) {
1002 /* Due to the mempool, this never fails. */
1003 ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
1004 ctx->bio = bio;
1005 ctx->sbi = sbi;
1006 ctx->enabled_steps = post_read_steps;
1007 ctx->fs_blkaddr = blkaddr;
1008 bio->bi_private = ctx;
1009 }
1010
1011 return bio;
1012}
1013
1014/* This read path can handle encrypted data */
1015static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1016 block_t blkaddr, int op_flags, bool for_write)
1017{
1018 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1019 struct bio *bio;
1020
1021 bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags,
1022 page->index, for_write);
1023 if (IS_ERR(bio))
1024 return PTR_ERR(bio);
1025
1026 /* wait for GCed page writeback via META_MAPPING */
1027 f2fs_wait_on_block_writeback(inode, blkaddr);
1028
1029 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1030 bio_put(bio);
1031 return -EFAULT;
1032 }
1033 ClearPageError(page);
1034 inc_page_count(sbi, F2FS_RD_DATA);
1035 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
1036 __submit_bio(sbi, bio, DATA);
1037 return 0;
1038}
1039
1040static void __set_data_blkaddr(struct dnode_of_data *dn)
1041{
1042 struct f2fs_node *rn = F2FS_NODE(dn->node_page);
1043 __le32 *addr_array;
1044 int base = 0;
1045
1046 if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
1047 base = get_extra_isize(dn->inode);
1048
1049 /* Get physical address of data block */
1050 addr_array = blkaddr_in_node(rn);
1051 addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
1052}
1053
1054/*
1055 * Lock ordering for the change of data block address:
1056 * ->data_page
1057 * ->node_page
1058 * update block addresses in the node page
1059 */
1060void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
1061{
1062 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1063 __set_data_blkaddr(dn);
1064 if (set_page_dirty(dn->node_page))
1065 dn->node_changed = true;
1066}
1067
1068void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
1069{
1070 dn->data_blkaddr = blkaddr;
1071 f2fs_set_data_blkaddr(dn);
1072 f2fs_update_extent_cache(dn);
1073}
1074
1075/* dn->ofs_in_node is returned pointing just past the last reserved block */
1076int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
1077{
1078 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1079 int err;
1080
1081 if (!count)
1082 return 0;
1083
1084 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1085 return -EPERM;
1086 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1087 return err;
1088
1089 trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
1090 dn->ofs_in_node, count);
1091
1092 f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
1093
1094 for (; count > 0; dn->ofs_in_node++) {
1095 block_t blkaddr = f2fs_data_blkaddr(dn);
1096
1097 if (blkaddr == NULL_ADDR) {
1098 dn->data_blkaddr = NEW_ADDR;
1099 __set_data_blkaddr(dn);
1100 count--;
1101 }
1102 }
1103
1104 if (set_page_dirty(dn->node_page))
1105 dn->node_changed = true;
1106 return 0;
1107}
1108
1109/* Should keep dn->ofs_in_node unchanged */
1110int f2fs_reserve_new_block(struct dnode_of_data *dn)
1111{
1112 unsigned int ofs_in_node = dn->ofs_in_node;
1113 int ret;
1114
1115 ret = f2fs_reserve_new_blocks(dn, 1);
1116 dn->ofs_in_node = ofs_in_node;
1117 return ret;
1118}
1119
1120int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
1121{
1122	bool need_put = !dn->inode_page;
1123 int err;
1124
1125 err = f2fs_get_dnode_of_data(dn, index, ALLOC_NODE);
1126 if (err)
1127 return err;
1128
1129 if (dn->data_blkaddr == NULL_ADDR)
1130 err = f2fs_reserve_new_block(dn);
1131 if (err || need_put)
1132 f2fs_put_dnode(dn);
1133 return err;
1134}
1135
1136int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
1137{
1138 struct extent_info ei = {0, 0, 0};
1139 struct inode *inode = dn->inode;
1140
1141 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1142 dn->data_blkaddr = ei.blk + index - ei.fofs;
1143 return 0;
1144 }
1145
1146 return f2fs_reserve_block(dn, index);
1147}
1148
1149struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1150 int op_flags, bool for_write)
1151{
1152 struct address_space *mapping = inode->i_mapping;
1153 struct dnode_of_data dn;
1154 struct page *page;
1155	struct extent_info ei = {0, 0, 0};
1156 int err;
1157
1158 page = f2fs_grab_cache_page(mapping, index, for_write);
1159 if (!page)
1160 return ERR_PTR(-ENOMEM);
1161
1162 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
1163 dn.data_blkaddr = ei.blk + index - ei.fofs;
1164 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
1165 DATA_GENERIC_ENHANCE_READ)) {
1166 err = -EFSCORRUPTED;
1167 goto put_err;
1168 }
1169 goto got_it;
1170 }
1171
1172 set_new_dnode(&dn, inode, NULL, NULL, 0);
1173 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
1174 if (err)
1175 goto put_err;
1176 f2fs_put_dnode(&dn);
1177
1178 if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
1179 err = -ENOENT;
1180 goto put_err;
1181 }
1182 if (dn.data_blkaddr != NEW_ADDR &&
1183 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
1184 dn.data_blkaddr,
1185 DATA_GENERIC_ENHANCE)) {
1186 err = -EFSCORRUPTED;
1187 goto put_err;
1188 }
1189got_it:
1190 if (PageUptodate(page)) {
1191 unlock_page(page);
1192 return page;
1193 }
1194
1195 /*
1196 * A new dentry page is allocated but not able to be written, since its
1197 * new inode page couldn't be allocated due to -ENOSPC.
1198	 * In such a case, its blkaddr can remain NEW_ADDR.
1199	 * See f2fs_add_link -> f2fs_get_new_data_page ->
1200 * f2fs_init_inode_metadata.
1201 */
1202 if (dn.data_blkaddr == NEW_ADDR) {
1203 zero_user_segment(page, 0, PAGE_SIZE);
1204 if (!PageUptodate(page))
1205 SetPageUptodate(page);
1206 unlock_page(page);
1207 return page;
1208 }
1209
1210 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1211 op_flags, for_write);
1212 if (err)
1213 goto put_err;
1214 return page;
1215
1216put_err:
1217 f2fs_put_page(page, 1);
1218 return ERR_PTR(err);
1219}
1220
1221struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index)
1222{
1223 struct address_space *mapping = inode->i_mapping;
1224 struct page *page;
1225
1226 page = find_get_page(mapping, index);
1227 if (page && PageUptodate(page))
1228 return page;
1229 f2fs_put_page(page, 0);
1230
1231 page = f2fs_get_read_data_page(inode, index, 0, false);
1232 if (IS_ERR(page))
1233 return page;
1234
1235 if (PageUptodate(page))
1236 return page;
1237
1238 wait_on_page_locked(page);
1239 if (unlikely(!PageUptodate(page))) {
1240 f2fs_put_page(page, 0);
1241 return ERR_PTR(-EIO);
1242 }
1243 return page;
1244}
1245
1246/*
1247 * If it tries to access a hole, return an error, because the callers
1248 * (functions in dir.c and GC) need to know whether this page exists
1249 * or not.
1250 */
1251struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1252 bool for_write)
1253{
1254 struct address_space *mapping = inode->i_mapping;
1255 struct page *page;
1256repeat:
1257 page = f2fs_get_read_data_page(inode, index, 0, for_write);
1258 if (IS_ERR(page))
1259 return page;
1260
1261 /* wait for read completion */
1262 lock_page(page);
1263 if (unlikely(page->mapping != mapping)) {
1264 f2fs_put_page(page, 1);
1265 goto repeat;
1266 }
1267 if (unlikely(!PageUptodate(page))) {
1268 f2fs_put_page(page, 1);
1269 return ERR_PTR(-EIO);
1270 }
1271 return page;
1272}
1273
1274/*
1275 * Caller ensures that this data page is never allocated.
1276 * A new zero-filled data page is allocated in the page cache.
1277 *
1278 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
1279 * f2fs_unlock_op().
1280 * Note that ipage is set only by make_empty_dir, and if any error occurs,
1281 * ipage should be released by this function.
1282 */
1283struct page *f2fs_get_new_data_page(struct inode *inode,
1284 struct page *ipage, pgoff_t index, bool new_i_size)
1285{
1286 struct address_space *mapping = inode->i_mapping;
1287 struct page *page;
1288 struct dnode_of_data dn;
1289 int err;
1290
1291 page = f2fs_grab_cache_page(mapping, index, true);
1292 if (!page) {
1293 /*
1294		 * Before exiting, we should make sure ipage will be released
1295		 * if any error occurs.
1296 */
1297 f2fs_put_page(ipage, 1);
1298 return ERR_PTR(-ENOMEM);
1299 }
1300
1301 set_new_dnode(&dn, inode, ipage, NULL, 0);
1302 err = f2fs_reserve_block(&dn, index);
1303 if (err) {
1304 f2fs_put_page(page, 1);
1305 return ERR_PTR(err);
1306 }
1307 if (!ipage)
1308 f2fs_put_dnode(&dn);
1309
1310 if (PageUptodate(page))
1311 goto got_it;
1312
1313 if (dn.data_blkaddr == NEW_ADDR) {
1314 zero_user_segment(page, 0, PAGE_SIZE);
1315 if (!PageUptodate(page))
1316 SetPageUptodate(page);
1317 } else {
1318 f2fs_put_page(page, 1);
1319
1320 /* if ipage exists, blkaddr should be NEW_ADDR */
1321 f2fs_bug_on(F2FS_I_SB(inode), ipage);
1322 page = f2fs_get_lock_data_page(inode, index, true);
1323 if (IS_ERR(page))
1324 return page;
1325 }
1326got_it:
1327 if (new_i_size && i_size_read(inode) <
1328 ((loff_t)(index + 1) << PAGE_SHIFT))
1329 f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
1330 return page;
1331}
1332
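/*
 * Allocate a new data block for the dnode's current slot through the
 * segment allocator (always out-of-place), drop cached copies of the old
 * block address, and record the new address in the node page and the
 * extent cache.
 */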
1333static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
1334{
1335 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1336 struct f2fs_summary sum;
1337 struct node_info ni;
1338 block_t old_blkaddr;
1339 blkcnt_t count = 1;
1340 int err;
1341
1342 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1343 return -EPERM;
1344
1345 err = f2fs_get_node_info(sbi, dn->nid, &ni);
1346 if (err)
1347 return err;
1348
1349 dn->data_blkaddr = f2fs_data_blkaddr(dn);
1350 if (dn->data_blkaddr != NULL_ADDR)
1351 goto alloc;
1352
1353 if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
1354 return err;
1355
1356alloc:
1357 set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
1358 old_blkaddr = dn->data_blkaddr;
1359 f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
1360 &sum, seg_type, NULL);
1361 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
1362 invalidate_mapping_pages(META_MAPPING(sbi),
1363 old_blkaddr, old_blkaddr);
1364 f2fs_invalidate_compress_page(sbi, old_blkaddr);
1365 }
1366 f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
1367
1368 /*
1369	 * i_size will be updated by direct_IO; otherwise, we'd read stale
1370	 * data from an unwritten block via dio_read.
1371 */
1372 return 0;
1373}
1374
1375int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
1376{
1377 struct inode *inode = file_inode(iocb->ki_filp);
1378 struct f2fs_map_blocks map;
1379 int flag;
1380 int err = 0;
1381 bool direct_io = iocb->ki_flags & IOCB_DIRECT;
1382
1383 map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
1384 map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
1385 if (map.m_len > map.m_lblk)
1386 map.m_len -= map.m_lblk;
1387 else
1388 map.m_len = 0;
1389
1390 map.m_next_pgofs = NULL;
1391 map.m_next_extent = NULL;
1392 map.m_seg_type = NO_CHECK_TYPE;
1393 map.m_may_create = true;
1394
1395 if (direct_io) {
1396 map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
1397 flag = f2fs_force_buffered_io(inode, iocb, from) ?
1398 F2FS_GET_BLOCK_PRE_AIO :
1399 F2FS_GET_BLOCK_PRE_DIO;
1400 goto map_blocks;
1401 }
1402 if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
1403 err = f2fs_convert_inline_inode(inode);
1404 if (err)
1405 return err;
1406 }
1407 if (f2fs_has_inline_data(inode))
1408 return err;
1409
1410 flag = F2FS_GET_BLOCK_PRE_AIO;
1411
1412map_blocks:
1413 err = f2fs_map_blocks(inode, &map, 1, flag);
1414 if (map.m_len > 0 && err == -ENOSPC) {
1415 if (!direct_io)
1416 set_inode_flag(inode, FI_NO_PREALLOC);
1417 err = 0;
1418 }
1419 return err;
1420}
1421
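/*
 * Take or release the lock needed while mapping blocks:
 * F2FS_GET_BLOCK_PRE_AIO serializes against node page address changes via
 * sbi->node_change, while every other flag excludes checkpointing through
 * f2fs_lock_op()/f2fs_unlock_op().
 */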
1422void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
1423{
1424 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1425 if (lock)
1426 down_read(&sbi->node_change);
1427 else
1428 up_read(&sbi->node_change);
1429 } else {
1430 if (lock)
1431 f2fs_lock_op(sbi);
1432 else
1433 f2fs_unlock_op(sbi);
1434 }
1435}
1436
1437/*
1438 * f2fs_map_blocks() tries to find or build a mapping relationship which
1439 * maps continuous logical blocks to physical blocks, and returns such
1440 * info via the f2fs_map_blocks structure.
1441 */
1442int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
1443 int create, int flag)
1444{
1445 unsigned int maxblocks = map->m_len;
1446 struct dnode_of_data dn;
1447 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1448 int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE;
1449 pgoff_t pgofs, end_offset, end;
1450 int err = 0, ofs = 1;
1451 unsigned int ofs_in_node, last_ofs_in_node;
1452 blkcnt_t prealloc;
1453	struct extent_info ei = {0, 0, 0};
1454 block_t blkaddr;
1455 unsigned int start_pgofs;
1456
1457 if (!maxblocks)
1458 return 0;
1459
1460 map->m_len = 0;
1461 map->m_flags = 0;
1462
1463 /* it only supports block size == page size */
1464 pgofs = (pgoff_t)map->m_lblk;
1465 end = pgofs + maxblocks;
1466
1467 if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
1468 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1469 map->m_may_create)
1470 goto next_dnode;
1471
1472 map->m_pblk = ei.blk + pgofs - ei.fofs;
1473 map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
1474 map->m_flags = F2FS_MAP_MAPPED;
1475 if (map->m_next_extent)
1476 *map->m_next_extent = pgofs + map->m_len;
1477
1478		/* for hardware encryption, and to avoid potential issues in the future */
1479 if (flag == F2FS_GET_BLOCK_DIO)
1480 f2fs_wait_on_block_writeback_range(inode,
1481 map->m_pblk, map->m_len);
1482 goto out;
1483 }
1484
1485next_dnode:
1486 if (map->m_may_create)
1487 f2fs_do_map_lock(sbi, flag, true);
1488
1489	/* When reading a hole, we need its node page */
1490 set_new_dnode(&dn, inode, NULL, NULL, 0);
1491 err = f2fs_get_dnode_of_data(&dn, pgofs, mode);
1492 if (err) {
1493 if (flag == F2FS_GET_BLOCK_BMAP)
1494 map->m_pblk = 0;
1495
1496 if (err == -ENOENT) {
1497 /*
1498			 * There is one exceptional case: read_node_page()
1499			 * may return -ENOENT because the filesystem has been
1500			 * shut down or hit cp_error, so force the error
1501			 * number to EIO in that case.
1502 */
1503 if (map->m_may_create &&
1504 (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
1505 f2fs_cp_error(sbi))) {
1506 err = -EIO;
1507 goto unlock_out;
1508 }
1509
1510 err = 0;
1511 if (map->m_next_pgofs)
1512 *map->m_next_pgofs =
1513 f2fs_get_next_page_offset(&dn, pgofs);
1514 if (map->m_next_extent)
1515 *map->m_next_extent =
1516 f2fs_get_next_page_offset(&dn, pgofs);
1517 }
1518 goto unlock_out;
1519 }
1520
1521 start_pgofs = pgofs;
1522 prealloc = 0;
1523 last_ofs_in_node = ofs_in_node = dn.ofs_in_node;
1524 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1525
1526next_block:
1527 blkaddr = f2fs_data_blkaddr(&dn);
1528
1529 if (__is_valid_data_blkaddr(blkaddr) &&
1530 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1531 err = -EFSCORRUPTED;
1532 goto sync_out;
1533 }
1534
1535 if (__is_valid_data_blkaddr(blkaddr)) {
1536		/* use out-of-place update for direct IO under LFS mode */
1537 if (f2fs_lfs_mode(sbi) && flag == F2FS_GET_BLOCK_DIO &&
1538 map->m_may_create) {
1539 err = __allocate_data_block(&dn, map->m_seg_type);
1540 if (err)
1541 goto sync_out;
1542 blkaddr = dn.data_blkaddr;
1543 set_inode_flag(inode, FI_APPEND_WRITE);
1544 }
1545 } else {
1546 if (create) {
1547 if (unlikely(f2fs_cp_error(sbi))) {
1548 err = -EIO;
1549 goto sync_out;
1550 }
1551 if (flag == F2FS_GET_BLOCK_PRE_AIO) {
1552 if (blkaddr == NULL_ADDR) {
1553 prealloc++;
1554 last_ofs_in_node = dn.ofs_in_node;
1555 }
1556 } else {
1557 WARN_ON(flag != F2FS_GET_BLOCK_PRE_DIO &&
1558 flag != F2FS_GET_BLOCK_DIO);
1559 err = __allocate_data_block(&dn,
1560 map->m_seg_type);
1561 if (!err)
1562 set_inode_flag(inode, FI_APPEND_WRITE);
1563 }
1564 if (err)
1565 goto sync_out;
1566 map->m_flags |= F2FS_MAP_NEW;
1567 blkaddr = dn.data_blkaddr;
1568 } else {
1569 if (flag == F2FS_GET_BLOCK_BMAP) {
1570 map->m_pblk = 0;
1571 goto sync_out;
1572 }
1573 if (flag == F2FS_GET_BLOCK_PRECACHE)
1574 goto sync_out;
1575 if (flag == F2FS_GET_BLOCK_FIEMAP &&
1576 blkaddr == NULL_ADDR) {
1577 if (map->m_next_pgofs)
1578 *map->m_next_pgofs = pgofs + 1;
1579 goto sync_out;
1580 }
1581 if (flag != F2FS_GET_BLOCK_FIEMAP) {
1582 /* for defragment case */
1583 if (map->m_next_pgofs)
1584 *map->m_next_pgofs = pgofs + 1;
1585 goto sync_out;
1586 }
1587 }
1588 }
1589
1590 if (flag == F2FS_GET_BLOCK_PRE_AIO)
1591 goto skip;
1592
1593 if (map->m_len == 0) {
1594 /* preallocated unwritten block should be mapped for fiemap. */
1595 if (blkaddr == NEW_ADDR)
1596 map->m_flags |= F2FS_MAP_UNWRITTEN;
1597 map->m_flags |= F2FS_MAP_MAPPED;
1598
1599 map->m_pblk = blkaddr;
1600 map->m_len = 1;
1601 } else if ((map->m_pblk != NEW_ADDR &&
1602 blkaddr == (map->m_pblk + ofs)) ||
1603 (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
1604 flag == F2FS_GET_BLOCK_PRE_DIO) {
1605 ofs++;
1606 map->m_len++;
1607 } else {
1608 goto sync_out;
1609 }
1610
1611skip:
1612 dn.ofs_in_node++;
1613 pgofs++;
1614
1615 /* preallocate blocks in batch for one dnode page */
1616 if (flag == F2FS_GET_BLOCK_PRE_AIO &&
1617 (pgofs == end || dn.ofs_in_node == end_offset)) {
1618
1619 dn.ofs_in_node = ofs_in_node;
1620 err = f2fs_reserve_new_blocks(&dn, prealloc);
1621 if (err)
1622 goto sync_out;
1623
1624 map->m_len += dn.ofs_in_node - ofs_in_node;
1625 if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) {
1626 err = -ENOSPC;
1627 goto sync_out;
1628 }
1629 dn.ofs_in_node = end_offset;
1630 }
1631
1632 if (pgofs >= end)
1633 goto sync_out;
1634 else if (dn.ofs_in_node < end_offset)
1635 goto next_block;
1636
1637 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1638 if (map->m_flags & F2FS_MAP_MAPPED) {
1639 unsigned int ofs = start_pgofs - map->m_lblk;
1640
1641 f2fs_update_extent_cache_range(&dn,
1642 start_pgofs, map->m_pblk + ofs,
1643 map->m_len - ofs);
1644 }
1645 }
1646
1647 f2fs_put_dnode(&dn);
1648
1649 if (map->m_may_create) {
1650 f2fs_do_map_lock(sbi, flag, false);
1651 f2fs_balance_fs(sbi, dn.node_changed);
1652 }
1653 goto next_dnode;
1654
1655sync_out:
1656
1657	/* for hardware encryption, and to avoid potential issues in the future */
1658 if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
1659 f2fs_wait_on_block_writeback_range(inode,
1660 map->m_pblk, map->m_len);
1661
1662 if (flag == F2FS_GET_BLOCK_PRECACHE) {
1663 if (map->m_flags & F2FS_MAP_MAPPED) {
1664 unsigned int ofs = start_pgofs - map->m_lblk;
1665
1666 f2fs_update_extent_cache_range(&dn,
1667 start_pgofs, map->m_pblk + ofs,
1668 map->m_len - ofs);
1669 }
1670 if (map->m_next_extent)
1671 *map->m_next_extent = pgofs + 1;
1672 }
1673 f2fs_put_dnode(&dn);
1674unlock_out:
1675 if (map->m_may_create) {
1676 f2fs_do_map_lock(sbi, flag, false);
1677 f2fs_balance_fs(sbi, dn.node_changed);
1678 }
1679out:
1680 trace_f2fs_map_blocks(inode, map, err);
1681 return err;
1682}
1683
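/*
 * Return true if [pos, pos + len) is fully backed by already-mapped blocks,
 * i.e. a write to this range is a pure overwrite that needs no allocation.
 * The range is walked with lookup-only f2fs_map_blocks() calls (create == 0,
 * m_may_create == false), so nothing is changed on disk.
 */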
1684bool f2fs_overwrite_io(struct inode *inode, loff_t pos, size_t len)
1685{
1686 struct f2fs_map_blocks map;
1687 block_t last_lblk;
1688 int err;
1689
1690 if (pos + len > i_size_read(inode))
1691 return false;
1692
1693 map.m_lblk = F2FS_BYTES_TO_BLK(pos);
1694 map.m_next_pgofs = NULL;
1695 map.m_next_extent = NULL;
1696 map.m_seg_type = NO_CHECK_TYPE;
1697 map.m_may_create = false;
1698 last_lblk = F2FS_BLK_ALIGN(pos + len);
1699
1700 while (map.m_lblk < last_lblk) {
1701 map.m_len = last_lblk - map.m_lblk;
1702 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
1703 if (err || map.m_len == 0)
1704 return false;
1705 map.m_lblk += map.m_len;
1706 }
1707 return true;
1708}
1709
1710static inline u64 bytes_to_blks(struct inode *inode, u64 bytes)
1711{
1712 return (bytes >> inode->i_blkbits);
1713}
1714
1715static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
1716{
1717 return (blks << inode->i_blkbits);
1718}
1719
1720static int __get_data_block(struct inode *inode, sector_t iblock,
1721 struct buffer_head *bh, int create, int flag,
1722 pgoff_t *next_pgofs, int seg_type, bool may_write)
1723{
1724 struct f2fs_map_blocks map;
1725 int err;
1726
1727 map.m_lblk = iblock;
1728 map.m_len = bytes_to_blks(inode, bh->b_size);
1729 map.m_next_pgofs = next_pgofs;
1730 map.m_next_extent = NULL;
1731 map.m_seg_type = seg_type;
1732 map.m_may_create = may_write;
1733
1734 err = f2fs_map_blocks(inode, &map, create, flag);
1735 if (!err) {
1736 map_bh(bh, inode->i_sb, map.m_pblk);
1737 bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
1738 bh->b_size = blks_to_bytes(inode, map.m_len);
1739 }
1740 return err;
1741}
1742
1743static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
1744 struct buffer_head *bh_result, int create)
1745{
1746 return __get_data_block(inode, iblock, bh_result, create,
1747 F2FS_GET_BLOCK_DIO, NULL,
1748 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1749 true);
1750}
1751
1752static int get_data_block_dio(struct inode *inode, sector_t iblock,
1753 struct buffer_head *bh_result, int create)
1754{
1755 return __get_data_block(inode, iblock, bh_result, create,
1756 F2FS_GET_BLOCK_DIO, NULL,
1757 f2fs_rw_hint_to_seg_type(inode->i_write_hint),
1758 false);
1759}
1760
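/*
 * Report xattr locations for FIEMAP: the inline xattr area is emitted as an
 * unaligned inline extent inside the inode block itself, and the external
 * xattr node (if any) as one fs block; both use logical offset 0.
 */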
1761static int f2fs_xattr_fiemap(struct inode *inode,
1762 struct fiemap_extent_info *fieinfo)
1763{
1764 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1765 struct page *page;
1766 struct node_info ni;
1767 __u64 phys = 0, len;
1768 __u32 flags;
1769 nid_t xnid = F2FS_I(inode)->i_xattr_nid;
1770 int err = 0;
1771
1772 if (f2fs_has_inline_xattr(inode)) {
1773 int offset;
1774
1775 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1776 inode->i_ino, false);
1777 if (!page)
1778 return -ENOMEM;
1779
1780 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
1781 if (err) {
1782 f2fs_put_page(page, 1);
1783 return err;
1784 }
1785
1786 phys = blks_to_bytes(inode, ni.blk_addr);
1787 offset = offsetof(struct f2fs_inode, i_addr) +
1788 sizeof(__le32) * (DEF_ADDRS_PER_INODE -
1789 get_inline_xattr_addrs(inode));
1790
1791 phys += offset;
1792 len = inline_xattr_size(inode);
1793
1794 f2fs_put_page(page, 1);
1795
1796 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED;
1797
1798 if (!xnid)
1799 flags |= FIEMAP_EXTENT_LAST;
1800
1801 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1802 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1803 if (err || err == 1)
1804 return err;
1805 }
1806
1807 if (xnid) {
1808 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1809 if (!page)
1810 return -ENOMEM;
1811
1812 err = f2fs_get_node_info(sbi, xnid, &ni);
1813 if (err) {
1814 f2fs_put_page(page, 1);
1815 return err;
1816 }
1817
1818 phys = blks_to_bytes(inode, ni.blk_addr);
1819 len = inode->i_sb->s_blocksize;
1820
1821 f2fs_put_page(page, 1);
1822
1823 flags = FIEMAP_EXTENT_LAST;
1824 }
1825
1826 if (phys) {
1827 err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags);
1828 trace_f2fs_fiemap(inode, 0, phys, len, flags, err);
1829 }
1830
1831 return (err < 0 ? err : 0);
1832}
1833
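/*
 * Largest number of data blocks reachable from one inode, following the
 * node geometry the function below walks:
 *
 *	ADDRS_PER_INODE				direct pointers in the inode
 *	+ 2 * ADDRS_PER_BLOCK			two direct node blocks
 *	+ 2 * NIDS_PER_BLOCK * ADDRS_PER_BLOCK	two indirect node blocks
 *	+ NIDS_PER_BLOCK^2 * ADDRS_PER_BLOCK	one double-indirect node block
 */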
1834static loff_t max_inode_blocks(struct inode *inode)
1835{
1836 loff_t result = ADDRS_PER_INODE(inode);
1837 loff_t leaf_count = ADDRS_PER_BLOCK(inode);
1838
1839 /* two direct node blocks */
1840 result += (leaf_count * 2);
1841
1842 /* two indirect node blocks */
1843 leaf_count *= NIDS_PER_BLOCK;
1844 result += (leaf_count * 2);
1845
1846 /* one double indirect node block */
1847 leaf_count *= NIDS_PER_BLOCK;
1848 result += leaf_count;
1849
1850 return result;
1851}
1852
1853int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
1854 u64 start, u64 len)
1855{
1856 struct f2fs_map_blocks map;
1857 sector_t start_blk, last_blk;
1858 pgoff_t next_pgofs;
1859 u64 logical = 0, phys = 0, size = 0;
1860 u32 flags = 0;
1861 int ret = 0;
1862 bool compr_cluster = false;
1863 unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
1864 loff_t maxbytes;
1865
1866 if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
1867 ret = f2fs_precache_extents(inode);
1868 if (ret)
1869 return ret;
1870 }
1871
1872 ret = fiemap_prep(inode, fieinfo, start, &len, FIEMAP_FLAG_XATTR);
1873 if (ret)
1874 return ret;
1875
1876 inode_lock(inode);
1877
1878 maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;
1879 if (start > maxbytes) {
1880 ret = -EFBIG;
1881 goto out;
1882 }
1883
1884 if (len > maxbytes || (maxbytes - len) < start)
1885 len = maxbytes - start;
1886
1887 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
1888 ret = f2fs_xattr_fiemap(inode, fieinfo);
1889 goto out;
1890 }
1891
1892 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
1893 ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
1894 if (ret != -EAGAIN)
1895 goto out;
1896 }
1897
1898 if (bytes_to_blks(inode, len) == 0)
1899 len = blks_to_bytes(inode, 1);
1900
1901 start_blk = bytes_to_blks(inode, start);
1902 last_blk = bytes_to_blks(inode, start + len - 1);
1903
1904next:
1905 memset(&map, 0, sizeof(map));
1906 map.m_lblk = start_blk;
1907 map.m_len = bytes_to_blks(inode, len);
1908 map.m_next_pgofs = &next_pgofs;
1909 map.m_seg_type = NO_CHECK_TYPE;
1910
1911 if (compr_cluster)
1912 map.m_len = cluster_size - 1;
1913
1914 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
1915 if (ret)
1916 goto out;
1917
1918 /* HOLE */
1919 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
1920 start_blk = next_pgofs;
1921
1922 if (blks_to_bytes(inode, start_blk) < blks_to_bytes(inode,
1923 max_inode_blocks(inode)))
1924 goto prep_next;
1925
1926 flags |= FIEMAP_EXTENT_LAST;
1927 }
1928
1929 if (size) {
1930 flags |= FIEMAP_EXTENT_MERGED;
1931 if (IS_ENCRYPTED(inode))
1932 flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;
1933
1934 ret = fiemap_fill_next_extent(fieinfo, logical,
1935 phys, size, flags);
1936 trace_f2fs_fiemap(inode, logical, phys, size, flags, ret);
1937 if (ret)
1938 goto out;
1939 size = 0;
1940 }
1941
1942 if (start_blk > last_blk)
1943 goto out;
1944
1945 if (compr_cluster) {
1946 compr_cluster = false;
1947
1948
1949 logical = blks_to_bytes(inode, start_blk - 1);
1950 phys = blks_to_bytes(inode, map.m_pblk);
1951 size = blks_to_bytes(inode, cluster_size);
1952
1953 flags |= FIEMAP_EXTENT_ENCODED;
1954
1955 start_blk += cluster_size - 1;
1956
1957 if (start_blk > last_blk)
1958 goto out;
1959
1960 goto prep_next;
1961 }
1962
1963 if (map.m_pblk == COMPRESS_ADDR) {
1964 compr_cluster = true;
1965 start_blk++;
1966 goto prep_next;
1967 }
1968
1969 logical = blks_to_bytes(inode, start_blk);
1970 phys = blks_to_bytes(inode, map.m_pblk);
1971 size = blks_to_bytes(inode, map.m_len);
1972 flags = 0;
1973 if (map.m_flags & F2FS_MAP_UNWRITTEN)
1974 flags = FIEMAP_EXTENT_UNWRITTEN;
1975
1976 start_blk += bytes_to_blks(inode, size);
1977
1978prep_next:
1979 cond_resched();
1980 if (fatal_signal_pending(current))
1981 ret = -EINTR;
1982 else
1983 goto next;
1984out:
1985 if (ret == 1)
1986 ret = 0;
1987
1988 inode_unlock(inode);
1989 return ret;
1990}
1991
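/*
 * Reads normally stop at i_size, but fs-verity files may be read past EOF:
 * f2fs stores the verity metadata (Merkle tree) beyond i_size, so the limit
 * is lifted to s_maxbytes while verity is enabled or being built.
 */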
1992static inline loff_t f2fs_readpage_limit(struct inode *inode)
1993{
1994 if (IS_ENABLED(CONFIG_FS_VERITY) &&
1995 (IS_VERITY(inode) || f2fs_verity_in_progress(inode)))
1996 return inode->i_sb->s_maxbytes;
1997
1998 return i_size_read(inode);
1999}
2000
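/*
 * Read one pagecache page on the buffered read path: reuse the caller's
 * previous f2fs_map_blocks() result when the page falls inside it, zero out
 * pages beyond EOF, and merge the page into *bio_ret when the target block
 * is contiguous with the bio's last block, submitting and reallocating the
 * bio otherwise.
 */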
2001static int f2fs_read_single_page(struct inode *inode, struct page *page,
2002 unsigned nr_pages,
2003 struct f2fs_map_blocks *map,
2004 struct bio **bio_ret,
2005 sector_t *last_block_in_bio,
2006 bool is_readahead)
2007{
2008 struct bio *bio = *bio_ret;
2009 const unsigned blocksize = blks_to_bytes(inode, 1);
2010 sector_t block_in_file;
2011 sector_t last_block;
2012 sector_t last_block_in_file;
2013 sector_t block_nr;
2014 int ret = 0;
2015
2016 block_in_file = (sector_t)page_index(page);
2017 last_block = block_in_file + nr_pages;
2018 last_block_in_file = bytes_to_blks(inode,
2019 f2fs_readpage_limit(inode) + blocksize - 1);
2020 if (last_block > last_block_in_file)
2021 last_block = last_block_in_file;
2022
2023	/* just zero out the page which is beyond EOF */
2024 if (block_in_file >= last_block)
2025 goto zero_out;
2026 /*
2027 * Map blocks using the previous result first.
2028 */
2029 if ((map->m_flags & F2FS_MAP_MAPPED) &&
2030 block_in_file > map->m_lblk &&
2031 block_in_file < (map->m_lblk + map->m_len))
2032 goto got_it;
2033
2034 /*
2035 * Then do more f2fs_map_blocks() calls until we are
2036 * done with this page.
2037 */
2038 map->m_lblk = block_in_file;
2039 map->m_len = last_block - block_in_file;
2040
2041 ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
2042 if (ret)
2043 goto out;
2044got_it:
2045 if ((map->m_flags & F2FS_MAP_MAPPED)) {
2046 block_nr = map->m_pblk + block_in_file - map->m_lblk;
2047 SetPageMappedToDisk(page);
2048
2049 if (!PageUptodate(page) && (!PageSwapCache(page) &&
2050 !cleancache_get_page(page))) {
2051 SetPageUptodate(page);
2052 goto confused;
2053 }
2054
2055 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
2056 DATA_GENERIC_ENHANCE_READ)) {
2057 ret = -EFSCORRUPTED;
2058 goto out;
2059 }
2060 } else {
2061zero_out:
2062 zero_user_segment(page, 0, PAGE_SIZE);
2063 if (f2fs_need_verity(inode, page->index) &&
2064 !fsverity_verify_page(page)) {
2065 ret = -EIO;
2066 goto out;
2067 }
2068 if (!PageUptodate(page))
2069 SetPageUptodate(page);
2070 unlock_page(page);
2071 goto out;
2072 }
2073
2074 /*
2075 * This page will go to BIO. Do we need to send this
2076 * BIO off first?
2077 */
2078 if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio,
2079 *last_block_in_bio, block_nr) ||
2080 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2081submit_and_realloc:
2082 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2083 bio = NULL;
2084 }
2085	if (!bio) {
2086 bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
2087 is_readahead ? REQ_RAHEAD : 0, page->index,
2088 false);
2089 if (IS_ERR(bio)) {
2090 ret = PTR_ERR(bio);
2091 bio = NULL;
2092 goto out;
2093 }
2094 }
2095
2096 /*
2097 * If the page is under writeback, we need to wait for
2098 * its completion to see the correct decrypted data.
2099 */
2100 f2fs_wait_on_block_writeback(inode, block_nr);
2101
2102 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2103 goto submit_and_realloc;
2104
2105 inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
2106 f2fs_update_iostat(F2FS_I_SB(inode), FS_DATA_READ_IO, F2FS_BLKSIZE);
2107 ClearPageError(page);
2108 *last_block_in_bio = block_nr;
2109 goto out;
2110confused:
2111 if (bio) {
2112 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2113 bio = NULL;
2114 }
2115 unlock_page(page);
2116out:
2117 *bio_ret = bio;
2118 return ret;
2119}
2120
2121#ifdef CONFIG_F2FS_FS_COMPRESSION
2122int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
2123 unsigned nr_pages, sector_t *last_block_in_bio,
2124 bool is_readahead, bool for_write)
2125{
2126 struct dnode_of_data dn;
2127 struct inode *inode = cc->inode;
2128 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2129 struct bio *bio = *bio_ret;
2130 unsigned int start_idx = cc->cluster_idx << cc->log_cluster_size;
2131 sector_t last_block_in_file;
2132 const unsigned blocksize = blks_to_bytes(inode, 1);
2133 struct decompress_io_ctx *dic = NULL;
2134 int i;
2135 int ret = 0;
2136
2137 f2fs_bug_on(sbi, f2fs_cluster_is_empty(cc));
2138
2139 last_block_in_file = bytes_to_blks(inode,
2140 f2fs_readpage_limit(inode) + blocksize - 1);
2141
2142 /* get rid of pages beyond EOF */
2143 for (i = 0; i < cc->cluster_size; i++) {
2144 struct page *page = cc->rpages[i];
2145
2146 if (!page)
2147 continue;
2148 if ((sector_t)page->index >= last_block_in_file) {
2149 zero_user_segment(page, 0, PAGE_SIZE);
2150 if (!PageUptodate(page))
2151 SetPageUptodate(page);
2152 } else if (!PageUptodate(page)) {
2153 continue;
2154 }
2155 unlock_page(page);
2156 if (for_write)
2157 put_page(page);
2158 cc->rpages[i] = NULL;
2159 cc->nr_rpages--;
2160 }
2161
2162 /* we are done since all pages are beyond EOF */
2163 if (f2fs_cluster_is_empty(cc))
2164 goto out;
2165
2166 set_new_dnode(&dn, inode, NULL, NULL, 0);
2167 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
2168 if (ret)
2169 goto out;
2170
2171 f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR);
2172
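	/*
	 * Slot 0 of a compressed cluster holds COMPRESS_ADDR (checked above);
	 * the compressed blocks occupy the following slots, terminated by
	 * the first invalid block address.
	 */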
2173 for (i = 1; i < cc->cluster_size; i++) {
2174 block_t blkaddr;
2175
2176 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2177 dn.ofs_in_node + i);
2178
2179 if (!__is_valid_data_blkaddr(blkaddr))
2180 break;
2181
2182 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
2183 ret = -EFAULT;
2184 goto out_put_dnode;
2185 }
2186 cc->nr_cpages++;
2187 }
2188
2189 /* nothing to decompress */
2190 if (cc->nr_cpages == 0) {
2191 ret = 0;
2192 goto out_put_dnode;
2193 }
2194
2195 dic = f2fs_alloc_dic(cc);
2196 if (IS_ERR(dic)) {
2197 ret = PTR_ERR(dic);
2198 goto out_put_dnode;
2199 }
2200
2201 for (i = 0; i < cc->nr_cpages; i++) {
2202 struct page *page = dic->cpages[i];
2203 block_t blkaddr;
2204 struct bio_post_read_ctx *ctx;
2205
2206 blkaddr = data_blkaddr(dn.inode, dn.node_page,
2207 dn.ofs_in_node + i + 1);
2208
2209 f2fs_wait_on_block_writeback(inode, blkaddr);
2210
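		/* a compress cache hit means no block read is needed */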
2211 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2212 if (atomic_dec_and_test(&dic->remaining_pages))
2213 f2fs_decompress_cluster(dic);
2214 continue;
2215 }
2216
2217 if (bio && (!page_is_mergeable(sbi, bio,
2218 *last_block_in_bio, blkaddr) ||
2219 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2220submit_and_realloc:
2221 __submit_bio(sbi, bio, DATA);
2222 bio = NULL;
2223 }
2224
2225 if (!bio) {
2226 bio = f2fs_grab_read_bio(inode, blkaddr, nr_pages,
2227 is_readahead ? REQ_RAHEAD : 0,
2228 page->index, for_write);
2229 if (IS_ERR(bio)) {
2230 ret = PTR_ERR(bio);
2231 f2fs_decompress_end_io(dic, ret);
2232 f2fs_put_dnode(&dn);
2233 *bio_ret = NULL;
2234 return ret;
2235 }
2236 }
2237
2238 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2239 goto submit_and_realloc;
2240
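		/*
		 * Mark the bio for decompression and hold an extra reference
		 * on the decompress context for this in-flight page; the
		 * read completion path drops it.
		 */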
2241 ctx = bio->bi_private;
2242 ctx->enabled_steps |= STEP_DECOMPRESS;
2243 refcount_inc(&dic->refcnt);
2244
2245 inc_page_count(sbi, F2FS_RD_DATA);
2246 f2fs_update_iostat(sbi, FS_DATA_READ_IO, F2FS_BLKSIZE);
2247 f2fs_update_iostat(sbi, FS_CDATA_READ_IO, F2FS_BLKSIZE);
2248 ClearPageError(page);
2249 *last_block_in_bio = blkaddr;
2250 }
2251
2252 f2fs_put_dnode(&dn);
2253
2254 *bio_ret = bio;
2255 return 0;
2256
2257out_put_dnode:
2258 f2fs_put_dnode(&dn);
2259out:
2260 for (i = 0; i < cc->cluster_size; i++) {
2261 if (cc->rpages[i]) {
2262 ClearPageUptodate(cc->rpages[i]);
2263 ClearPageError(cc->rpages[i]);
2264 unlock_page(cc->rpages[i]);
2265 }
2266 }
2267 *bio_ret = bio;
2268 return ret;
2269}
2270#endif
2271
2272/*
2273 * This function was originally taken from fs/mpage.c, and customized for f2fs.
2274 * The major change stems from block_size == page_size in f2fs by default.
2275 */
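/*
 * Called either with a readahead_control (rac != NULL) from ->readahead,
 * or with a single locked page (rac == NULL) from ->readpage.
 */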
2276static int f2fs_mpage_readpages(struct inode *inode,
2277 struct readahead_control *rac, struct page *page)
2278{
2279 struct bio *bio = NULL;
2280 sector_t last_block_in_bio = 0;
2281 struct f2fs_map_blocks map;
2282#ifdef CONFIG_F2FS_FS_COMPRESSION
2283 struct compress_ctx cc = {
2284 .inode = inode,
2285 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2286 .cluster_size = F2FS_I(inode)->i_cluster_size,
2287 .cluster_idx = NULL_CLUSTER,
2288 .rpages = NULL,
2289 .cpages = NULL,
2290 .nr_rpages = 0,
2291 .nr_cpages = 0,
2292 };
2293#endif
2294 unsigned nr_pages = rac ? readahead_count(rac) : 1;
2295 unsigned max_nr_pages = nr_pages;
2296 int ret = 0;
2297
2298 map.m_pblk = 0;
2299 map.m_lblk = 0;
2300 map.m_len = 0;
2301 map.m_flags = 0;
2302 map.m_next_pgofs = NULL;
2303 map.m_next_extent = NULL;
2304 map.m_seg_type = NO_CHECK_TYPE;
2305 map.m_may_create = false;
2306
2307 for (; nr_pages; nr_pages--) {
2308 if (rac) {
2309 page = readahead_page(rac);
2310 prefetchw(&page->flags);
2311 }
2312
2313#ifdef CONFIG_F2FS_FS_COMPRESSION
2314 if (f2fs_compressed_file(inode)) {
2315			/* there are remaining compressed pages, submit them */
2316 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2317 ret = f2fs_read_multi_pages(&cc, &bio,
2318 max_nr_pages,
2319 &last_block_in_bio,
2320 rac != NULL, false);
2321 f2fs_destroy_compress_ctx(&cc, false);
2322 if (ret)
2323 goto set_error_page;
2324 }
2325 ret = f2fs_is_compressed_cluster(inode, page->index);
2326 if (ret < 0)
2327 goto set_error_page;
2328 else if (!ret)
2329 goto read_single_page;
2330
2331 ret = f2fs_init_compress_ctx(&cc);
2332 if (ret)
2333 goto set_error_page;
2334
2335 f2fs_compress_ctx_add_page(&cc, page);
2336
2337 goto next_page;
2338 }
2339read_single_page:
2340#endif
2341
2342 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2343 &bio, &last_block_in_bio, rac);
2344 if (ret) {
2345#ifdef CONFIG_F2FS_FS_COMPRESSION
2346set_error_page:
2347#endif
2348 SetPageError(page);
2349 zero_user_segment(page, 0, PAGE_SIZE);
2350 unlock_page(page);
2351 }
2352#ifdef CONFIG_F2FS_FS_COMPRESSION
2353next_page:
2354#endif
2355 if (rac)
2356 put_page(page);
2357
2358#ifdef CONFIG_F2FS_FS_COMPRESSION
2359 if (f2fs_compressed_file(inode)) {
2360 /* last page */
2361 if (nr_pages == 1 && !f2fs_cluster_is_empty(&cc)) {
2362 ret = f2fs_read_multi_pages(&cc, &bio,
2363 max_nr_pages,
2364 &last_block_in_bio,
2365 rac != NULL, false);
2366 f2fs_destroy_compress_ctx(&cc, false);
2367 }
2368 }
2369#endif
2370 }
2371 if (bio)
2372 __submit_bio(F2FS_I_SB(inode), bio, DATA);
2373 return ret;
2374}
2375
2376static int f2fs_read_data_page(struct file *file, struct page *page)
2377{
2378 struct inode *inode = page_file_mapping(page)->host;
2379 int ret = -EAGAIN;
2380
2381 trace_f2fs_readpage(page, DATA);
2382
2383 if (!f2fs_is_compress_backend_ready(inode)) {
2384 unlock_page(page);
2385 return -EOPNOTSUPP;
2386 }
2387
2388 /* If the file has inline data, try to read it directly */
2389 if (f2fs_has_inline_data(inode))
2390 ret = f2fs_read_inline_data(inode, page);
2391 if (ret == -EAGAIN)
2392 ret = f2fs_mpage_readpages(inode, NULL, page);
2393 return ret;
2394}
2395
2396static void f2fs_readahead(struct readahead_control *rac)
2397{
2398 struct inode *inode = rac->mapping->host;
2399
2400 trace_f2fs_readpages(inode, readahead_index(rac), readahead_count(rac));
2401
2402 if (!f2fs_is_compress_backend_ready(inode))
2403 return;
2404
2405	/* If the file has inline data, skip readahead */
2406 if (f2fs_has_inline_data(inode))
2407 return;
2408
2409 f2fs_mpage_readpages(inode, rac, NULL);
2410}
2411
2412int f2fs_encrypt_one_page(struct f2fs_io_info *fio)
2413{
2414 struct inode *inode = fio->page->mapping->host;
2415 struct page *mpage, *page;
2416 gfp_t gfp_flags = GFP_NOFS;
2417
2418 if (!f2fs_encrypted_file(inode))
2419 return 0;
2420
2421 page = fio->compressed_page ? fio->compressed_page : fio->page;
2422
2423 /* wait for GCed page writeback via META_MAPPING */
2424 f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
2425
2426 if (fscrypt_inode_uses_inline_crypto(inode))
2427 return 0;
2428
2429retry_encrypt:
2430 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2431 PAGE_SIZE, 0, gfp_flags);
2432 if (IS_ERR(fio->encrypted_page)) {
2433 /* flush pending IOs and wait for a while in the ENOMEM case */
2434 if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
2435 f2fs_flush_merged_writes(fio->sbi);
2436 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
2437 gfp_flags |= __GFP_NOFAIL;
2438 goto retry_encrypt;
2439 }
2440 return PTR_ERR(fio->encrypted_page);
2441 }
2442
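	/*
	 * If the old block is cached in META_MAPPING (e.g. by GC), update
	 * the cached copy with the freshly encrypted data so the latest
	 * contents get written out.
	 */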
2443 mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
2444 if (mpage) {
2445 if (PageUptodate(mpage))
2446 memcpy(page_address(mpage),
2447 page_address(fio->encrypted_page), PAGE_SIZE);
2448 f2fs_put_page(mpage, 1);
2449 }
2450 return 0;
2451}
2452
2453static inline bool check_inplace_update_policy(struct inode *inode,
2454 struct f2fs_io_info *fio)
2455{
2456 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2457 unsigned int policy = SM_I(sbi)->ipu_policy;
2458
2459 if (policy & (0x1 << F2FS_IPU_FORCE))
2460 return true;
2461 if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
2462 return true;
2463 if (policy & (0x1 << F2FS_IPU_UTIL) &&
2464 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2465 return true;
2466 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && f2fs_need_SSR(sbi) &&
2467 utilization(sbi) > SM_I(sbi)->min_ipu_util)
2468 return true;
2469
2470 /*
2471	 * IPU for rewriting async pages
2472 */
2473 if (policy & (0x1 << F2FS_IPU_ASYNC) &&
2474 fio && fio->op == REQ_OP_WRITE &&
2475 !(fio->op_flags & REQ_SYNC) &&
2476 !IS_ENCRYPTED(inode))
2477 return true;
2478
2479 /* this is only set during fdatasync */
2480 if (policy & (0x1 << F2FS_IPU_FSYNC) &&
2481 is_inode_flag_set(inode, FI_NEED_IPU))
2482 return true;
2483
2484 if (unlikely(fio && is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2485 !f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2486 return true;
2487
2488 return false;
2489}
2490
2491bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
2492{
2493 /* swap file is migrating in aligned write mode */
2494 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2495 return false;
2496
2497 if (f2fs_is_pinned_file(inode))
2498 return true;
2499
2500	/* if this is a cold file, we should overwrite to avoid fragmentation */
2501 if (file_is_cold(inode))
2502 return true;
2503
2504 return check_inplace_update_policy(inode, fio);
2505}
2506
2507bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
2508{
2509 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2510
2511 if (f2fs_lfs_mode(sbi))
2512 return true;
2513 if (S_ISDIR(inode->i_mode))
2514 return true;
2515 if (IS_NOQUOTA(inode))
2516 return true;
2517 if (f2fs_is_atomic_file(inode))
2518 return true;
2519 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
2520 return true;
2521
2522 /* swap file is migrating in aligned write mode */
2523 if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
2524 return true;
2525
2526 if (fio) {
2527 if (page_private_gcing(fio->page))
2528 return true;
2529 if (page_private_dummy(fio->page))
2530 return true;
2531 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2532 f2fs_is_checkpointed_data(sbi, fio->old_blkaddr)))
2533 return true;
2534 }
2535 return false;
2536}
2537
2538static inline bool need_inplace_update(struct f2fs_io_info *fio)
2539{
2540 struct inode *inode = fio->page->mapping->host;
2541
2542 if (f2fs_should_update_outplace(inode, fio))
2543 return false;
2544
2545 return f2fs_should_update_inplace(inode, fio);
2546}
2547
2548int f2fs_do_write_data_page(struct f2fs_io_info *fio)
2549{
2550 struct page *page = fio->page;
2551 struct inode *inode = page->mapping->host;
2552 struct dnode_of_data dn;
2553	struct extent_info ei = {0, 0, 0};
2554 struct node_info ni;
2555 bool ipu_force = false;
2556 int err = 0;
2557
2558 set_new_dnode(&dn, inode, NULL, NULL, 0);
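	/*
	 * In-place-update fast path: if the old block address is in the
	 * extent cache, the dnode lookup can be skipped entirely.
	 */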
2559 if (need_inplace_update(fio) &&
2560 f2fs_lookup_extent_cache(inode, page->index, &ei)) {
2561 fio->old_blkaddr = ei.blk + page->index - ei.fofs;
2562
2563 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2564 DATA_GENERIC_ENHANCE))
2565 return -EFSCORRUPTED;
2566
2567 ipu_force = true;
2568 fio->need_lock = LOCK_DONE;
2569 goto got_it;
2570 }
2571
2572	/* Avoid deadlock between page->lock and f2fs_lock_op() */
2573 if (fio->need_lock == LOCK_REQ && !f2fs_trylock_op(fio->sbi))
2574 return -EAGAIN;
2575
2576 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2577 if (err)
2578 goto out;
2579
2580 fio->old_blkaddr = dn.data_blkaddr;
2581
2582 /* This page is already truncated */
2583 if (fio->old_blkaddr == NULL_ADDR) {
2584 ClearPageUptodate(page);
2585 clear_page_private_gcing(page);
2586 goto out_writepage;
2587 }
2588got_it:
2589 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2590 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
2591 DATA_GENERIC_ENHANCE)) {
2592 err = -EFSCORRUPTED;
2593 goto out_writepage;
2594 }
2595 /*
2596	 * If the current allocation needs SSR,
2597	 * it is better to write the updated data in place.
2598 */
2599 if (ipu_force ||
2600 (__is_valid_data_blkaddr(fio->old_blkaddr) &&
2601 need_inplace_update(fio))) {
2602 err = f2fs_encrypt_one_page(fio);
2603 if (err)
2604 goto out_writepage;
2605
2606 set_page_writeback(page);
2607 ClearPageError(page);
2608 f2fs_put_dnode(&dn);
2609 if (fio->need_lock == LOCK_REQ)
2610 f2fs_unlock_op(fio->sbi);
2611 err = f2fs_inplace_write_data(fio);
2612 if (err) {
2613 if (fscrypt_inode_uses_fs_layer_crypto(inode))
2614 fscrypt_finalize_bounce_page(&fio->encrypted_page);
2615 if (PageWriteback(page))
2616 end_page_writeback(page);
2617 } else {
2618 set_inode_flag(inode, FI_UPDATE_WRITE);
2619 }
2620 trace_f2fs_do_write_data_page(fio->page, IPU);
2621 return err;
2622 }
2623
2624 if (fio->need_lock == LOCK_RETRY) {
2625 if (!f2fs_trylock_op(fio->sbi)) {
2626 err = -EAGAIN;
2627 goto out_writepage;
2628 }
2629 fio->need_lock = LOCK_REQ;
2630 }
2631
2632 err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
2633 if (err)
2634 goto out_writepage;
2635
2636 fio->version = ni.version;
2637
2638 err = f2fs_encrypt_one_page(fio);
2639 if (err)
2640 goto out_writepage;
2641
2642 set_page_writeback(page);
2643 ClearPageError(page);
2644
2645 if (fio->compr_blocks && fio->old_blkaddr == COMPRESS_ADDR)
2646 f2fs_i_compr_blocks_update(inode, fio->compr_blocks - 1, false);
2647
2648 /* LFS mode write path */
2649 f2fs_outplace_write_data(&dn, fio);
2650 trace_f2fs_do_write_data_page(page, OPU);
2651 set_inode_flag(inode, FI_APPEND_WRITE);
2652 if (page->index == 0)
2653 set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
2654out_writepage:
2655 f2fs_put_dnode(&dn);
2656out:
2657 if (fio->need_lock == LOCK_REQ)
2658 f2fs_unlock_op(fio->sbi);
2659 return err;
2660}
2661
2662int f2fs_write_single_data_page(struct page *page, int *submitted,
2663 struct bio **bio,
2664 sector_t *last_block,
2665 struct writeback_control *wbc,
2666 enum iostat_type io_type,
2667 int compr_blocks,
2668 bool allow_balance)
2669{
2670 struct inode *inode = page->mapping->host;
2671 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2672 loff_t i_size = i_size_read(inode);
2673 const pgoff_t end_index = ((unsigned long long)i_size)
2674 >> PAGE_SHIFT;
2675 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2676 unsigned offset = 0;
2677 bool need_balance_fs = false;
2678 int err = 0;
2679 struct f2fs_io_info fio = {
2680 .sbi = sbi,
2681 .ino = inode->i_ino,
2682 .type = DATA,
2683 .op = REQ_OP_WRITE,
2684 .op_flags = wbc_to_write_flags(wbc),
2685 .old_blkaddr = NULL_ADDR,
2686 .page = page,
2687 .encrypted_page = NULL,
2688 .submitted = false,
2689 .compr_blocks = compr_blocks,
2690 .need_lock = LOCK_RETRY,
2691 .io_type = io_type,
2692 .io_wbc = wbc,
2693 .bio = bio,
2694 .last_block = last_block,
2695 };
2696
2697 trace_f2fs_writepage(page, DATA);
2698
2699	/* we should bypass data pages to let the kworker jobs proceed */
2700 if (unlikely(f2fs_cp_error(sbi))) {
2701 mapping_set_error(page->mapping, -EIO);
2702 /*
2703		 * don't drop any dirty dentry pages, to keep the latest
2704 * directory structure.
2705 */
2706 if (S_ISDIR(inode->i_mode))
2707 goto redirty_out;
2708 goto out;
2709 }
2710
2711 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2712 goto redirty_out;
2713
2714 if (page->index < end_index ||
2715 f2fs_verity_in_progress(inode) ||
2716 compr_blocks)
2717 goto write;
2718
2719 /*
2720	 * If the offset is beyond the file size,
2721 * this page does not have to be written to disk.
2722 */
2723 offset = i_size & (PAGE_SIZE - 1);
2724 if ((page->index >= end_index + 1) || !offset)
2725 goto out;
2726
2727 zero_user_segment(page, offset, PAGE_SIZE);
2728write:
2729 if (f2fs_is_drop_cache(inode))
2730 goto out;
2731	/* we should not write the 0'th page, which holds the journal header */
2732 if (f2fs_is_volatile_file(inode) && (!page->index ||
2733 (!wbc->for_reclaim &&
2734 f2fs_available_free_memory(sbi, BASE_CHECK))))
2735 goto redirty_out;
2736
2737 /* Dentry/quota blocks are controlled by checkpoint */
2738 if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) {
2739 /*
2740 * We need to wait for node_write to avoid block allocation during
2741		 * checkpoint. This can only happen for quota writes, which can
2742		 * cause the discard race condition.
2743 */
2744 if (IS_NOQUOTA(inode))
2745 down_read(&sbi->node_write);
2746
2747 fio.need_lock = LOCK_DONE;
2748 err = f2fs_do_write_data_page(&fio);
2749
2750 if (IS_NOQUOTA(inode))
2751 up_read(&sbi->node_write);
2752
2753 goto done;
2754 }
2755
2756 if (!wbc->for_reclaim)
2757 need_balance_fs = true;
2758 else if (has_not_enough_free_secs(sbi, 0, 0))
2759 goto redirty_out;
2760 else
2761 set_inode_flag(inode, FI_HOT_DATA);
2762
2763 err = -EAGAIN;
2764 if (f2fs_has_inline_data(inode)) {
2765 err = f2fs_write_inline_data(inode, page);
2766 if (!err)
2767 goto out;
2768 }
2769
2770 if (err == -EAGAIN) {
2771 err = f2fs_do_write_data_page(&fio);
2772 if (err == -EAGAIN) {
2773 fio.need_lock = LOCK_REQ;
2774 err = f2fs_do_write_data_page(&fio);
2775 }
2776 }
2777
2778 if (err) {
2779 file_set_keep_isize(inode);
2780 } else {
2781 spin_lock(&F2FS_I(inode)->i_size_lock);
2782 if (F2FS_I(inode)->last_disk_size < psize)
2783 F2FS_I(inode)->last_disk_size = psize;
2784 spin_unlock(&F2FS_I(inode)->i_size_lock);
2785 }
2786
2787done:
2788 if (err && err != -ENOENT)
2789 goto redirty_out;
2790
2791out:
2792 inode_dec_dirty_pages(inode);
2793 if (err) {
2794 ClearPageUptodate(page);
2795 clear_page_private_gcing(page);
2796 }
2797
2798 if (wbc->for_reclaim) {
2799 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2800 clear_inode_flag(inode, FI_HOT_DATA);
2801 f2fs_remove_dirty_inode(inode);
2802 submitted = NULL;
2803 }
2804 unlock_page(page);
2805 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2806 !F2FS_I(inode)->cp_task && allow_balance)
2807 f2fs_balance_fs(sbi, need_balance_fs);
2808
2809 if (unlikely(f2fs_cp_error(sbi))) {
2810 f2fs_submit_merged_write(sbi, DATA);
2811 f2fs_submit_merged_ipu_write(sbi, bio, NULL);
2812 submitted = NULL;
2813 }
2814
2815 if (submitted)
2816 *submitted = fio.submitted ? 1 : 0;
2817
2818 return 0;
2819
2820redirty_out:
2821 redirty_page_for_writepage(wbc, page);
2822 /*
2823	 * pageout() in MM translates EAGAIN into a call to handle_write_error()
2824	 * -> mapping_set_error() -> set_bit(AS_EIO, ...).
2825	 * file_write_and_wait_range() will then see the EIO error, which is
2826	 * critical for fsync() to report an atomic_write failure to the user.
2827 */
2828 if (!err || wbc->for_reclaim)
2829 return AOP_WRITEPAGE_ACTIVATE;
2830 unlock_page(page);
2831 return err;
2832}
2833
2834static int f2fs_write_data_page(struct page *page,
2835 struct writeback_control *wbc)
2836{
2837#ifdef CONFIG_F2FS_FS_COMPRESSION
2838 struct inode *inode = page->mapping->host;
2839
2840 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
2841 goto out;
2842
2843 if (f2fs_compressed_file(inode)) {
2844 if (f2fs_is_compressed_cluster(inode, page->index)) {
2845 redirty_page_for_writepage(wbc, page);
2846 return AOP_WRITEPAGE_ACTIVATE;
2847 }
2848 }
2849out:
2850#endif
2851
2852 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2853 wbc, FS_DATA_IO, 0, true);
2854}
2855
2856/*
2857 * This function was copied from write_cache_pages in mm/page-writeback.c.
2858 * The major change is that it writes cold data pages separately from
2859 * warm/hot data pages.
2860 */
2861static int f2fs_write_cache_pages(struct address_space *mapping,
2862 struct writeback_control *wbc,
2863 enum iostat_type io_type)
2864{
2865 int ret = 0;
2866 int done = 0, retry = 0;
2867 struct pagevec pvec;
2868 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2869 struct bio *bio = NULL;
2870 sector_t last_block;
2871#ifdef CONFIG_F2FS_FS_COMPRESSION
2872 struct inode *inode = mapping->host;
2873 struct compress_ctx cc = {
2874 .inode = inode,
2875 .log_cluster_size = F2FS_I(inode)->i_log_cluster_size,
2876 .cluster_size = F2FS_I(inode)->i_cluster_size,
2877 .cluster_idx = NULL_CLUSTER,
2878 .rpages = NULL,
2879 .nr_rpages = 0,
2880 .cpages = NULL,
2881 .rbuf = NULL,
2882 .cbuf = NULL,
2883 .rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
2884 .private = NULL,
2885 };
2886#endif
2887 int nr_pages;
2888 pgoff_t index;
2889 pgoff_t end; /* Inclusive */
2890 pgoff_t done_index;
2891 int range_whole = 0;
2892 xa_mark_t tag;
2893 int nwritten = 0;
2894 int submitted = 0;
2895 int i;
2896
2897 pagevec_init(&pvec);
2898
2899 if (get_dirty_pages(mapping->host) <=
2900 SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
2901 set_inode_flag(mapping->host, FI_HOT_DATA);
2902 else
2903 clear_inode_flag(mapping->host, FI_HOT_DATA);
2904
2905 if (wbc->range_cyclic) {
2906 index = mapping->writeback_index; /* prev offset */
2907 end = -1;
2908 } else {
2909 index = wbc->range_start >> PAGE_SHIFT;
2910 end = wbc->range_end >> PAGE_SHIFT;
2911 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2912 range_whole = 1;
2913 }
2914 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2915 tag = PAGECACHE_TAG_TOWRITE;
2916 else
2917 tag = PAGECACHE_TAG_DIRTY;
2918retry:
2919 retry = 0;
2920 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2921 tag_pages_for_writeback(mapping, index, end);
2922 done_index = index;
2923 while (!done && !retry && (index <= end)) {
2924 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2925 tag);
2926 if (nr_pages == 0)
2927 break;
2928
2929 for (i = 0; i < nr_pages; i++) {
2930 struct page *page = pvec.pages[i];
2931 bool need_readd;
2932readd:
2933 need_readd = false;
2934#ifdef CONFIG_F2FS_FS_COMPRESSION
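			/*
			 * Batch pages of a compressed file into the cluster
			 * context; the cluster is written out once the next
			 * page can no longer be merged into it.
			 */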
2935 if (f2fs_compressed_file(inode)) {
2936 ret = f2fs_init_compress_ctx(&cc);
2937 if (ret) {
2938 done = 1;
2939 break;
2940 }
2941
2942 if (!f2fs_cluster_can_merge_page(&cc,
2943 page->index)) {
2944 ret = f2fs_write_multi_pages(&cc,
2945 &submitted, wbc, io_type);
2946 if (!ret)
2947 need_readd = true;
2948 goto result;
2949 }
2950
2951 if (unlikely(f2fs_cp_error(sbi)))
2952 goto lock_page;
2953
2954 if (f2fs_cluster_is_empty(&cc)) {
2955 void *fsdata = NULL;
2956 struct page *pagep;
2957 int ret2;
2958
2959 ret2 = f2fs_prepare_compress_overwrite(
2960 inode, &pagep,
2961 page->index, &fsdata);
2962 if (ret2 < 0) {
2963 ret = ret2;
2964 done = 1;
2965 break;
2966 } else if (ret2 &&
2967 !f2fs_compress_write_end(inode,
2968 fsdata, page->index,
2969 1)) {
2970 retry = 1;
2971 break;
2972 }
2973 } else {
2974 goto lock_page;
2975 }
2976 }
2977#endif
2978			/* give priority to WB_SYNC threads */
2979 if (atomic_read(&sbi->wb_sync_req[DATA]) &&
2980 wbc->sync_mode == WB_SYNC_NONE) {
2981 done = 1;
2982 break;
2983 }
2984#ifdef CONFIG_F2FS_FS_COMPRESSION
2985lock_page:
2986#endif
2987 done_index = page->index;
2988retry_write:
2989 lock_page(page);
2990
2991 if (unlikely(page->mapping != mapping)) {
2992continue_unlock:
2993 unlock_page(page);
2994 continue;
2995 }
2996
2997 if (!PageDirty(page)) {
2998 /* someone wrote it for us */
2999 goto continue_unlock;
3000 }
3001
3002 if (PageWriteback(page)) {
3003 if (wbc->sync_mode != WB_SYNC_NONE)
3004 f2fs_wait_on_page_writeback(page,
3005 DATA, true, true);
3006 else
3007 goto continue_unlock;
3008 }
3009
3010 if (!clear_page_dirty_for_io(page))
3011 goto continue_unlock;
3012
3013#ifdef CONFIG_F2FS_FS_COMPRESSION
3014 if (f2fs_compressed_file(inode)) {
3015 get_page(page);
3016 f2fs_compress_ctx_add_page(&cc, page);
3017 continue;
3018 }
3019#endif
3020 ret = f2fs_write_single_data_page(page, &submitted,
3021 &bio, &last_block, wbc, io_type,
3022 0, true);
3023 if (ret == AOP_WRITEPAGE_ACTIVATE)
3024 unlock_page(page);
3025#ifdef CONFIG_F2FS_FS_COMPRESSION
3026result:
3027#endif
3028 nwritten += submitted;
3029 wbc->nr_to_write -= submitted;
3030
3031 if (unlikely(ret)) {
3032 /*
3033 * keep nr_to_write, since vfs uses this to
3034 * get # of written pages.
3035 */
3036 if (ret == AOP_WRITEPAGE_ACTIVATE) {
3037 ret = 0;
3038 goto next;
3039 } else if (ret == -EAGAIN) {
3040 ret = 0;
3041 if (wbc->sync_mode == WB_SYNC_ALL) {
3042 cond_resched();
3043 congestion_wait(BLK_RW_ASYNC,
3044 DEFAULT_IO_TIMEOUT);
3045 goto retry_write;
3046 }
3047 goto next;
3048 }
3049 done_index = page->index + 1;
3050 done = 1;
3051 break;
3052 }
3053
3054 if (wbc->nr_to_write <= 0 &&
3055 wbc->sync_mode == WB_SYNC_NONE) {
3056 done = 1;
3057 break;
3058 }
3059next:
3060 if (need_readd)
3061 goto readd;
3062 }
3063 pagevec_release(&pvec);
3064 cond_resched();
3065 }
3066#ifdef CONFIG_F2FS_FS_COMPRESSION
3067	/* flush the remaining pages in the compress cluster */
3068 if (f2fs_compressed_file(inode) && !f2fs_cluster_is_empty(&cc)) {
3069 ret = f2fs_write_multi_pages(&cc, &submitted, wbc, io_type);
3070 nwritten += submitted;
3071 wbc->nr_to_write -= submitted;
3072 if (ret) {
3073 done = 1;
3074 retry = 0;
3075 }
3076 }
3077 if (f2fs_compressed_file(inode))
3078 f2fs_destroy_compress_ctx(&cc, false);
3079#endif
3080 if (retry) {
3081 index = 0;
3082 end = -1;
3083 goto retry;
3084 }
3085 if (wbc->range_cyclic && !done)
3086 done_index = 0;
3087 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
3088 mapping->writeback_index = done_index;
3089
3090 if (nwritten)
3091 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
3092 NULL, 0, DATA);
3093 /* submit cached bio of IPU write */
3094 if (bio)
3095 f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
3096
3097 return ret;
3098}
3099
3100static inline bool __should_serialize_io(struct inode *inode,
3101 struct writeback_control *wbc)
3102{
3103 /* to avoid deadlock in path of data flush */
3104 if (F2FS_I(inode)->cp_task)
3105 return false;
3106
3107 if (!S_ISREG(inode->i_mode))
3108 return false;
3109 if (IS_NOQUOTA(inode))
3110 return false;
3111
3112 if (f2fs_need_compress_data(inode))
3113 return true;
3114 if (wbc->sync_mode != WB_SYNC_ALL)
3115 return true;
3116 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
3117 return true;
3118 return false;
3119}
3120
3121static int __f2fs_write_data_pages(struct address_space *mapping,
3122 struct writeback_control *wbc,
3123 enum iostat_type io_type)
3124{
3125 struct inode *inode = mapping->host;
3126 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3127 struct blk_plug plug;
3128 int ret;
3129 bool locked = false;
3130
3131	/* deal with chardevs and other special files */
3132 if (!mapping->a_ops->writepage)
3133 return 0;
3134
3135 /* skip writing if there is no dirty page in this inode */
3136 if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
3137 return 0;
3138
3139 /* during POR, we don't need to trigger writepage at all. */
3140 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
3141 goto skip_write;
3142
3143 if ((S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) &&
3144 wbc->sync_mode == WB_SYNC_NONE &&
3145 get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
3146 f2fs_available_free_memory(sbi, DIRTY_DENTS))
3147 goto skip_write;
3148
3149 /* skip writing during file defragment */
3150 if (is_inode_flag_set(inode, FI_DO_DEFRAG))
3151 goto skip_write;
3152
3153 trace_f2fs_writepages(mapping->host, wbc, DATA);
3154
3155	/* to avoid splitting IOs due to mixed WB_SYNC_ALL and WB_SYNC_NONE */
3156 if (wbc->sync_mode == WB_SYNC_ALL)
3157 atomic_inc(&sbi->wb_sync_req[DATA]);
3158 else if (atomic_read(&sbi->wb_sync_req[DATA]))
3159 goto skip_write;
3160
3161 if (__should_serialize_io(inode, wbc)) {
3162 mutex_lock(&sbi->writepages);
3163 locked = true;
3164 }
3165
3166 blk_start_plug(&plug);
3167 ret = f2fs_write_cache_pages(mapping, wbc, io_type);
3168 blk_finish_plug(&plug);
3169
3170 if (locked)
3171 mutex_unlock(&sbi->writepages);
3172
3173 if (wbc->sync_mode == WB_SYNC_ALL)
3174 atomic_dec(&sbi->wb_sync_req[DATA]);
3175 /*
3176	 * if some pages were truncated, we cannot guarantee that mapping->host
3177	 * will detect the pending bios.
3178 */
3179
3180 f2fs_remove_dirty_inode(inode);
3181 return ret;
3182
3183skip_write:
3184 wbc->pages_skipped += get_dirty_pages(inode);
3185 trace_f2fs_writepages(mapping->host, wbc, DATA);
3186 return 0;
3187}
3188
3189static int f2fs_write_data_pages(struct address_space *mapping,
3190 struct writeback_control *wbc)
3191{
3192 struct inode *inode = mapping->host;
3193
3194 return __f2fs_write_data_pages(mapping, wbc,
3195 F2FS_I(inode)->cp_task == current ?
3196 FS_CP_DATA_IO : FS_DATA_IO);
3197}
3198
3199static void f2fs_write_failed(struct address_space *mapping, loff_t to)
3200{
3201 struct inode *inode = mapping->host;
3202 loff_t i_size = i_size_read(inode);
3203
3204 if (IS_NOQUOTA(inode))
3205 return;
3206
3207 /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */
3208 if (to > i_size && !f2fs_verity_in_progress(inode)) {
3209 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3210 down_write(&F2FS_I(inode)->i_mmap_sem);
3211
3212 truncate_pagecache(inode, i_size);
3213 f2fs_truncate_blocks(inode, i_size, true);
3214
3215 up_write(&F2FS_I(inode)->i_mmap_sem);
3216 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3217 }
3218}
3219
3220static int prepare_write_begin(struct f2fs_sb_info *sbi,
3221 struct page *page, loff_t pos, unsigned len,
3222 block_t *blk_addr, bool *node_changed)
3223{
3224 struct inode *inode = page->mapping->host;
3225 pgoff_t index = page->index;
3226 struct dnode_of_data dn;
3227 struct page *ipage;
3228 bool locked = false;
3229	struct extent_info ei = {0, 0, 0};
3230 int err = 0;
3231 int flag;
3232
3233 /*
3234 * we already allocated all the blocks, so we don't need to get
3235 * the block addresses when there is no need to fill the page.
3236 */
3237 if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
3238 !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
3239 !f2fs_verity_in_progress(inode))
3240 return 0;
3241
3242 /* f2fs_lock_op avoids race between write CP and convert_inline_page */
3243 if (f2fs_has_inline_data(inode) && pos + len > MAX_INLINE_DATA(inode))
3244 flag = F2FS_GET_BLOCK_DEFAULT;
3245 else
3246 flag = F2FS_GET_BLOCK_PRE_AIO;
3247
3248 if (f2fs_has_inline_data(inode) ||
3249 (pos & PAGE_MASK) >= i_size_read(inode)) {
3250 f2fs_do_map_lock(sbi, flag, true);
3251 locked = true;
3252 }
3253
3254restart:
3255 /* check inline_data */
3256 ipage = f2fs_get_node_page(sbi, inode->i_ino);
3257 if (IS_ERR(ipage)) {
3258 err = PTR_ERR(ipage);
3259 goto unlock_out;
3260 }
3261
3262 set_new_dnode(&dn, inode, ipage, ipage, 0);
3263
3264 if (f2fs_has_inline_data(inode)) {
3265 if (pos + len <= MAX_INLINE_DATA(inode)) {
3266 f2fs_do_read_inline_data(page, ipage);
3267 set_inode_flag(inode, FI_DATA_EXIST);
3268 if (inode->i_nlink)
3269 set_page_private_inline(ipage);
3270 } else {
3271 err = f2fs_convert_inline_page(&dn, page);
3272 if (err)
3273 goto out;
3274 if (dn.data_blkaddr == NULL_ADDR)
3275 err = f2fs_get_block(&dn, index);
3276 }
3277 } else if (locked) {
3278 err = f2fs_get_block(&dn, index);
3279 } else {
3280 if (f2fs_lookup_extent_cache(inode, index, &ei)) {
3281 dn.data_blkaddr = ei.blk + index - ei.fofs;
3282 } else {
3283 /* hole case */
3284 err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
3285 if (err || dn.data_blkaddr == NULL_ADDR) {
3286 f2fs_put_dnode(&dn);
3287 f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO,
3288 true);
3289 WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO);
3290 locked = true;
3291 goto restart;
3292 }
3293 }
3294 }
3295
3296 /* convert_inline_page can make node_changed */
3297 *blk_addr = dn.data_blkaddr;
3298 *node_changed = dn.node_changed;
3299out:
3300 f2fs_put_dnode(&dn);
3301unlock_out:
3302 if (locked)
3303 f2fs_do_map_lock(sbi, flag, false);
3304 return err;
3305}
3306
3307static int f2fs_write_begin(struct file *file, struct address_space *mapping,
3308 loff_t pos, unsigned len, unsigned flags,
3309 struct page **pagep, void **fsdata)
3310{
3311 struct inode *inode = mapping->host;
3312 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3313 struct page *page = NULL;
3314 pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT;
3315 bool need_balance = false, drop_atomic = false;
3316 block_t blkaddr = NULL_ADDR;
3317 int err = 0;
3318
3319 trace_f2fs_write_begin(inode, pos, len, flags);
3320
3321 if (!f2fs_is_checkpoint_ready(sbi)) {
3322 err = -ENOSPC;
3323 goto fail;
3324 }
3325
3326 if ((f2fs_is_atomic_file(inode) &&
3327 !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
3328 is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
3329 err = -ENOMEM;
3330 drop_atomic = true;
3331 goto fail;
3332 }
3333
3334 /*
3335 * We should check this at this moment to avoid deadlock on inode page
3336 * and #0 page. The locking rule for inline_data conversion should be:
3337 * lock_page(page #0) -> lock_page(inode_page)
3338 */
3339 if (index != 0) {
3340 err = f2fs_convert_inline_inode(inode);
3341 if (err)
3342 goto fail;
3343 }
3344
3345#ifdef CONFIG_F2FS_FS_COMPRESSION
3346 if (f2fs_compressed_file(inode)) {
3347 int ret;
3348
3349 *fsdata = NULL;
3350
3351 ret = f2fs_prepare_compress_overwrite(inode, pagep,
3352 index, fsdata);
3353 if (ret < 0) {
3354 err = ret;
3355 goto fail;
3356 } else if (ret) {
3357 return 0;
3358 }
3359 }
3360#endif
3361
3362repeat:
3363 /*
3364 * Do not use grab_cache_page_write_begin() to avoid deadlock due to
3365	 * wait_for_stable_page. We will wait for it below, under our own IO control.
3366 */
3367 page = f2fs_pagecache_get_page(mapping, index,
3368 FGP_LOCK | FGP_WRITE | FGP_CREAT, GFP_NOFS);
3369 if (!page) {
3370 err = -ENOMEM;
3371 goto fail;
3372 }
3373
3374 /* TODO: cluster can be compressed due to race with .writepage */
3375
3376 *pagep = page;
3377
3378 err = prepare_write_begin(sbi, page, pos, len,
3379 &blkaddr, &need_balance);
3380 if (err)
3381 goto fail;
3382
3383 if (need_balance && !IS_NOQUOTA(inode) &&
3384 has_not_enough_free_secs(sbi, 0, 0)) {
3385 unlock_page(page);
3386 f2fs_balance_fs(sbi, true);
3387 lock_page(page);
3388 if (page->mapping != mapping) {
3389 /* The page got truncated from under us */
3390 f2fs_put_page(page, 1);
3391 goto repeat;
3392 }
3393 }
3394
3395 f2fs_wait_on_page_writeback(page, DATA, false, true);
3396
3397 if (len == PAGE_SIZE || PageUptodate(page))
3398 return 0;
3399
3400 if (!(pos & (PAGE_SIZE - 1)) && (pos + len) >= i_size_read(inode) &&
3401 !f2fs_verity_in_progress(inode)) {
3402 zero_user_segment(page, len, PAGE_SIZE);
3403 return 0;
3404 }
3405
3406 if (blkaddr == NEW_ADDR) {
3407 zero_user_segment(page, 0, PAGE_SIZE);
3408 SetPageUptodate(page);
3409 } else {
3410 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
3411 DATA_GENERIC_ENHANCE_READ)) {
3412 err = -EFSCORRUPTED;
3413 goto fail;
3414 }
3415 err = f2fs_submit_page_read(inode, page, blkaddr, 0, true);
3416 if (err)
3417 goto fail;
3418
3419 lock_page(page);
3420 if (unlikely(page->mapping != mapping)) {
3421 f2fs_put_page(page, 1);
3422 goto repeat;
3423 }
3424 if (unlikely(!PageUptodate(page))) {
3425 err = -EIO;
3426 goto fail;
3427 }
3428 }
3429 return 0;
3430
3431fail:
3432 f2fs_put_page(page, 1);
3433 f2fs_write_failed(mapping, pos + len);
3434 if (drop_atomic)
3435 f2fs_drop_inmem_pages_all(sbi, false);
3436 return err;
3437}
3438
3439static int f2fs_write_end(struct file *file,
3440 struct address_space *mapping,
3441 loff_t pos, unsigned len, unsigned copied,
3442 struct page *page, void *fsdata)
3443{
3444 struct inode *inode = page->mapping->host;
3445
3446 trace_f2fs_write_end(inode, pos, len, copied);
3447
3448 /*
3449	 * This should come from len == PAGE_SIZE, and we expect copied
3450	 * to be PAGE_SIZE. Otherwise, we treat it as zero copied and
3451	 * let generic_perform_write() try to copy the data again via copied = 0.
3452 */
3453 if (!PageUptodate(page)) {
3454 if (unlikely(copied != len))
3455 copied = 0;
3456 else
3457 SetPageUptodate(page);
3458 }
3459
3460#ifdef CONFIG_F2FS_FS_COMPRESSION
3461 /* overwrite compressed file */
3462 if (f2fs_compressed_file(inode) && fsdata) {
3463 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3464 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3465
3466 if (pos + copied > i_size_read(inode) &&
3467 !f2fs_verity_in_progress(inode))
3468 f2fs_i_size_write(inode, pos + copied);
3469 return copied;
3470 }
3471#endif
3472
3473 if (!copied)
3474 goto unlock_out;
3475
3476 set_page_dirty(page);
3477
3478 if (pos + copied > i_size_read(inode) &&
3479 !f2fs_verity_in_progress(inode))
3480 f2fs_i_size_write(inode, pos + copied);
3481unlock_out:
3482 f2fs_put_page(page, 1);
3483 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3484 return copied;
3485}
3486
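/*
 * Returns 0 if direct I/O can proceed, 1 to fall back to buffered I/O
 * (read past EOF, or unaligned to the fs block size but still aligned
 * to the device's logical block size), or -EINVAL otherwise.
 */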
3487static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
3488 loff_t offset)
3489{
3490 unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
3491 unsigned blkbits = i_blkbits;
3492 unsigned blocksize_mask = (1 << blkbits) - 1;
3493 unsigned long align = offset | iov_iter_alignment(iter);
3494 struct block_device *bdev = inode->i_sb->s_bdev;
3495
3496 if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
3497 return 1;
3498
3499 if (align & blocksize_mask) {
3500 if (bdev)
3501 blkbits = blksize_bits(bdev_logical_block_size(bdev));
3502 blocksize_mask = (1 << blkbits) - 1;
3503 if (align & blocksize_mask)
3504 return -EINVAL;
3505 return 1;
3506 }
3507 return 0;
3508}
3509
3510static void f2fs_dio_end_io(struct bio *bio)
3511{
3512 struct f2fs_private_dio *dio = bio->bi_private;
3513
3514 dec_page_count(F2FS_I_SB(dio->inode),
3515 dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3516
3517 bio->bi_private = dio->orig_private;
3518 bio->bi_end_io = dio->orig_end_io;
3519
3520 kfree(dio);
3521
3522 bio_endio(bio);
3523}
3524
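/*
 * Interpose on bio completion so the in-flight DIO count stays accurate;
 * the original bi_end_io/bi_private are restored in f2fs_dio_end_io().
 */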
3525static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
3526 loff_t file_offset)
3527{
3528 struct f2fs_private_dio *dio;
3529 bool write = (bio_op(bio) == REQ_OP_WRITE);
3530
3531 dio = f2fs_kzalloc(F2FS_I_SB(inode),
3532 sizeof(struct f2fs_private_dio), GFP_NOFS);
3533 if (!dio)
3534 goto out;
3535
3536 dio->inode = inode;
3537 dio->orig_end_io = bio->bi_end_io;
3538 dio->orig_private = bio->bi_private;
3539 dio->write = write;
3540
3541 bio->bi_end_io = f2fs_dio_end_io;
3542 bio->bi_private = dio;
3543
3544 inc_page_count(F2FS_I_SB(inode),
3545 write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
3546
3547 submit_bio(bio);
3548 return;
3549out:
3550 bio->bi_status = BLK_STS_IOERR;
3551 bio_endio(bio);
3552}
3553
3554static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3555{
3556 struct address_space *mapping = iocb->ki_filp->f_mapping;
3557 struct inode *inode = mapping->host;
3558 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3559 struct f2fs_inode_info *fi = F2FS_I(inode);
3560 size_t count = iov_iter_count(iter);
3561 loff_t offset = iocb->ki_pos;
3562 int rw = iov_iter_rw(iter);
3563 int err;
3564 enum rw_hint hint = iocb->ki_hint;
3565 int whint_mode = F2FS_OPTION(sbi).whint_mode;
3566 bool do_opu;
3567
3568 err = check_direct_IO(inode, iter, offset);
3569 if (err)
3570 return err < 0 ? err : 0;
3571
3572 if (f2fs_force_buffered_io(inode, iocb, iter))
3573 return 0;
3574
3575 do_opu = allow_outplace_dio(inode, iocb, iter);
3576
3577 trace_f2fs_direct_IO_enter(inode, offset, count, rw);
3578
3579 if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
3580 iocb->ki_hint = WRITE_LIFE_NOT_SET;
3581
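	/*
	 * Hold i_gc_rwsem across the DIO so GC cannot migrate the blocks
	 * in flight; out-of-place DIO writes additionally hold the READ side.
	 */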
3582 if (iocb->ki_flags & IOCB_NOWAIT) {
3583 if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
3584 iocb->ki_hint = hint;
3585 err = -EAGAIN;
3586 goto out;
3587 }
3588 if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
3589 up_read(&fi->i_gc_rwsem[rw]);
3590 iocb->ki_hint = hint;
3591 err = -EAGAIN;
3592 goto out;
3593 }
3594 } else {
3595 down_read(&fi->i_gc_rwsem[rw]);
3596 if (do_opu)
3597 down_read(&fi->i_gc_rwsem[READ]);
3598 }
3599
3600 err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3601 iter, rw == WRITE ? get_data_block_dio_write :
3602 get_data_block_dio, NULL, f2fs_dio_submit_bio,
3603 rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
3604 DIO_SKIP_HOLES);
3605
3606 if (do_opu)
3607 up_read(&fi->i_gc_rwsem[READ]);
3608
3609 up_read(&fi->i_gc_rwsem[rw]);
3610
3611 if (rw == WRITE) {
3612 if (whint_mode == WHINT_MODE_OFF)
3613 iocb->ki_hint = hint;
3614 if (err > 0) {
3615 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3616 err);
3617 if (!do_opu)
3618 set_inode_flag(inode, FI_UPDATE_WRITE);
3619 } else if (err == -EIOCBQUEUED) {
3620 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
3621 count - iov_iter_count(iter));
3622 } else if (err < 0) {
3623 f2fs_write_failed(mapping, offset + count);
3624 }
3625 } else {
3626 if (err > 0)
3627 f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
3628 else if (err == -EIOCBQUEUED)
3629 f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
3630 count - iov_iter_count(iter));
3631 }
3632
3633out:
3634 trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
3635
3636 return err;
3637}
3638
3639void f2fs_invalidate_page(struct page *page, unsigned int offset,
3640 unsigned int length)
3641{
3642 struct inode *inode = page->mapping->host;
3643 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3644
3645 if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
3646 (offset % PAGE_SIZE || length != PAGE_SIZE))
3647 return;
3648
3649 if (PageDirty(page)) {
3650 if (inode->i_ino == F2FS_META_INO(sbi)) {
3651 dec_page_count(sbi, F2FS_DIRTY_META);
3652 } else if (inode->i_ino == F2FS_NODE_INO(sbi)) {
3653 dec_page_count(sbi, F2FS_DIRTY_NODES);
3654 } else {
3655 inode_dec_dirty_pages(inode);
3656 f2fs_remove_dirty_inode(inode);
3657 }
3658 }
3659
3660 clear_page_private_gcing(page);
3661
3662 if (test_opt(sbi, COMPRESS_CACHE)) {
3663 if (f2fs_compressed_file(inode))
3664 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3665 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3666 clear_page_private_data(page);
3667 }
3668
3669 if (page_private_atomic(page))
3670 return f2fs_drop_inmem_page(inode, page);
3671
3672 detach_page_private(page);
3673 set_page_private(page, 0);
3674}
3675
3676int f2fs_release_page(struct page *page, gfp_t wait)
3677{
3678	/* If this is a dirty page, keep PagePrivate */
3679 if (PageDirty(page))
3680 return 0;
3681
3682	/* This is an atomic-written page, keep PagePrivate */
3683 if (page_private_atomic(page))
3684 return 0;
3685
3686 if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
3687 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3688 struct inode *inode = page->mapping->host;
3689
3690 if (f2fs_compressed_file(inode))
3691 f2fs_invalidate_compress_pages(sbi, inode->i_ino);
3692 if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
3693 clear_page_private_data(page);
3694 }
3695
3696 clear_page_private_gcing(page);
3697
3698 detach_page_private(page);
3699 set_page_private(page, 0);
3700 return 1;
3701}
3702
3703static int f2fs_set_data_page_dirty(struct page *page)
3704{
3705 struct inode *inode = page_file_mapping(page)->host;
3706
3707 trace_f2fs_set_page_dirty(page, DATA);
3708
3709 if (!PageUptodate(page))
3710 SetPageUptodate(page);
3711 if (PageSwapCache(page))
3712 return __set_page_dirty_nobuffers(page);
3713
3714 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
3715 if (!page_private_atomic(page)) {
3716 f2fs_register_inmem_page(inode, page);
3717 return 1;
3718 }
3719 /*
3720		 * This page has already been registered; we just
3721 * return here.
3722 */
3723 return 0;
3724 }
3725
3726 if (!PageDirty(page)) {
3727 __set_page_dirty_nobuffers(page);
3728 f2fs_update_dirty_page(inode, page);
3729 return 1;
3730 }
3731 return 0;
3732}
3733
3735static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block)
3736{
3737#ifdef CONFIG_F2FS_FS_COMPRESSION
3738 struct dnode_of_data dn;
3739 sector_t start_idx, blknr = 0;
3740 int ret;
3741
3742 start_idx = round_down(block, F2FS_I(inode)->i_cluster_size);
3743
3744 set_new_dnode(&dn, inode, NULL, NULL, 0);
3745 ret = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
3746 if (ret)
3747 return 0;
3748
3749 if (dn.data_blkaddr != COMPRESS_ADDR) {
3750 dn.ofs_in_node += block - start_idx;
3751 blknr = f2fs_data_blkaddr(&dn);
3752 if (!__is_valid_data_blkaddr(blknr))
3753 blknr = 0;
3754 }
3755
3756 f2fs_put_dnode(&dn);
3757 return blknr;
3758#else
3759 return 0;
3760#endif
3761}
3762
3764static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
3765{
3766 struct inode *inode = mapping->host;
3767 sector_t blknr = 0;
3768
3769 if (f2fs_has_inline_data(inode))
3770 goto out;
3771
3772	/* write back dirty pages to make sure all blocks are allocated */
3773 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
3774 filemap_write_and_wait(mapping);
3775
3776	/* Block number must be less than the max file block count */
3777 if (unlikely(block >= max_file_blocks(inode)))
3778 goto out;
3779
3780 if (f2fs_compressed_file(inode)) {
3781 blknr = f2fs_bmap_compress(inode, block);
3782 } else {
3783 struct f2fs_map_blocks map;
3784
3785 memset(&map, 0, sizeof(map));
3786 map.m_lblk = block;
3787 map.m_len = 1;
3788 map.m_next_pgofs = NULL;
3789 map.m_seg_type = NO_CHECK_TYPE;
3790
3791 if (!f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_BMAP))
3792 blknr = map.m_pblk;
3793 }
3794out:
3795 trace_f2fs_bmap(inode, block, blknr);
3796 return blknr;
3797}
3798
3799#ifdef CONFIG_MIGRATION
3800#include <linux/migrate.h>
3801
3802int f2fs_migrate_page(struct address_space *mapping,
3803 struct page *newpage, struct page *page, enum migrate_mode mode)
3804{
3805 int rc, extra_count;
3806 struct f2fs_inode_info *fi = F2FS_I(mapping->host);
3807 bool atomic_written = page_private_atomic(page);
3808
3809 BUG_ON(PageWriteback(page));
3810
3811	/* migrating an atomic written page is safe with the inmem_lock held */
3812 if (atomic_written) {
3813 if (mode != MIGRATE_SYNC)
3814 return -EBUSY;
3815 if (!mutex_trylock(&fi->inmem_lock))
3816 return -EAGAIN;
3817 }
3818
3819 /* one extra reference was held for atomic_write page */
3820 extra_count = atomic_written ? 1 : 0;
3821 rc = migrate_page_move_mapping(mapping, newpage,
3822 page, extra_count);
3823 if (rc != MIGRATEPAGE_SUCCESS) {
3824 if (atomic_written)
3825 mutex_unlock(&fi->inmem_lock);
3826 return rc;
3827 }
3828
3829 if (atomic_written) {
3830 struct inmem_pages *cur;
3831
3832 list_for_each_entry(cur, &fi->inmem_pages, list)
3833 if (cur->page == page) {
3834 cur->page = newpage;
3835 break;
3836 }
3837 mutex_unlock(&fi->inmem_lock);
3838 put_page(page);
3839 get_page(newpage);
3840 }
3841
3842	/* guarantee we start with no stale private field */
3843 set_page_private(newpage, 0);
3844 if (PagePrivate(page)) {
3845 set_page_private(newpage, page_private(page));
3846 SetPagePrivate(newpage);
3847 get_page(newpage);
3848
3849 set_page_private(page, 0);
3850 ClearPagePrivate(page);
3851 put_page(page);
3852 }
3853
3854 if (mode != MIGRATE_SYNC_NO_COPY)
3855 migrate_page_copy(newpage, page);
3856 else
3857 migrate_page_states(newpage, page);
3858
3859 return MIGRATEPAGE_SUCCESS;
3860}
3861#endif
3862
3863#ifdef CONFIG_SWAP
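/*
 * Rewrite the given block range through pinned cold sections so that a
 * swapfile's extents become section-aligned.
 */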
3864static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
3865 unsigned int blkcnt)
3866{
3867 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3868 unsigned int blkofs;
3869 unsigned int blk_per_sec = BLKS_PER_SEC(sbi);
3870 unsigned int secidx = start_blk / blk_per_sec;
3871 unsigned int end_sec = secidx + blkcnt / blk_per_sec;
3872 int ret = 0;
3873
3874 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3875 down_write(&F2FS_I(inode)->i_mmap_sem);
3876
3877 set_inode_flag(inode, FI_ALIGNED_WRITE);
3878
3879 for (; secidx < end_sec; secidx++) {
3880 down_write(&sbi->pin_sem);
3881
3882 f2fs_lock_op(sbi);
3883 f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
3884 f2fs_unlock_op(sbi);
3885
3886 set_inode_flag(inode, FI_DO_DEFRAG);
3887
3888 for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
3889 struct page *page;
3890 unsigned int blkidx = secidx * blk_per_sec + blkofs;
3891
3892 page = f2fs_get_lock_data_page(inode, blkidx, true);
3893 if (IS_ERR(page)) {
3894 up_write(&sbi->pin_sem);
3895 ret = PTR_ERR(page);
3896 goto done;
3897 }
3898
3899 set_page_dirty(page);
3900 f2fs_put_page(page, 1);
3901 }
3902
3903 clear_inode_flag(inode, FI_DO_DEFRAG);
3904
3905 ret = filemap_fdatawrite(inode->i_mapping);
3906
3907 up_write(&sbi->pin_sem);
3908
3909 if (ret)
3910 break;
3911 }
3912
3913done:
3914 clear_inode_flag(inode, FI_DO_DEFRAG);
3915 clear_inode_flag(inode, FI_ALIGNED_WRITE);
3916
3917 up_write(&F2FS_I(inode)->i_mmap_sem);
3918 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3919
3920 return ret;
3921}
3922
3923static int check_swap_activate(struct swap_info_struct *sis,
3924 struct file *swap_file, sector_t *span)
3925{
3926 struct address_space *mapping = swap_file->f_mapping;
3927 struct inode *inode = mapping->host;
3928 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3929 sector_t cur_lblock;
3930 sector_t last_lblock;
3931 sector_t pblock;
3932 sector_t lowest_pblock = -1;
3933 sector_t highest_pblock = 0;
3934 int nr_extents = 0;
3935 unsigned long nr_pblocks;
3936 unsigned int blks_per_sec = BLKS_PER_SEC(sbi);
3937 unsigned int sec_blks_mask = BLKS_PER_SEC(sbi) - 1;
3938 unsigned int not_aligned = 0;
3939 int ret = 0;
3940
3941 /*
3942 * Map all the blocks into the extent list. This code doesn't try
3943 * to be very smart.
3944 */
3945 cur_lblock = 0;
3946 last_lblock = bytes_to_blks(inode, i_size_read(inode));
3947
3948 while (cur_lblock < last_lblock && cur_lblock < sis->max) {
3949 struct f2fs_map_blocks map;
3950retry:
3951 cond_resched();
3952
3953 memset(&map, 0, sizeof(map));
3954 map.m_lblk = cur_lblock;
3955 map.m_len = last_lblock - cur_lblock;
3956 map.m_next_pgofs = NULL;
3957 map.m_next_extent = NULL;
3958 map.m_seg_type = NO_CHECK_TYPE;
3959 map.m_may_create = false;
3960
3961 ret = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_FIEMAP);
3962 if (ret)
3963 goto out;
3964
3965 /* hole */
3966 if (!(map.m_flags & F2FS_MAP_FLAGS)) {
3967 f2fs_err(sbi, "Swapfile has holes");
3968 ret = -EINVAL;
3969 goto out;
3970 }
3971
3972 pblock = map.m_pblk;
3973 nr_pblocks = map.m_len;
3974
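		/*
		 * The extent is not aligned to the section size: migrate the
		 * covering sections and retry the mapping from scratch.
		 */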
3975 if ((pblock - SM_I(sbi)->main_blkaddr) & sec_blks_mask ||
3976 nr_pblocks & sec_blks_mask) {
3977 not_aligned++;
3978
3979 nr_pblocks = roundup(nr_pblocks, blks_per_sec);
3980 if (cur_lblock + nr_pblocks > sis->max)
3981 nr_pblocks -= blks_per_sec;
3982
3983 if (!nr_pblocks) {
3984				/* this extent is the last one */
3985 nr_pblocks = map.m_len;
3986 f2fs_warn(sbi, "Swapfile: last extent is not aligned to section");
3987 goto next;
3988 }
3989
3990 ret = f2fs_migrate_blocks(inode, cur_lblock,
3991 nr_pblocks);
3992 if (ret)
3993 goto out;
3994 goto retry;
3995 }
3996next:
3997 if (cur_lblock + nr_pblocks >= sis->max)
3998 nr_pblocks = sis->max - cur_lblock;
3999
4000 if (cur_lblock) { /* exclude the header page */
4001 if (pblock < lowest_pblock)
4002 lowest_pblock = pblock;
4003 if (pblock + nr_pblocks - 1 > highest_pblock)
4004 highest_pblock = pblock + nr_pblocks - 1;
4005 }
4006
4007 /*
4008		 * We found a contiguous run of blocks; add it as a swap extent.
4009 */
4010 ret = add_swap_extent(sis, cur_lblock, nr_pblocks, pblock);
4011 if (ret < 0)
4012 goto out;
4013 nr_extents += ret;
4014 cur_lblock += nr_pblocks;
4015 }
4016 ret = nr_extents;
4017 *span = 1 + highest_pblock - lowest_pblock;
4018 if (cur_lblock == 0)
4019 cur_lblock = 1; /* force Empty message */
4020 sis->max = cur_lblock;
4021 sis->pages = cur_lblock - 1;
4022 sis->highest_bit = cur_lblock - 1;
4023out:
4024 if (not_aligned)
4025		f2fs_warn(sbi, "Swapfile (%u) is not aligned to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
4026 not_aligned, blks_per_sec * F2FS_BLKSIZE);
4027 return ret;
4028}
4029
4030static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4031 sector_t *span)
4032{
4033 struct inode *inode = file_inode(file);
4034 int ret;
4035
4036 if (!S_ISREG(inode->i_mode))
4037 return -EINVAL;
4038
4039 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
4040 return -EROFS;
4041
4042 if (f2fs_lfs_mode(F2FS_I_SB(inode))) {
4043 f2fs_err(F2FS_I_SB(inode),
4044 "Swapfile not supported in LFS mode");
4045 return -EINVAL;
4046 }
4047
4048 ret = f2fs_convert_inline_inode(inode);
4049 if (ret)
4050 return ret;
4051
4052 if (!f2fs_disable_compressed_file(inode))
4053 return -EINVAL;
4054
4055 f2fs_precache_extents(inode);
4056
4057 ret = check_swap_activate(sis, file, span);
4058 if (ret < 0)
4059 return ret;
4060
4061 set_inode_flag(inode, FI_PIN_FILE);
4062 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
4063 return ret;
4064}
4065
4066static void f2fs_swap_deactivate(struct file *file)
4067{
4068 struct inode *inode = file_inode(file);
4069
4070 clear_inode_flag(inode, FI_PIN_FILE);
4071}
4072#else
4073static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
4074 sector_t *span)
4075{
4076 return -EOPNOTSUPP;
4077}
4078
4079static void f2fs_swap_deactivate(struct file *file)
4080{
4081}
4082#endif
4083
4084const struct address_space_operations f2fs_dblock_aops = {
4085 .readpage = f2fs_read_data_page,
4086 .readahead = f2fs_readahead,
4087 .writepage = f2fs_write_data_page,
4088 .writepages = f2fs_write_data_pages,
4089 .write_begin = f2fs_write_begin,
4090 .write_end = f2fs_write_end,
4091 .set_page_dirty = f2fs_set_data_page_dirty,
4092 .invalidatepage = f2fs_invalidate_page,
4093 .releasepage = f2fs_release_page,
4094 .direct_IO = f2fs_direct_IO,
4095 .bmap = f2fs_bmap,
4096 .swap_activate = f2fs_swap_activate,
4097 .swap_deactivate = f2fs_swap_deactivate,
4098#ifdef CONFIG_MIGRATION
4099 .migratepage = f2fs_migrate_page,
4100#endif
4101};
4102
4103void f2fs_clear_page_cache_dirty_tag(struct page *page)
4104{
4105 struct address_space *mapping = page_mapping(page);
4106 unsigned long flags;
4107
4108 xa_lock_irqsave(&mapping->i_pages, flags);
4109 __xa_clear_mark(&mapping->i_pages, page_index(page),
4110 PAGECACHE_TAG_DIRTY);
4111 xa_unlock_irqrestore(&mapping->i_pages, flags);
4112}
4113
4114int __init f2fs_init_post_read_processing(void)
4115{
4116 bio_post_read_ctx_cache =
4117 kmem_cache_create("f2fs_bio_post_read_ctx",
4118 sizeof(struct bio_post_read_ctx), 0, 0, NULL);
4119 if (!bio_post_read_ctx_cache)
4120 goto fail;
4121 bio_post_read_ctx_pool =
4122 mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
4123 bio_post_read_ctx_cache);
4124 if (!bio_post_read_ctx_pool)
4125 goto fail_free_cache;
4126 return 0;
4127
4128fail_free_cache:
4129 kmem_cache_destroy(bio_post_read_ctx_cache);
4130fail:
4131 return -ENOMEM;
4132}
4133
4134void f2fs_destroy_post_read_processing(void)
4135{
4136 mempool_destroy(bio_post_read_ctx_pool);
4137 kmem_cache_destroy(bio_post_read_ctx_cache);
4138}
4139
4140int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi)
4141{
4142 if (!f2fs_sb_has_encrypt(sbi) &&
4143 !f2fs_sb_has_verity(sbi) &&
4144 !f2fs_sb_has_compression(sbi))
4145 return 0;
4146
4147 sbi->post_read_wq = alloc_workqueue("f2fs_post_read_wq",
4148 WQ_UNBOUND | WQ_HIGHPRI,
4149 num_online_cpus());
4150 if (!sbi->post_read_wq)
4151 return -ENOMEM;
4152 return 0;
4153}
4154
4155void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi)
4156{
4157 if (sbi->post_read_wq)
4158 destroy_workqueue(sbi->post_read_wq);
4159}
4160
4161int __init f2fs_init_bio_entry_cache(void)
4162{
4163 bio_entry_slab = f2fs_kmem_cache_create("f2fs_bio_entry_slab",
4164 sizeof(struct bio_entry));
4165 if (!bio_entry_slab)
4166 return -ENOMEM;
4167 return 0;
4168}
4169
4170void f2fs_destroy_bio_entry_cache(void)
4171{
4172 kmem_cache_destroy(bio_entry_slab);
4173}