1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2010 Red Hat, Inc.
4 * Copyright (C) 2016-2019 Christoph Hellwig.
5 */
6#include <linux/module.h>
7#include <linux/compiler.h>
8#include <linux/fs.h>
9#include <linux/iomap.h>
10#include <linux/pagemap.h>
11#include <linux/uio.h>
12#include <linux/buffer_head.h>
13#include <linux/dax.h>
14#include <linux/writeback.h>
15#include <linux/list_sort.h>
16#include <linux/swap.h>
17#include <linux/bio.h>
18#include <linux/sched/signal.h>
19#include <linux/migrate.h>
20#include "trace.h"
21
22#include "../internal.h"
23
24#define IOEND_BATCH_SIZE 4096
25
26typedef int (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length);
27/*
28 * Structure allocated for each folio to track per-block uptodate, dirty state
29 * and I/O completions.
30 */
31struct iomap_folio_state {
32 spinlock_t state_lock;
33 unsigned int read_bytes_pending;
34 atomic_t write_bytes_pending;
35
36 /*
37 * Each block has two bits in this bitmap:
38 *	Bits [0..blocks_per_folio) hold the per-block uptodate status.
39 *	Bits [blocks_per_folio..2*blocks_per_folio) hold the per-block dirty status.
40 */
41 unsigned long state[];
42};
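/*
 * Example (derived from the layout above, assuming a 16KiB folio with
 * 4KiB filesystem blocks, i.e. four blocks per folio): bits 0-3 of
 * ->state carry the per-block uptodate flags and bits 4-7 carry the
 * per-block dirty flags, all updated under ->state_lock.
 */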
43
44static struct bio_set iomap_ioend_bioset;
45
46static inline bool ifs_is_fully_uptodate(struct folio *folio,
47 struct iomap_folio_state *ifs)
48{
49 struct inode *inode = folio->mapping->host;
50
51 return bitmap_full(ifs->state, i_blocks_per_folio(inode, folio));
52}
53
54static inline bool ifs_block_is_uptodate(struct iomap_folio_state *ifs,
55 unsigned int block)
56{
57 return test_bit(block, ifs->state);
58}
59
60static bool ifs_set_range_uptodate(struct folio *folio,
61 struct iomap_folio_state *ifs, size_t off, size_t len)
62{
63 struct inode *inode = folio->mapping->host;
64 unsigned int first_blk = off >> inode->i_blkbits;
65 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
66 unsigned int nr_blks = last_blk - first_blk + 1;
67
68 bitmap_set(ifs->state, first_blk, nr_blks);
69 return ifs_is_fully_uptodate(folio, ifs);
70}
71
72static void iomap_set_range_uptodate(struct folio *folio, size_t off,
73 size_t len)
74{
75 struct iomap_folio_state *ifs = folio->private;
76 unsigned long flags;
77 bool uptodate = true;
78
79 if (ifs) {
80 spin_lock_irqsave(&ifs->state_lock, flags);
81 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
82 spin_unlock_irqrestore(&ifs->state_lock, flags);
83 }
84
85 if (uptodate)
86 folio_mark_uptodate(folio);
87}
88
89static inline bool ifs_block_is_dirty(struct folio *folio,
90 struct iomap_folio_state *ifs, int block)
91{
92 struct inode *inode = folio->mapping->host;
93 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
94
95 return test_bit(block + blks_per_folio, ifs->state);
96}
97
98static void ifs_clear_range_dirty(struct folio *folio,
99 struct iomap_folio_state *ifs, size_t off, size_t len)
100{
101 struct inode *inode = folio->mapping->host;
102 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
103 unsigned int first_blk = (off >> inode->i_blkbits);
104 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
105 unsigned int nr_blks = last_blk - first_blk + 1;
106 unsigned long flags;
107
108 spin_lock_irqsave(&ifs->state_lock, flags);
109 bitmap_clear(ifs->state, first_blk + blks_per_folio, nr_blks);
110 spin_unlock_irqrestore(&ifs->state_lock, flags);
111}
112
113static void iomap_clear_range_dirty(struct folio *folio, size_t off, size_t len)
114{
115 struct iomap_folio_state *ifs = folio->private;
116
117 if (ifs)
118 ifs_clear_range_dirty(folio, ifs, off, len);
119}
120
121static void ifs_set_range_dirty(struct folio *folio,
122 struct iomap_folio_state *ifs, size_t off, size_t len)
123{
124 struct inode *inode = folio->mapping->host;
125 unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
126 unsigned int first_blk = (off >> inode->i_blkbits);
127 unsigned int last_blk = (off + len - 1) >> inode->i_blkbits;
128 unsigned int nr_blks = last_blk - first_blk + 1;
129 unsigned long flags;
130
131 spin_lock_irqsave(&ifs->state_lock, flags);
132 bitmap_set(ifs->state, first_blk + blks_per_folio, nr_blks);
133 spin_unlock_irqrestore(&ifs->state_lock, flags);
134}
135
136static void iomap_set_range_dirty(struct folio *folio, size_t off, size_t len)
137{
138 struct iomap_folio_state *ifs = folio->private;
139
140 if (ifs)
141 ifs_set_range_dirty(folio, ifs, off, len);
142}
143
144static struct iomap_folio_state *ifs_alloc(struct inode *inode,
145 struct folio *folio, unsigned int flags)
146{
147 struct iomap_folio_state *ifs = folio->private;
148 unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
149 gfp_t gfp;
150
151 if (ifs || nr_blocks <= 1)
152 return ifs;
153
154 if (flags & IOMAP_NOWAIT)
155 gfp = GFP_NOWAIT;
156 else
157 gfp = GFP_NOFS | __GFP_NOFAIL;
158
159 /*
160 * ifs->state tracks two sets of state flags when the
161 * filesystem block size is smaller than the folio size.
162 * The first state tracks per-block uptodate and the
163 * second tracks per-block dirty state.
164 */
165 ifs = kzalloc(struct_size(ifs, state,
166 BITS_TO_LONGS(2 * nr_blocks)), gfp);
167 if (!ifs)
168 return ifs;
169
170 spin_lock_init(&ifs->state_lock);
171 if (folio_test_uptodate(folio))
172 bitmap_set(ifs->state, 0, nr_blocks);
173 if (folio_test_dirty(folio))
174 bitmap_set(ifs->state, nr_blocks, nr_blocks);
175 folio_attach_private(folio, ifs);
176
177 return ifs;
178}
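/*
 * Note: with IOMAP_NOWAIT the GFP_NOWAIT allocation above may fail and
 * ifs_alloc() then returns NULL; callers that really need per-block state,
 * such as __iomap_write_begin(), handle this by bailing out with -EAGAIN.
 */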
179
180static void ifs_free(struct folio *folio)
181{
182 struct iomap_folio_state *ifs = folio_detach_private(folio);
183
184 if (!ifs)
185 return;
186 WARN_ON_ONCE(ifs->read_bytes_pending != 0);
187 WARN_ON_ONCE(atomic_read(&ifs->write_bytes_pending));
188 WARN_ON_ONCE(ifs_is_fully_uptodate(folio, ifs) !=
189 folio_test_uptodate(folio));
190 kfree(ifs);
191}
192
193/*
194 * Calculate the range inside the folio that we actually need to read.
195 */
196static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
197 loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
198{
199 struct iomap_folio_state *ifs = folio->private;
200 loff_t orig_pos = *pos;
201 loff_t isize = i_size_read(inode);
202 unsigned block_bits = inode->i_blkbits;
203 unsigned block_size = (1 << block_bits);
204 size_t poff = offset_in_folio(folio, *pos);
205 size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
206 unsigned first = poff >> block_bits;
207 unsigned last = (poff + plen - 1) >> block_bits;
208
209 /*
210 * If the block size is smaller than the page size, we need to check the
211 * per-block uptodate status and adjust the offset and length if needed
212 * to avoid reading in already uptodate ranges.
213 */
214 if (ifs) {
215 unsigned int i;
216
217 /* move forward for each leading block marked uptodate */
218 for (i = first; i <= last; i++) {
219 if (!ifs_block_is_uptodate(ifs, i))
220 break;
221 *pos += block_size;
222 poff += block_size;
223 plen -= block_size;
224 first++;
225 }
226
227 /* truncate len if we find any trailing uptodate block(s) */
228 for ( ; i <= last; i++) {
229 if (ifs_block_is_uptodate(ifs, i)) {
230 plen -= (last - i + 1) * block_size;
231 last = i - 1;
232 break;
233 }
234 }
235 }
236
237 /*
238 * If the extent spans the block that contains the i_size, we need to
239 * handle both halves separately so that we properly zero data in the
240 * page cache for blocks that are entirely outside of i_size.
241 */
242 if (orig_pos <= isize && orig_pos + length > isize) {
243 unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
244
245 if (first <= end && last > end)
246 plen -= (last - end) * block_size;
247 }
248
249 *offp = poff;
250 *lenp = plen;
251}
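/*
 * Worked example: with 4KiB blocks and a 16KiB folio whose first block is
 * already uptodate, a read of the whole folio is trimmed to *offp = 4096
 * and *lenp = 12KiB, so only the non-uptodate tail of the folio is read.
 */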
252
253static void iomap_finish_folio_read(struct folio *folio, size_t off,
254 size_t len, int error)
255{
256 struct iomap_folio_state *ifs = folio->private;
257 bool uptodate = !error;
258 bool finished = true;
259
260 if (ifs) {
261 unsigned long flags;
262
263 spin_lock_irqsave(&ifs->state_lock, flags);
264 if (!error)
265 uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
266 ifs->read_bytes_pending -= len;
267 finished = !ifs->read_bytes_pending;
268 spin_unlock_irqrestore(&ifs->state_lock, flags);
269 }
270
271 if (error)
272 folio_set_error(folio);
273 if (finished)
274 folio_end_read(folio, uptodate);
275}
276
277static void iomap_read_end_io(struct bio *bio)
278{
279 int error = blk_status_to_errno(bio->bi_status);
280 struct folio_iter fi;
281
282 bio_for_each_folio_all(fi, bio)
283 iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
284 bio_put(bio);
285}
286
287struct iomap_readpage_ctx {
288 struct folio *cur_folio;
289 bool cur_folio_in_bio;
290 struct bio *bio;
291 struct readahead_control *rac;
292};
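/*
 * Read context: @cur_folio is the folio currently being filled,
 * @cur_folio_in_bio records whether any part of it was added to a bio (in
 * which case read completion, not the caller, ends the folio lock), @bio is
 * the read bio being built across folios, and @rac is non-NULL when we are
 * servicing readahead.
 */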
293
294/**
295 * iomap_read_inline_data - copy inline data into the page cache
296 * @iter: iteration structure
297 * @folio: folio to copy to
298 *
299 * Copy the inline data in @iter into @folio and zero out the rest of the folio.
300 * Only a single IOMAP_INLINE extent is allowed at the end of each file.
301 * Returns zero for success to complete the read, or the usual negative errno.
302 */
303static int iomap_read_inline_data(const struct iomap_iter *iter,
304 struct folio *folio)
305{
306 const struct iomap *iomap = iomap_iter_srcmap(iter);
307 size_t size = i_size_read(iter->inode) - iomap->offset;
308 size_t offset = offset_in_folio(folio, iomap->offset);
309
310 if (folio_test_uptodate(folio))
311 return 0;
312
313 if (WARN_ON_ONCE(size > iomap->length))
314 return -EIO;
315 if (offset > 0)
316 ifs_alloc(iter->inode, folio, iter->flags);
317
318 folio_fill_tail(folio, offset, iomap->inline_data, size);
319 iomap_set_range_uptodate(folio, offset, folio_size(folio) - offset);
320 return 0;
321}
322
323static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
324 loff_t pos)
325{
326 const struct iomap *srcmap = iomap_iter_srcmap(iter);
327
328 return srcmap->type != IOMAP_MAPPED ||
329 (srcmap->flags & IOMAP_F_NEW) ||
330 pos >= i_size_read(iter->inode);
331}
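/*
 * True when there is nothing to read from disk for this range: the source
 * extent is not a mapped extent (a hole, unwritten or delalloc extent), was
 * only just allocated (IOMAP_F_NEW), or lies beyond i_size. Callers zero
 * the folio range in memory instead of issuing I/O.
 */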
332
333static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
334 struct iomap_readpage_ctx *ctx, loff_t offset)
335{
336 const struct iomap *iomap = &iter->iomap;
337 loff_t pos = iter->pos + offset;
338 loff_t length = iomap_length(iter) - offset;
339 struct folio *folio = ctx->cur_folio;
340 struct iomap_folio_state *ifs;
341 loff_t orig_pos = pos;
342 size_t poff, plen;
343 sector_t sector;
344
345 if (iomap->type == IOMAP_INLINE)
346 return iomap_read_inline_data(iter, folio);
347
348 /* zero post-eof blocks as the page may be mapped */
349 ifs = ifs_alloc(iter->inode, folio, iter->flags);
350 iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
351 if (plen == 0)
352 goto done;
353
354 if (iomap_block_needs_zeroing(iter, pos)) {
355 folio_zero_range(folio, poff, plen);
356 iomap_set_range_uptodate(folio, poff, plen);
357 goto done;
358 }
359
360 ctx->cur_folio_in_bio = true;
361 if (ifs) {
362 spin_lock_irq(&ifs->state_lock);
363 ifs->read_bytes_pending += plen;
364 spin_unlock_irq(&ifs->state_lock);
365 }
366
367 sector = iomap_sector(iomap, pos);
368 if (!ctx->bio ||
369 bio_end_sector(ctx->bio) != sector ||
370 !bio_add_folio(ctx->bio, folio, plen, poff)) {
371 gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
372 gfp_t orig_gfp = gfp;
373 unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
374
375 if (ctx->bio)
376 submit_bio(ctx->bio);
377
378 if (ctx->rac) /* same as readahead_gfp_mask */
379 gfp |= __GFP_NORETRY | __GFP_NOWARN;
380 ctx->bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs),
381 REQ_OP_READ, gfp);
382 /*
383 * If the bio_alloc fails, try it again for a single page to
384 * avoid having to deal with partial page reads. This emulates
385 * what do_mpage_read_folio does.
386 */
387 if (!ctx->bio) {
388 ctx->bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ,
389 orig_gfp);
390 }
391 if (ctx->rac)
392 ctx->bio->bi_opf |= REQ_RAHEAD;
393 ctx->bio->bi_iter.bi_sector = sector;
394 ctx->bio->bi_end_io = iomap_read_end_io;
395 bio_add_folio_nofail(ctx->bio, folio, plen, poff);
396 }
397
398done:
399 /*
400 * Move the caller beyond our range so that it keeps making progress.
401 * For that, we have to include any leading non-uptodate ranges, but
402 * we can skip trailing ones as they will be handled in the next
403 * iteration.
404 */
405 return pos - orig_pos + plen;
406}
407
408int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops)
409{
410 struct iomap_iter iter = {
411 .inode = folio->mapping->host,
412 .pos = folio_pos(folio),
413 .len = folio_size(folio),
414 };
415 struct iomap_readpage_ctx ctx = {
416 .cur_folio = folio,
417 };
418 int ret;
419
420 trace_iomap_readpage(iter.inode, 1);
421
422 while ((ret = iomap_iter(&iter, ops)) > 0)
423 iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
424
425 if (ret < 0)
426 folio_set_error(folio);
427
428 if (ctx.bio) {
429 submit_bio(ctx.bio);
430 WARN_ON_ONCE(!ctx.cur_folio_in_bio);
431 } else {
432 WARN_ON_ONCE(ctx.cur_folio_in_bio);
433 folio_unlock(folio);
434 }
435
436 /*
437 * Just like mpage_readahead and block_read_full_folio, we always
438 * return 0 and just set the folio error flag on errors. This
439 * should be cleaned up throughout the stack eventually.
440 */
441 return 0;
442}
443EXPORT_SYMBOL_GPL(iomap_read_folio);
444
445static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
446 struct iomap_readpage_ctx *ctx)
447{
448 loff_t length = iomap_length(iter);
449 loff_t done, ret;
450
451 for (done = 0; done < length; done += ret) {
452 if (ctx->cur_folio &&
453 offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
454 if (!ctx->cur_folio_in_bio)
455 folio_unlock(ctx->cur_folio);
456 ctx->cur_folio = NULL;
457 }
458 if (!ctx->cur_folio) {
459 ctx->cur_folio = readahead_folio(ctx->rac);
460 ctx->cur_folio_in_bio = false;
461 }
462 ret = iomap_readpage_iter(iter, ctx, done);
463 if (ret <= 0)
464 return ret;
465 }
466
467 return done;
468}
469
470/**
471 * iomap_readahead - Attempt to read pages from a file.
472 * @rac: Describes the pages to be read.
473 * @ops: The operations vector for the filesystem.
474 *
475 * This function is for filesystems to call to implement their readahead
476 * address_space operation.
477 *
478 * Context: The @ops callbacks may submit I/O (eg to read the addresses of
479 * blocks from disc), and may wait for it. The caller may be trying to
480 * access a different page, and so sleeping excessively should be avoided.
481 * It may allocate memory, but should avoid costly allocations. This
482 * function is called with memalloc_nofs set, so allocations will not cause
483 * the filesystem to be reentered.
484 */
485void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
486{
487 struct iomap_iter iter = {
488 .inode = rac->mapping->host,
489 .pos = readahead_pos(rac),
490 .len = readahead_length(rac),
491 };
492 struct iomap_readpage_ctx ctx = {
493 .rac = rac,
494 };
495
496 trace_iomap_readahead(rac->mapping->host, readahead_count(rac));
497
498 while (iomap_iter(&iter, ops) > 0)
499 iter.processed = iomap_readahead_iter(&iter, &ctx);
500
501 if (ctx.bio)
502 submit_bio(ctx.bio);
503 if (ctx.cur_folio) {
504 if (!ctx.cur_folio_in_bio)
505 folio_unlock(ctx.cur_folio);
506 }
507}
508EXPORT_SYMBOL_GPL(iomap_readahead);
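/*
 * A filesystem typically hooks this up from its address_space_operations,
 * roughly like this (names illustrative, not from any particular fs):
 *
 *	static void myfs_readahead(struct readahead_control *rac)
 *	{
 *		iomap_readahead(rac, &myfs_iomap_ops);
 *	}
 */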
509
510/*
511 * iomap_is_partially_uptodate checks whether blocks within a folio are
512 * uptodate or not.
513 *
514 * Returns true if all blocks which correspond to the specified part
515 * of the folio are uptodate.
516 */
517bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
518{
519 struct iomap_folio_state *ifs = folio->private;
520 struct inode *inode = folio->mapping->host;
521 unsigned first, last, i;
522
523 if (!ifs)
524 return false;
525
526 /* Caller's range may extend past the end of this folio */
527 count = min(folio_size(folio) - from, count);
528
529 /* First and last blocks in range within folio */
530 first = from >> inode->i_blkbits;
531 last = (from + count - 1) >> inode->i_blkbits;
532
533 for (i = first; i <= last; i++)
534 if (!ifs_block_is_uptodate(ifs, i))
535 return false;
536 return true;
537}
538EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
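/*
 * Example: with 1KiB filesystem blocks, a query for bytes 512..1535 of a
 * folio checks blocks 0 and 1 and returns true only if both are uptodate.
 */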
539
540/**
541 * iomap_get_folio - get a folio reference for writing
542 * @iter: iteration structure
543 * @pos: start offset of write
544 * @len: Suggested size of folio to create.
545 *
546 * Returns a locked reference to the folio at @pos, or an error pointer if the
547 * folio could not be obtained.
548 */
549struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos, size_t len)
550{
551 fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
552
553 if (iter->flags & IOMAP_NOWAIT)
554 fgp |= FGP_NOWAIT;
555 fgp |= fgf_set_order(len);
556
557 return __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
558 fgp, mapping_gfp_mask(iter->inode->i_mapping));
559}
560EXPORT_SYMBOL_GPL(iomap_get_folio);
561
562bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
563{
564 trace_iomap_release_folio(folio->mapping->host, folio_pos(folio),
565 folio_size(folio));
566
567 /*
568 * If the folio is dirty, we refuse to release our metadata because
569 * it may be partially dirty. Once we track per-block dirty state,
570 * we can release the metadata if every block is dirty.
571 */
572 if (folio_test_dirty(folio))
573 return false;
574 ifs_free(folio);
575 return true;
576}
577EXPORT_SYMBOL_GPL(iomap_release_folio);
578
579void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
580{
581 trace_iomap_invalidate_folio(folio->mapping->host,
582 folio_pos(folio) + offset, len);
583
584 /*
585 * If we're invalidating the entire folio, clear the dirty state
586 * from it and release it to avoid unnecessary buildup of the LRU.
587 */
588 if (offset == 0 && len == folio_size(folio)) {
589 WARN_ON_ONCE(folio_test_writeback(folio));
590 folio_cancel_dirty(folio);
591 ifs_free(folio);
592 }
593}
594EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
595
596bool iomap_dirty_folio(struct address_space *mapping, struct folio *folio)
597{
598 struct inode *inode = mapping->host;
599 size_t len = folio_size(folio);
600
601 ifs_alloc(inode, folio, 0);
602 iomap_set_range_dirty(folio, 0, len);
603 return filemap_dirty_folio(mapping, folio);
604}
605EXPORT_SYMBOL_GPL(iomap_dirty_folio);
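/*
 * This is meant to be used as the ->dirty_folio address_space operation.
 * The caller (e.g. a write fault through mmap) dirties the whole folio, so
 * every block is marked dirty here before the folio itself is.
 */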
606
607static void
608iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
609{
610 loff_t i_size = i_size_read(inode);
611
612 /*
613	 * Only truncate newly allocated pages beyond EOF, even if the
614 * write started inside the existing inode size.
615 */
616 if (pos + len > i_size)
617 truncate_pagecache_range(inode, max(pos, i_size),
618 pos + len - 1);
619}
620
621static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
622 size_t poff, size_t plen, const struct iomap *iomap)
623{
624 struct bio_vec bvec;
625 struct bio bio;
626
627 bio_init(&bio, iomap->bdev, &bvec, 1, REQ_OP_READ);
628 bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
629 bio_add_folio_nofail(&bio, folio, plen, poff);
630 return submit_bio_wait(&bio);
631}
632
633static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
634 size_t len, struct folio *folio)
635{
636 const struct iomap *srcmap = iomap_iter_srcmap(iter);
637 struct iomap_folio_state *ifs;
638 loff_t block_size = i_blocksize(iter->inode);
639 loff_t block_start = round_down(pos, block_size);
640 loff_t block_end = round_up(pos + len, block_size);
641 unsigned int nr_blocks = i_blocks_per_folio(iter->inode, folio);
642 size_t from = offset_in_folio(folio, pos), to = from + len;
643 size_t poff, plen;
644
645 /*
646 * If the write or zeroing completely overlaps the current folio, then
647	 * the entire folio will be dirtied so there is no need for
648 * per-block state tracking structures to be attached to this folio.
649 * For the unshare case, we must read in the ondisk contents because we
650 * are not changing pagecache contents.
651 */
652 if (!(iter->flags & IOMAP_UNSHARE) && pos <= folio_pos(folio) &&
653 pos + len >= folio_pos(folio) + folio_size(folio))
654 return 0;
655
656 ifs = ifs_alloc(iter->inode, folio, iter->flags);
657 if ((iter->flags & IOMAP_NOWAIT) && !ifs && nr_blocks > 1)
658 return -EAGAIN;
659
660 if (folio_test_uptodate(folio))
661 return 0;
662 folio_clear_error(folio);
663
664 do {
665 iomap_adjust_read_range(iter->inode, folio, &block_start,
666 block_end - block_start, &poff, &plen);
667 if (plen == 0)
668 break;
669
670 if (!(iter->flags & IOMAP_UNSHARE) &&
671 (from <= poff || from >= poff + plen) &&
672 (to <= poff || to >= poff + plen))
673 continue;
674
675 if (iomap_block_needs_zeroing(iter, block_start)) {
676 if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
677 return -EIO;
678 folio_zero_segments(folio, poff, from, to, poff + plen);
679 } else {
680 int status;
681
682 if (iter->flags & IOMAP_NOWAIT)
683 return -EAGAIN;
684
685 status = iomap_read_folio_sync(block_start, folio,
686 poff, plen, srcmap);
687 if (status)
688 return status;
689 }
690 iomap_set_range_uptodate(folio, poff, plen);
691 } while ((block_start += plen) < block_end);
692
693 return 0;
694}
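/*
 * To summarise __iomap_write_begin(): blocks that the write fully
 * overwrites are left untouched, while the remaining not-uptodate blocks in
 * the range are either zeroed in the page cache (holes, unwritten or
 * freshly allocated extents, post-EOF) or read synchronously from the
 * srcmap, and then marked uptodate before the copy-in proceeds.
 */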
695
696static struct folio *__iomap_get_folio(struct iomap_iter *iter, loff_t pos,
697 size_t len)
698{
699 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
700
701 if (folio_ops && folio_ops->get_folio)
702 return folio_ops->get_folio(iter, pos, len);
703 else
704 return iomap_get_folio(iter, pos, len);
705}
706
707static void __iomap_put_folio(struct iomap_iter *iter, loff_t pos, size_t ret,
708 struct folio *folio)
709{
710 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
711
712 if (folio_ops && folio_ops->put_folio) {
713 folio_ops->put_folio(iter->inode, pos, ret, folio);
714 } else {
715 folio_unlock(folio);
716 folio_put(folio);
717 }
718}
719
720static int iomap_write_begin_inline(const struct iomap_iter *iter,
721 struct folio *folio)
722{
723 /* needs more work for the tailpacking case; disable for now */
724 if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
725 return -EIO;
726 return iomap_read_inline_data(iter, folio);
727}
728
729static int iomap_write_begin(struct iomap_iter *iter, loff_t pos,
730 size_t len, struct folio **foliop)
731{
732 const struct iomap_folio_ops *folio_ops = iter->iomap.folio_ops;
733 const struct iomap *srcmap = iomap_iter_srcmap(iter);
734 struct folio *folio;
735 int status = 0;
736
737 BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
738 if (srcmap != &iter->iomap)
739 BUG_ON(pos + len > srcmap->offset + srcmap->length);
740
741 if (fatal_signal_pending(current))
742 return -EINTR;
743
744 if (!mapping_large_folio_support(iter->inode->i_mapping))
745 len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
746
747 folio = __iomap_get_folio(iter, pos, len);
748 if (IS_ERR(folio))
749 return PTR_ERR(folio);
750
751 /*
752 * Now we have a locked folio, before we do anything with it we need to
753 * check that the iomap we have cached is not stale. The inode extent
754 * mapping can change due to concurrent IO in flight (e.g.
755 * IOMAP_UNWRITTEN state can change and memory reclaim could have
756 * reclaimed a previously partially written page at this index after IO
757 * completion before this write reaches this file offset) and hence we
758 * could do the wrong thing here (zero a page range incorrectly or fail
759 * to zero) and corrupt data.
760 */
761 if (folio_ops && folio_ops->iomap_valid) {
762 bool iomap_valid = folio_ops->iomap_valid(iter->inode,
763 &iter->iomap);
764 if (!iomap_valid) {
765 iter->iomap.flags |= IOMAP_F_STALE;
766 status = 0;
767 goto out_unlock;
768 }
769 }
770
771 if (pos + len > folio_pos(folio) + folio_size(folio))
772 len = folio_pos(folio) + folio_size(folio) - pos;
773
774 if (srcmap->type == IOMAP_INLINE)
775 status = iomap_write_begin_inline(iter, folio);
776 else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
777 status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
778 else
779 status = __iomap_write_begin(iter, pos, len, folio);
780
781 if (unlikely(status))
782 goto out_unlock;
783
784 *foliop = folio;
785 return 0;
786
787out_unlock:
788 __iomap_put_folio(iter, pos, 0, folio);
789 iomap_write_failed(iter->inode, pos, len);
790
791 return status;
792}
793
794static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
795 size_t copied, struct folio *folio)
796{
797 flush_dcache_folio(folio);
798
799 /*
800 * The blocks that were entirely written will now be uptodate, so we
801 * don't have to worry about a read_folio reading them and overwriting a
802 * partial write. However, if we've encountered a short write and only
803 * partially written into a block, it will not be marked uptodate, so a
804 * read_folio might come in and destroy our partial write.
805 *
806 * Do the simplest thing and just treat any short write to a
807 * non-uptodate page as a zero-length write, and force the caller to
808 * redo the whole thing.
809 */
810 if (unlikely(copied < len && !folio_test_uptodate(folio)))
811 return 0;
812 iomap_set_range_uptodate(folio, offset_in_folio(folio, pos), len);
813 iomap_set_range_dirty(folio, offset_in_folio(folio, pos), copied);
814 filemap_dirty_folio(inode->i_mapping, folio);
815 return copied;
816}
817
818static size_t iomap_write_end_inline(const struct iomap_iter *iter,
819 struct folio *folio, loff_t pos, size_t copied)
820{
821 const struct iomap *iomap = &iter->iomap;
822 void *addr;
823
824 WARN_ON_ONCE(!folio_test_uptodate(folio));
825 BUG_ON(!iomap_inline_data_valid(iomap));
826
827 flush_dcache_folio(folio);
828 addr = kmap_local_folio(folio, pos);
829 memcpy(iomap_inline_data(iomap, pos), addr, copied);
830 kunmap_local(addr);
831
832 mark_inode_dirty(iter->inode);
833 return copied;
834}
835
836/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
837static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
838 size_t copied, struct folio *folio)
839{
840 const struct iomap *srcmap = iomap_iter_srcmap(iter);
841 loff_t old_size = iter->inode->i_size;
842 size_t ret;
843
844 if (srcmap->type == IOMAP_INLINE) {
845 ret = iomap_write_end_inline(iter, folio, pos, copied);
846 } else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
847 ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
848 copied, &folio->page, NULL);
849 } else {
850 ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
851 }
852
853 /*
854 * Update the in-memory inode size after copying the data into the page
855 * cache. It's up to the file system to write the updated size to disk,
856 * preferably after I/O completion so that no stale data is exposed.
857 */
858 if (pos + ret > old_size) {
859 i_size_write(iter->inode, pos + ret);
860 iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
861 }
862 __iomap_put_folio(iter, pos, ret, folio);
863
864 if (old_size < pos)
865 pagecache_isize_extended(iter->inode, old_size, pos);
866 if (ret < len)
867 iomap_write_failed(iter->inode, pos + ret, len - ret);
868 return ret;
869}
870
871static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
872{
873 loff_t length = iomap_length(iter);
874 size_t chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
875 loff_t pos = iter->pos;
876 ssize_t written = 0;
877 long status = 0;
878 struct address_space *mapping = iter->inode->i_mapping;
879 unsigned int bdp_flags = (iter->flags & IOMAP_NOWAIT) ? BDP_ASYNC : 0;
880
881 do {
882 struct folio *folio;
883 size_t offset; /* Offset into folio */
884 size_t bytes; /* Bytes to write to folio */
885 size_t copied; /* Bytes copied from user */
886
887 bytes = iov_iter_count(i);
888retry:
889 offset = pos & (chunk - 1);
890 bytes = min(chunk - offset, bytes);
891 status = balance_dirty_pages_ratelimited_flags(mapping,
892 bdp_flags);
893 if (unlikely(status))
894 break;
895
896 if (bytes > length)
897 bytes = length;
898
899 /*
900 * Bring in the user page that we'll copy from _first_.
901 * Otherwise there's a nasty deadlock on copying from the
902 * same page as we're writing to, without it being marked
903 * up-to-date.
904 *
905 * For async buffered writes the assumption is that the user
906 * page has already been faulted in. This can be optimized by
907 * faulting the user page.
908 */
909 if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
910 status = -EFAULT;
911 break;
912 }
913
914 status = iomap_write_begin(iter, pos, bytes, &folio);
915 if (unlikely(status))
916 break;
917 if (iter->iomap.flags & IOMAP_F_STALE)
918 break;
919
920 offset = offset_in_folio(folio, pos);
921 if (bytes > folio_size(folio) - offset)
922 bytes = folio_size(folio) - offset;
923
924 if (mapping_writably_mapped(mapping))
925 flush_dcache_folio(folio);
926
927 copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);
928 status = iomap_write_end(iter, pos, bytes, copied, folio);
929
930 if (unlikely(copied != status))
931 iov_iter_revert(i, copied - status);
932
933 cond_resched();
934 if (unlikely(status == 0)) {
935 /*
936 * A short copy made iomap_write_end() reject the
937 * thing entirely. Might be memory poisoning
938 * halfway through, might be a race with munmap,
939 * might be severe memory pressure.
940 */
941 if (chunk > PAGE_SIZE)
942 chunk /= 2;
943 if (copied) {
944 bytes = copied;
945 goto retry;
946 }
947 } else {
948 pos += status;
949 written += status;
950 length -= status;
951 }
952 } while (iov_iter_count(i) && length);
953
954 if (status == -EAGAIN) {
955 iov_iter_revert(i, written);
956 return -EAGAIN;
957 }
958 return written ? written : status;
959}
960
961ssize_t
962iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *i,
963 const struct iomap_ops *ops)
964{
965 struct iomap_iter iter = {
966 .inode = iocb->ki_filp->f_mapping->host,
967 .pos = iocb->ki_pos,
968 .len = iov_iter_count(i),
969 .flags = IOMAP_WRITE,
970 };
971 ssize_t ret;
972
973 if (iocb->ki_flags & IOCB_NOWAIT)
974 iter.flags |= IOMAP_NOWAIT;
975
976 while ((ret = iomap_iter(&iter, ops)) > 0)
977 iter.processed = iomap_write_iter(&iter, i);
978
979 if (unlikely(iter.pos == iocb->ki_pos))
980 return ret;
981 ret = iter.pos - iocb->ki_pos;
982 iocb->ki_pos = iter.pos;
983 return ret;
984}
985EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
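/*
 * Minimal illustrative caller (locking simplified, "myfs" names are
 * hypothetical):
 *
 *	static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = iomap_file_buffered_write(iocb, from, &myfs_iomap_ops);
 *		inode_unlock(inode);
 *		return ret;
 *	}
 */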
986
987static int iomap_write_delalloc_ifs_punch(struct inode *inode,
988 struct folio *folio, loff_t start_byte, loff_t end_byte,
989 iomap_punch_t punch)
990{
991 unsigned int first_blk, last_blk, i;
992 loff_t last_byte;
993 u8 blkbits = inode->i_blkbits;
994 struct iomap_folio_state *ifs;
995 int ret = 0;
996
997 /*
998 * When we have per-block dirty tracking, there can be
999 * blocks within a folio which are marked uptodate
1000 * but not dirty. In that case it is necessary to punch
1001 * out such blocks to avoid leaking any delalloc blocks.
1002 */
1003 ifs = folio->private;
1004 if (!ifs)
1005 return ret;
1006
1007 last_byte = min_t(loff_t, end_byte - 1,
1008 folio_pos(folio) + folio_size(folio) - 1);
1009 first_blk = offset_in_folio(folio, start_byte) >> blkbits;
1010 last_blk = offset_in_folio(folio, last_byte) >> blkbits;
1011 for (i = first_blk; i <= last_blk; i++) {
1012 if (!ifs_block_is_dirty(folio, ifs, i)) {
1013 ret = punch(inode, folio_pos(folio) + (i << blkbits),
1014 1 << blkbits);
1015 if (ret)
1016 return ret;
1017 }
1018 }
1019
1020 return ret;
1021}
1022
1023
1024static int iomap_write_delalloc_punch(struct inode *inode, struct folio *folio,
1025 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1026 iomap_punch_t punch)
1027{
1028 int ret = 0;
1029
1030 if (!folio_test_dirty(folio))
1031 return ret;
1032
1033 /* if dirty, punch up to offset */
1034 if (start_byte > *punch_start_byte) {
1035 ret = punch(inode, *punch_start_byte,
1036 start_byte - *punch_start_byte);
1037 if (ret)
1038 return ret;
1039 }
1040
1041 /* Punch non-dirty blocks within folio */
1042 ret = iomap_write_delalloc_ifs_punch(inode, folio, start_byte,
1043 end_byte, punch);
1044 if (ret)
1045 return ret;
1046
1047 /*
1048 * Make sure the next punch start is correctly bound to
1049 * the end of this data range, not the end of the folio.
1050 */
1051 *punch_start_byte = min_t(loff_t, end_byte,
1052 folio_pos(folio) + folio_size(folio));
1053
1054 return ret;
1055}
1056
1057/*
1058 * Scan the data range passed to us for dirty page cache folios. If we find a
1059 * dirty folio, punch out the preceding range and update the offset from which
1060 * the next punch will start.
1061 *
1062 * We can punch out storage reservations under clean pages because they either
1063 * contain data that has been written back - in which case the delalloc punch
1064 * over that range is a no-op - or they were instantiated by read faults, in
1065 * which case they contain zeroes, so we can remove the delalloc backing range
1066 * and any new writes to those pages will do the normal hole filling operation...
1067 *
1068 * This makes the logic simple: we only need to keep delalloc extents over
1069 * the dirty ranges of the page cache.
1070 *
1071 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1072 * simplify range iterations.
1073 */
1074static int iomap_write_delalloc_scan(struct inode *inode,
1075 loff_t *punch_start_byte, loff_t start_byte, loff_t end_byte,
1076 iomap_punch_t punch)
1077{
1078 while (start_byte < end_byte) {
1079 struct folio *folio;
1080 int ret;
1081
1082 /* grab locked page */
1083 folio = filemap_lock_folio(inode->i_mapping,
1084 start_byte >> PAGE_SHIFT);
1085 if (IS_ERR(folio)) {
1086 start_byte = ALIGN_DOWN(start_byte, PAGE_SIZE) +
1087 PAGE_SIZE;
1088 continue;
1089 }
1090
1091 ret = iomap_write_delalloc_punch(inode, folio, punch_start_byte,
1092 start_byte, end_byte, punch);
1093 if (ret) {
1094 folio_unlock(folio);
1095 folio_put(folio);
1096 return ret;
1097 }
1098
1099 /* move offset to start of next folio in range */
1100 start_byte = folio_next_index(folio) << PAGE_SHIFT;
1101 folio_unlock(folio);
1102 folio_put(folio);
1103 }
1104 return 0;
1105}
1106
1107/*
1108 * Punch out all the delalloc blocks in the range given except for those that
1109 * have dirty data still pending in the page cache - those are going to be
1110 * written and so must still retain the delalloc backing for writeback.
1111 *
1112 * As we are scanning the page cache for data, we don't need to reimplement the
1113 * wheel - mapping_seek_hole_data() does exactly what we need to identify the
1114 * start and end of data ranges correctly even for sub-folio block sizes. This
1115 * byte range based iteration is especially convenient because it means we
1116 * don't have to care about variable size folios, nor where the start or end of
1117 * the data range lies within a folio, if they lie within the same folio or even
1118 * if there are multiple discontiguous data ranges within the folio.
1119 *
1120 * It should be noted that mapping_seek_hole_data() is not aware of EOF, and so
1121 * can return data ranges that exist in the cache beyond EOF. e.g. a page fault
1122 * spanning EOF will initialise the post-EOF data to zeroes and mark it up to
1123 * date. A write page fault can then mark it dirty. If we then fail a write()
1124 * beyond EOF into that up to date cached range, we allocate a delalloc block
1125 * beyond EOF and then have to punch it out. Because the range is up to date,
1126 * mapping_seek_hole_data() will return it, and we will skip the punch because
1127 * the folio is dirty. This is incorrect - we always need to punch out delalloc
1128 * beyond EOF in this case as writeback will never write back and convert that
1129 * delalloc block beyond EOF. Hence we limit the cached data scan range to EOF,
1130 * resulting in always punching out the range from the EOF to the end of the
1131 * range the iomap spans.
1132 *
1133 * Intervals are of the form [start_byte, end_byte) (i.e. open ended) because it
1134 * matches the intervals returned by mapping_seek_hole_data(). i.e. SEEK_DATA
1135 * returns the start of a data range (start_byte), and SEEK_HOLE(start_byte)
1136 * returns the end of the data range (data_end). Using closed intervals would
1137 * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
1138 * the code to subtle off-by-one bugs....
1139 */
1140static int iomap_write_delalloc_release(struct inode *inode,
1141 loff_t start_byte, loff_t end_byte, iomap_punch_t punch)
1142{
1143 loff_t punch_start_byte = start_byte;
1144 loff_t scan_end_byte = min(i_size_read(inode), end_byte);
1145 int error = 0;
1146
1147 /*
1148 * Lock the mapping to avoid races with page faults re-instantiating
1149 * folios and dirtying them via ->page_mkwrite whilst we walk the
1150 * cache and perform delalloc extent removal. Failing to do this can
1151 * leave dirty pages with no space reservation in the cache.
1152 */
1153 filemap_invalidate_lock(inode->i_mapping);
1154 while (start_byte < scan_end_byte) {
1155 loff_t data_end;
1156
1157 start_byte = mapping_seek_hole_data(inode->i_mapping,
1158 start_byte, scan_end_byte, SEEK_DATA);
1159 /*
1160 * If there is no more data to scan, all that is left is to
1161 * punch out the remaining range.
1162 */
1163 if (start_byte == -ENXIO || start_byte == scan_end_byte)
1164 break;
1165 if (start_byte < 0) {
1166 error = start_byte;
1167 goto out_unlock;
1168 }
1169 WARN_ON_ONCE(start_byte < punch_start_byte);
1170 WARN_ON_ONCE(start_byte > scan_end_byte);
1171
1172 /*
1173 * We find the end of this contiguous cached data range by
1174 * seeking from start_byte to the beginning of the next hole.
1175 */
1176 data_end = mapping_seek_hole_data(inode->i_mapping, start_byte,
1177 scan_end_byte, SEEK_HOLE);
1178 if (data_end < 0) {
1179 error = data_end;
1180 goto out_unlock;
1181 }
1182 WARN_ON_ONCE(data_end <= start_byte);
1183 WARN_ON_ONCE(data_end > scan_end_byte);
1184
1185 error = iomap_write_delalloc_scan(inode, &punch_start_byte,
1186 start_byte, data_end, punch);
1187 if (error)
1188 goto out_unlock;
1189
1190 /* The next data search starts at the end of this one. */
1191 start_byte = data_end;
1192 }
1193
1194 if (punch_start_byte < end_byte)
1195 error = punch(inode, punch_start_byte,
1196 end_byte - punch_start_byte);
1197out_unlock:
1198 filemap_invalidate_unlock(inode->i_mapping);
1199 return error;
1200}
1201
1202/*
1203 * When a short write occurs, the filesystem may need to remove reserved space
1204 * that was allocated in ->iomap_begin from its ->iomap_end method. For
1205 * filesystems that use delayed allocation, we need to punch out delalloc
1206 * extents from the range that are not dirty in the page cache. As the write can
1207 * race with page faults, there can be dirty pages over the delalloc extent
1208 * outside the range of a short write but still within the delalloc extent
1209 * allocated for this iomap.
1210 *
1211 * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
1212 * simplify range iterations.
1213 *
1214 * The punch() callback *must* only punch delalloc extents in the range passed
1215 * to it. It must skip over all other types of extents in the range and leave
1216 * them completely unchanged. It must do this punch atomically with respect to
1217 * other extent modifications.
1218 *
1219 * The punch() callback may be called with a folio locked to prevent writeback
1220 * extent allocation racing at the edge of the range we are currently punching.
1221 * The locked folio may or may not cover the range being punched, so it is not
1222 * safe for the punch() callback to lock folios itself.
1223 *
1224 * Lock order is:
1225 *
1226 * inode->i_rwsem (shared or exclusive)
1227 * inode->i_mapping->invalidate_lock (exclusive)
1228 * folio_lock()
1229 * ->punch
1230 * internal filesystem allocation lock
1231 */
1232int iomap_file_buffered_write_punch_delalloc(struct inode *inode,
1233 struct iomap *iomap, loff_t pos, loff_t length,
1234 ssize_t written, iomap_punch_t punch)
1235{
1236 loff_t start_byte;
1237 loff_t end_byte;
1238 unsigned int blocksize = i_blocksize(inode);
1239
1240 if (iomap->type != IOMAP_DELALLOC)
1241 return 0;
1242
1243 /* If we didn't reserve the blocks, we're not allowed to punch them. */
1244 if (!(iomap->flags & IOMAP_F_NEW))
1245 return 0;
1246
1247 /*
1248 * start_byte refers to the first unused block after a short write. If
1249 * nothing was written, round offset down to point at the first block in
1250 * the range.
1251 */
1252 if (unlikely(!written))
1253 start_byte = round_down(pos, blocksize);
1254 else
1255 start_byte = round_up(pos + written, blocksize);
1256 end_byte = round_up(pos + length, blocksize);
1257
1258 /* Nothing to do if we've written the entire delalloc extent */
1259 if (start_byte >= end_byte)
1260 return 0;
1261
1262 return iomap_write_delalloc_release(inode, start_byte, end_byte,
1263 punch);
1264}
1265EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
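/*
 * Sketch of a delalloc filesystem using this from its ->iomap_end method
 * after a short or failed buffered write ("myfs" names are hypothetical):
 *
 *	static int myfs_buffered_write_iomap_end(struct inode *inode, loff_t pos,
 *			loff_t length, ssize_t written, unsigned flags,
 *			struct iomap *iomap)
 *	{
 *		return iomap_file_buffered_write_punch_delalloc(inode, iomap,
 *				pos, length, written, myfs_punch_delalloc);
 *	}
 *
 * where myfs_punch_delalloc() matches iomap_punch_t and removes only
 * delalloc extents in the byte range it is given.
 */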
1266
1267static loff_t iomap_unshare_iter(struct iomap_iter *iter)
1268{
1269 struct iomap *iomap = &iter->iomap;
1270 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1271 loff_t pos = iter->pos;
1272 loff_t length = iomap_length(iter);
1273 loff_t written = 0;
1274
1275 /* don't bother with blocks that are not shared to start with */
1276 if (!(iomap->flags & IOMAP_F_SHARED))
1277 return length;
1278 /* don't bother with holes or unwritten extents */
1279 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1280 return length;
1281
1282 do {
1283 struct folio *folio;
1284 int status;
1285 size_t offset;
1286 size_t bytes = min_t(u64, SIZE_MAX, length);
1287
1288 status = iomap_write_begin(iter, pos, bytes, &folio);
1289 if (unlikely(status))
1290 return status;
1291 if (iomap->flags & IOMAP_F_STALE)
1292 break;
1293
1294 offset = offset_in_folio(folio, pos);
1295 if (bytes > folio_size(folio) - offset)
1296 bytes = folio_size(folio) - offset;
1297
1298 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
1299 if (WARN_ON_ONCE(bytes == 0))
1300 return -EIO;
1301
1302 cond_resched();
1303
1304 pos += bytes;
1305 written += bytes;
1306 length -= bytes;
1307
1308 balance_dirty_pages_ratelimited(iter->inode->i_mapping);
1309 } while (length > 0);
1310
1311 return written;
1312}
1313
1314int
1315iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
1316 const struct iomap_ops *ops)
1317{
1318 struct iomap_iter iter = {
1319 .inode = inode,
1320 .pos = pos,
1321 .len = len,
1322 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
1323 };
1324 int ret;
1325
1326 while ((ret = iomap_iter(&iter, ops)) > 0)
1327 iter.processed = iomap_unshare_iter(&iter);
1328 return ret;
1329}
1330EXPORT_SYMBOL_GPL(iomap_file_unshare);
1331
1332static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
1333{
1334 const struct iomap *srcmap = iomap_iter_srcmap(iter);
1335 loff_t pos = iter->pos;
1336 loff_t length = iomap_length(iter);
1337 loff_t written = 0;
1338
1339 /* already zeroed? we're done. */
1340 if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
1341 return length;
1342
1343 do {
1344 struct folio *folio;
1345 int status;
1346 size_t offset;
1347 size_t bytes = min_t(u64, SIZE_MAX, length);
1348
1349 status = iomap_write_begin(iter, pos, bytes, &folio);
1350 if (status)
1351 return status;
1352 if (iter->iomap.flags & IOMAP_F_STALE)
1353 break;
1354
1355 offset = offset_in_folio(folio, pos);
1356 if (bytes > folio_size(folio) - offset)
1357 bytes = folio_size(folio) - offset;
1358
1359 folio_zero_range(folio, offset, bytes);
1360 folio_mark_accessed(folio);
1361
1362 bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
1363 if (WARN_ON_ONCE(bytes == 0))
1364 return -EIO;
1365
1366 pos += bytes;
1367 length -= bytes;
1368 written += bytes;
1369 } while (length > 0);
1370
1371 if (did_zero)
1372 *did_zero = true;
1373 return written;
1374}
1375
1376int
1377iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
1378 const struct iomap_ops *ops)
1379{
1380 struct iomap_iter iter = {
1381 .inode = inode,
1382 .pos = pos,
1383 .len = len,
1384 .flags = IOMAP_ZERO,
1385 };
1386 int ret;
1387
1388 while ((ret = iomap_iter(&iter, ops)) > 0)
1389 iter.processed = iomap_zero_iter(&iter, did_zero);
1390 return ret;
1391}
1392EXPORT_SYMBOL_GPL(iomap_zero_range);
1393
1394int
1395iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
1396 const struct iomap_ops *ops)
1397{
1398 unsigned int blocksize = i_blocksize(inode);
1399 unsigned int off = pos & (blocksize - 1);
1400
1401 /* Block boundary? Nothing to do */
1402 if (!off)
1403 return 0;
1404 return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
1405}
1406EXPORT_SYMBOL_GPL(iomap_truncate_page);
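/*
 * Example: with a 4096-byte block size, truncating a file to 6000 bytes
 * gives off = 6000 & 4095 = 1904, so the remaining 2192 bytes of the last
 * block (file offsets 6000..8191) are zeroed.
 */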
1407
1408static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
1409 struct folio *folio)
1410{
1411 loff_t length = iomap_length(iter);
1412 int ret;
1413
1414 if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
1415 ret = __block_write_begin_int(folio, iter->pos, length, NULL,
1416 &iter->iomap);
1417 if (ret)
1418 return ret;
1419 block_commit_write(&folio->page, 0, length);
1420 } else {
1421 WARN_ON_ONCE(!folio_test_uptodate(folio));
1422 folio_mark_dirty(folio);
1423 }
1424
1425 return length;
1426}
1427
1428vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
1429{
1430 struct iomap_iter iter = {
1431 .inode = file_inode(vmf->vma->vm_file),
1432 .flags = IOMAP_WRITE | IOMAP_FAULT,
1433 };
1434 struct folio *folio = page_folio(vmf->page);
1435 ssize_t ret;
1436
1437 folio_lock(folio);
1438 ret = folio_mkwrite_check_truncate(folio, iter.inode);
1439 if (ret < 0)
1440 goto out_unlock;
1441 iter.pos = folio_pos(folio);
1442 iter.len = ret;
1443 while ((ret = iomap_iter(&iter, ops)) > 0)
1444 iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
1445
1446 if (ret < 0)
1447 goto out_unlock;
1448 folio_wait_stable(folio);
1449 return VM_FAULT_LOCKED;
1450out_unlock:
1451 folio_unlock(folio);
1452 return vmf_fs_error(ret);
1453}
1454EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
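/*
 * Illustrative wiring from a filesystem's vm_operations_struct (names
 * hypothetical; real callers usually also bracket this with their own
 * freeze protection):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return iomap_page_mkwrite(vmf, &myfs_iomap_ops);
 *	}
 */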
1455
1456static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
1457 size_t len, int error)
1458{
1459 struct iomap_folio_state *ifs = folio->private;
1460
1461 if (error) {
1462 folio_set_error(folio);
1463 mapping_set_error(inode->i_mapping, error);
1464 }
1465
1466 WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !ifs);
1467 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) <= 0);
1468
1469 if (!ifs || atomic_sub_and_test(len, &ifs->write_bytes_pending))
1470 folio_end_writeback(folio);
1471}
1472
1473/*
1474 * We're now finished for good with this ioend structure. Update the page
1475 * state, release holds on bios, and finally free up memory. Do not use the
1476 * ioend after this.
1477 */
1478static u32
1479iomap_finish_ioend(struct iomap_ioend *ioend, int error)
1480{
1481 struct inode *inode = ioend->io_inode;
1482 struct bio *bio = &ioend->io_inline_bio;
1483 struct bio *last = ioend->io_bio, *next;
1484 u64 start = bio->bi_iter.bi_sector;
1485 loff_t offset = ioend->io_offset;
1486 bool quiet = bio_flagged(bio, BIO_QUIET);
1487 u32 folio_count = 0;
1488
1489 for (bio = &ioend->io_inline_bio; bio; bio = next) {
1490 struct folio_iter fi;
1491
1492 /*
1493 * For the last bio, bi_private points to the ioend, so we
1494 * need to explicitly end the iteration here.
1495 */
1496 if (bio == last)
1497 next = NULL;
1498 else
1499 next = bio->bi_private;
1500
1501 /* walk all folios in bio, ending page IO on them */
1502 bio_for_each_folio_all(fi, bio) {
1503 iomap_finish_folio_write(inode, fi.folio, fi.length,
1504 error);
1505 folio_count++;
1506 }
1507 bio_put(bio);
1508 }
1509 /* The ioend has been freed by bio_put() */
1510
1511 if (unlikely(error && !quiet)) {
1512 printk_ratelimited(KERN_ERR
1513"%s: writeback error on inode %lu, offset %lld, sector %llu",
1514 inode->i_sb->s_id, inode->i_ino, offset, start);
1515 }
1516 return folio_count;
1517}
1518
1519/*
1520 * Ioend completion routine for merged bios. This can only be called from task
1521 * contexts as merged ioends can be of unbounded length. Hence we have to break up
1522 * the writeback completions into manageable chunks to avoid long scheduler
1523 * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get
1524 * good batch processing throughput without creating adverse scheduler latency
1525 * conditions.
1526 */
1527void
1528iomap_finish_ioends(struct iomap_ioend *ioend, int error)
1529{
1530 struct list_head tmp;
1531 u32 completions;
1532
1533 might_sleep();
1534
1535 list_replace_init(&ioend->io_list, &tmp);
1536 completions = iomap_finish_ioend(ioend, error);
1537
1538 while (!list_empty(&tmp)) {
1539 if (completions > IOEND_BATCH_SIZE * 8) {
1540 cond_resched();
1541 completions = 0;
1542 }
1543 ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
1544 list_del_init(&ioend->io_list);
1545 completions += iomap_finish_ioend(ioend, error);
1546 }
1547}
1548EXPORT_SYMBOL_GPL(iomap_finish_ioends);
1549
1550/*
1551 * We can merge two adjacent ioends if they have the same set of work to do.
1552 */
1553static bool
1554iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
1555{
1556 if (ioend->io_bio->bi_status != next->io_bio->bi_status)
1557 return false;
1558 if ((ioend->io_flags & IOMAP_F_SHARED) ^
1559 (next->io_flags & IOMAP_F_SHARED))
1560 return false;
1561 if ((ioend->io_type == IOMAP_UNWRITTEN) ^
1562 (next->io_type == IOMAP_UNWRITTEN))
1563 return false;
1564 if (ioend->io_offset + ioend->io_size != next->io_offset)
1565 return false;
1566 /*
1567 * Do not merge physically discontiguous ioends. The filesystem
1568 * completion functions will have to iterate the physical
1569 * discontiguities even if we merge the ioends at a logical level, so
1570 * we don't gain anything by merging physical discontiguities here.
1571 *
1572 * We cannot use bio->bi_iter.bi_sector here as it is modified during
1573 * submission so does not point to the start sector of the bio at
1574 * completion.
1575 */
1576 if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
1577 return false;
1578 return true;
1579}
1580
1581void
1582iomap_ioend_try_merge(struct iomap_ioend *ioend, struct list_head *more_ioends)
1583{
1584 struct iomap_ioend *next;
1585
1586 INIT_LIST_HEAD(&ioend->io_list);
1587
1588 while ((next = list_first_entry_or_null(more_ioends, struct iomap_ioend,
1589 io_list))) {
1590 if (!iomap_ioend_can_merge(ioend, next))
1591 break;
1592 list_move_tail(&next->io_list, &ioend->io_list);
1593 ioend->io_size += next->io_size;
1594 }
1595}
1596EXPORT_SYMBOL_GPL(iomap_ioend_try_merge);
1597
1598static int
1599iomap_ioend_compare(void *priv, const struct list_head *a,
1600 const struct list_head *b)
1601{
1602 struct iomap_ioend *ia = container_of(a, struct iomap_ioend, io_list);
1603 struct iomap_ioend *ib = container_of(b, struct iomap_ioend, io_list);
1604
1605 if (ia->io_offset < ib->io_offset)
1606 return -1;
1607 if (ia->io_offset > ib->io_offset)
1608 return 1;
1609 return 0;
1610}
1611
1612void
1613iomap_sort_ioends(struct list_head *ioend_list)
1614{
1615 list_sort(NULL, ioend_list, iomap_ioend_compare);
1616}
1617EXPORT_SYMBOL_GPL(iomap_sort_ioends);
1618
1619static void iomap_writepage_end_bio(struct bio *bio)
1620{
1621 struct iomap_ioend *ioend = bio->bi_private;
1622
1623 iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
1624}
1625
1626/*
1627 * Submit the final bio for an ioend.
1628 *
1629 * If @error is non-zero, it means that we have a situation where some part of
1630 * the submission process has failed after we've marked pages for writeback
1631 * and unlocked them. In this situation, we need to fail the bio instead of
1632 * submitting it. This typically only happens on a filesystem shutdown.
1633 */
1634static int
1635iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
1636 int error)
1637{
1638 ioend->io_bio->bi_private = ioend;
1639 ioend->io_bio->bi_end_io = iomap_writepage_end_bio;
1640
1641 if (wpc->ops->prepare_ioend)
1642 error = wpc->ops->prepare_ioend(ioend, error);
1643 if (error) {
1644 /*
1645 * If we're failing the IO now, just mark the ioend with an
1646 * error and finish it. This will run IO completion immediately
1647 * as there is only one reference to the ioend at this point in
1648 * time.
1649 */
1650 ioend->io_bio->bi_status = errno_to_blk_status(error);
1651 bio_endio(ioend->io_bio);
1652 return error;
1653 }
1654
1655 submit_bio(ioend->io_bio);
1656 return 0;
1657}
1658
1659static struct iomap_ioend *
1660iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
1661 loff_t offset, sector_t sector, struct writeback_control *wbc)
1662{
1663 struct iomap_ioend *ioend;
1664 struct bio *bio;
1665
1666 bio = bio_alloc_bioset(wpc->iomap.bdev, BIO_MAX_VECS,
1667 REQ_OP_WRITE | wbc_to_write_flags(wbc),
1668 GFP_NOFS, &iomap_ioend_bioset);
1669 bio->bi_iter.bi_sector = sector;
1670 wbc_init_bio(wbc, bio);
1671
1672 ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
1673 INIT_LIST_HEAD(&ioend->io_list);
1674 ioend->io_type = wpc->iomap.type;
1675 ioend->io_flags = wpc->iomap.flags;
1676 ioend->io_inode = inode;
1677 ioend->io_size = 0;
1678 ioend->io_folios = 0;
1679 ioend->io_offset = offset;
1680 ioend->io_bio = bio;
1681 ioend->io_sector = sector;
1682 return ioend;
1683}
1684
1685/*
1686 * Allocate a new bio, and chain the old bio to the new one.
1687 *
1688 * Note that we have to perform the chaining in this unintuitive order
1689 * so that the bi_private linkage is set up in the right direction for the
1690 * traversal in iomap_finish_ioend().
1691 */
1692static struct bio *
1693iomap_chain_bio(struct bio *prev)
1694{
1695 struct bio *new;
1696
1697 new = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf, GFP_NOFS);
1698 bio_clone_blkg_association(new, prev);
1699 new->bi_iter.bi_sector = bio_end_sector(prev);
1700
1701 bio_chain(prev, new);
1702 bio_get(prev); /* for iomap_finish_ioend */
1703 submit_bio(prev);
1704 return new;
1705}
1706
1707static bool
1708iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
1709 sector_t sector)
1710{
1711 if ((wpc->iomap.flags & IOMAP_F_SHARED) !=
1712 (wpc->ioend->io_flags & IOMAP_F_SHARED))
1713 return false;
1714 if (wpc->iomap.type != wpc->ioend->io_type)
1715 return false;
1716 if (offset != wpc->ioend->io_offset + wpc->ioend->io_size)
1717 return false;
1718 if (sector != bio_end_sector(wpc->ioend->io_bio))
1719 return false;
1720 /*
1721 * Limit ioend bio chain lengths to minimise IO completion latency. This
1722 * also prevents long tight loops ending page writeback on all the
1723 * folios in the ioend.
1724 */
1725 if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
1726 return false;
1727 return true;
1728}
1729
1730/*
1731 * Test to see if we have an existing ioend structure that we could append to
1732 * first; otherwise finish off the current ioend and start another.
1733 */
1734static void
1735iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
1736 struct iomap_folio_state *ifs, struct iomap_writepage_ctx *wpc,
1737 struct writeback_control *wbc, struct list_head *iolist)
1738{
1739 sector_t sector = iomap_sector(&wpc->iomap, pos);
1740 unsigned len = i_blocksize(inode);
1741 size_t poff = offset_in_folio(folio, pos);
1742
1743 if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
1744 if (wpc->ioend)
1745 list_add(&wpc->ioend->io_list, iolist);
1746 wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
1747 }
1748
1749 if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
1750 wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
1751 bio_add_folio_nofail(wpc->ioend->io_bio, folio, len, poff);
1752 }
1753
1754 if (ifs)
1755 atomic_add(len, &ifs->write_bytes_pending);
1756 wpc->ioend->io_size += len;
1757 wbc_account_cgroup_owner(wbc, &folio->page, len);
1758}
1759
1760/*
1761 * We implement an immediate ioend submission policy here to avoid needing to
1762 * chain multiple ioends and hence nest mempool allocations which can violate
1763 * the forward progress guarantees we need to provide. The current ioend we're
1764 * adding blocks to is cached in the writepage context, and if the new block
1765 * doesn't append to the cached ioend, it will create a new ioend and cache that
1766 * instead.
1767 *
1768 * If a new ioend is created and cached, the old ioend is returned and queued
1769 * locally for submission once the entire page is processed or an error has been
1770 * detected. While ioends are submitted immediately after they are completed,
1771 * batching optimisations are provided by higher level block plugging.
1772 *
1773 * At the end of a writeback pass, there will be a cached ioend remaining on the
1774 * writepage context that the caller will need to submit.
1775 */
1776static int
1777iomap_writepage_map(struct iomap_writepage_ctx *wpc,
1778 struct writeback_control *wbc, struct inode *inode,
1779 struct folio *folio, u64 end_pos)
1780{
1781 struct iomap_folio_state *ifs = folio->private;
1782 struct iomap_ioend *ioend, *next;
1783 unsigned len = i_blocksize(inode);
1784 unsigned nblocks = i_blocks_per_folio(inode, folio);
1785 u64 pos = folio_pos(folio);
1786 int error = 0, count = 0, i;
1787 LIST_HEAD(submit_list);
1788
1789 WARN_ON_ONCE(end_pos <= pos);
1790
1791 if (!ifs && nblocks > 1) {
1792 ifs = ifs_alloc(inode, folio, 0);
1793 iomap_set_range_dirty(folio, 0, end_pos - pos);
1794 }
1795
1796 WARN_ON_ONCE(ifs && atomic_read(&ifs->write_bytes_pending) != 0);
1797
1798 /*
1799 * Walk through the folio to find areas to write back. If we
1800 * run off the end of the current map or find the current map
1801 * invalid, grab a new one.
1802 */
1803 for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
1804 if (ifs && !ifs_block_is_dirty(folio, ifs, i))
1805 continue;
1806
1807 error = wpc->ops->map_blocks(wpc, inode, pos);
1808 if (error)
1809 break;
1810 trace_iomap_writepage_map(inode, &wpc->iomap);
1811 if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
1812 continue;
1813 if (wpc->iomap.type == IOMAP_HOLE)
1814 continue;
1815 iomap_add_to_ioend(inode, pos, folio, ifs, wpc, wbc,
1816 &submit_list);
1817 count++;
1818 }
1819 if (count)
1820 wpc->ioend->io_folios++;
1821
1822 WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
1823 WARN_ON_ONCE(!folio_test_locked(folio));
1824 WARN_ON_ONCE(folio_test_writeback(folio));
1825 WARN_ON_ONCE(folio_test_dirty(folio));
1826
1827 /*
1828 * We cannot cancel the ioend directly here on error. We may have
1829 * already set other pages under writeback and hence we have to run I/O
1830 * completion to mark the error state of the pages under writeback
1831 * appropriately.
1832 */
1833 if (unlikely(error)) {
1834 /*
1835 * Let the filesystem know what portion of the current folio
1836 * failed to map. If the folio hasn't been added to an ioend,
1837 * it won't be affected by I/O completion and we must unlock
1838 * it now.
1839 */
1840 if (wpc->ops->discard_folio)
1841 wpc->ops->discard_folio(folio, pos);
1842 if (!count) {
1843 folio_unlock(folio);
1844 goto done;
1845 }
1846 }
1847
1848 /*
1849 * We can have dirty bits set past the end of file in the page_mkwrite path
1850 * while mapping the last partial folio. Hence it's better to clear
1851 * all the dirty bits in the folio here.
1852 */
1853 iomap_clear_range_dirty(folio, 0, folio_size(folio));
1854 folio_start_writeback(folio);
1855 folio_unlock(folio);
1856
1857 /*
1858 * Preserve the original error if there was one; catch
1859 * submission errors here and propagate into subsequent ioend
1860 * submissions.
1861 */
1862 list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
1863 int error2;
1864
1865 list_del_init(&ioend->io_list);
1866 error2 = iomap_submit_ioend(wpc, ioend, error);
1867 if (error2 && !error)
1868 error = error2;
1869 }
1870
1871 /*
1872 * We can end up here with no error and nothing to write only if we race
1873 * with a partial folio truncate on a filesystem with sub-page block size.
1874 */
1875 if (!count)
1876 folio_end_writeback(folio);
1877done:
1878 mapping_set_error(inode->i_mapping, error);
1879 return error;
1880}
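
/*
 * Illustration of the block walk above (made-up geometry): with 1k blocks
 * in a 4k folio, nblocks is 4.  If only blocks 1 and 3 are dirty in the
 * ifs bitmap, ->map_blocks() is called for folio offsets 1k and 3k only,
 * 2k is accounted to ifs->write_bytes_pending and to the ioend, blocks 0
 * and 2 are skipped, and the folio goes under writeback exactly once.
 * Writeback on the folio then ends when write_bytes_pending drops back to
 * zero in the ioend completion path.
 */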
1881
1882/*
1883 * Write out a dirty page.
1884 *
1885 * For delalloc space on the page, we need to allocate space and flush it.
1886 * For unwritten space on the page, we need to start the conversion to
1887 * regular allocated space.
1888 */
1889static int iomap_do_writepage(struct folio *folio,
1890 struct writeback_control *wbc, void *data)
1891{
1892 struct iomap_writepage_ctx *wpc = data;
1893 struct inode *inode = folio->mapping->host;
1894 u64 end_pos, isize;
1895
1896 trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
1897
1898 /*
1899 * Refuse to write the folio out if we're called from reclaim context.
1900 *
1901 * This avoids stack overflows when called from deeply used stacks in
1902 * random callers for direct reclaim or memcg reclaim. We explicitly
1903 * allow reclaim from kswapd as the stack usage there is relatively low.
1904 *
1905 * This should never happen except in the case of a VM regression so
1906 * warn about it.
1907 */
1908 if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1909 PF_MEMALLOC))
1910 goto redirty;
1911
1912 /*
1913 * Is this folio beyond the end of the file?
1914 *
1915 * If the folio lies entirely within i_size, end_pos already is the
1916 * highest offset this folio should represent; otherwise adjust it.
1917 * -----------------------------------------------------
1918 * | file mapping | <EOF> |
1919 * -----------------------------------------------------
1920 * | Page ... | Page N-2 | Page N-1 | Page N | |
1921 * ^--------------------------------^----------|--------
1922 * | desired writeback range | see below|
1923 * ---------------------------------^------------------|
1924 */
1925 isize = i_size_read(inode);
1926 end_pos = folio_pos(folio) + folio_size(folio);
1927 if (end_pos > isize) {
1928 /*
1929 * Check whether the folio to write out lies entirely beyond
1930 * i_size or merely straddles it.
1931 * -------------------------------------------------------
1932 * | file mapping | <EOF> |
1933 * -------------------------------------------------------
1934 * | Page ... | Page N-2 | Page N-1 | Page N | Beyond |
1935 * ^--------------------------------^-----------|---------
1936 * | | Straddles |
1937 * ---------------------------------^-----------|--------|
1938 */
1939 size_t poff = offset_in_folio(folio, isize);
1940 pgoff_t end_index = isize >> PAGE_SHIFT;
1941
1942 /*
1943 * Skip the page if it's fully outside i_size, e.g.
1944 * due to a truncate operation that's in progress. We've
1945 * cleaned this page and truncate will finish things off for
1946 * us.
1947 *
1948 * Note that the end_index is unsigned long. If the given
1949 * offset is greater than 16TB on a 32-bit system then if we
1950 * checked if the page is fully outside i_size with
1951 * "if (page->index >= end_index + 1)", "end_index + 1" would
1952 * overflow and evaluate to 0. Hence this page would be
1953 * redirtied and written out repeatedly, which would result in
1954 * an infinite loop; the user program performing this operation
1955 * would hang. Instead, we can detect this situation by
1956 * checking if the page is totally beyond i_size or if its
1957 * offset is just equal to the EOF.
1958 */
1959 if (folio->index > end_index ||
1960 (folio->index == end_index && poff == 0))
1961 goto unlock;
1962
1963 /*
1964 * The page straddles i_size. It must be zeroed out on each
1965 * and every writepage invocation because it may be mmapped.
1966 * "A file is mapped in multiples of the page size. For a file
1967 * that is not a multiple of the page size, the remaining
1968 * memory is zeroed when mapped, and writes to that region are
1969 * not written out to the file."
1970 */
1971 folio_zero_segment(folio, poff, folio_size(folio));
1972 end_pos = isize;
1973 }
1974
1975 return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
1976
1977redirty:
1978 folio_redirty_for_writepage(wbc, folio);
1979unlock:
1980 folio_unlock(folio);
1981 return 0;
1982}
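
/*
 * Worked example for the straddling case (made-up numbers, single-page
 * folios): with 4k pages and i_size = 10k, end_index is 2.  The folio at
 * index 2 covers bytes 8k..12k-1, so end_pos (12k) exceeds i_size; poff
 * becomes 2k, bytes 2k..4k-1 of the folio are zeroed, and end_pos is
 * clamped to 10k before the folio is mapped.  A folio at index 3 lies
 * entirely beyond i_size and is simply unlocked and skipped.
 */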
1983
1984int
1985iomap_writepages(struct address_space *mapping, struct writeback_control *wbc,
1986 struct iomap_writepage_ctx *wpc,
1987 const struct iomap_writeback_ops *ops)
1988{
1989 int ret;
1990
1991 wpc->ops = ops;
1992 ret = write_cache_pages(mapping, wbc, iomap_do_writepage, wpc);
1993 if (!wpc->ioend)
1994 return ret;
1995 return iomap_submit_ioend(wpc, wpc->ioend, ret);
1996}
1997EXPORT_SYMBOL_GPL(iomap_writepages);
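
/*
 * Hypothetical usage sketch (example only, not part of this file): a
 * filesystem embeds struct iomap_writepage_ctx in its own writeback
 * context, provides at least ->map_blocks, and calls iomap_writepages()
 * from the ->writepages method in its address_space_operations.  The
 * myfs_* names below are made up; ->prepare_ioend and ->discard_folio are
 * optional hooks.
 */
#if 0	/* illustrative sketch, never compiled */
struct myfs_writepage_ctx {
	struct iomap_writepage_ctx	ctx;
	/* filesystem-private writeback state would live here */
};

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
		struct inode *inode, loff_t offset)
{
	/*
	 * Fill wpc->iomap with the extent covering @offset.  A trivial
	 * filesystem that maps file blocks 1:1 onto its block device
	 * could simply describe the whole file:
	 */
	wpc->iomap.type = IOMAP_MAPPED;
	wpc->iomap.flags = 0;
	wpc->iomap.bdev = inode->i_sb->s_bdev;
	wpc->iomap.offset = 0;
	wpc->iomap.addr = 0;
	wpc->iomap.length = round_up(i_size_read(inode), i_blocksize(inode));
	return 0;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
	.map_blocks	= myfs_map_blocks,
};

static int myfs_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct myfs_writepage_ctx wpc = { };

	return iomap_writepages(mapping, wbc, &wpc.ctx, &myfs_writeback_ops);
}
#endif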
1998
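/*
 * The ioend bioset reserves a small pool of bios for forward progress and
 * front-pads each allocation with a struct iomap_ioend, so that a bio
 * allocated from it doubles as the ioend's embedded io_inline_bio and the
 * ioend itself can be recovered with container_of().
 */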
1999static int __init iomap_init(void)
2000{
2001 return bioset_init(&iomap_ioend_bioset, 4 * (PAGE_SIZE / SECTOR_SIZE),
2002 offsetof(struct iomap_ioend, io_inline_bio),
2003 BIOSET_NEED_BVECS);
2004}
2005fs_initcall(iomap_init);