/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c. This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

static void ext4_finish_bio(struct bio *bio)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
		struct page *data_page = NULL;
		struct ext4_crypto_ctx *ctx = NULL;
#endif
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

#ifdef CONFIG_EXT4_FS_ENCRYPTION
		if (!page->mapping) {
			/* The bounce data pages are unmapped. */
			data_page = page;
			ctx = (struct ext4_crypto_ctx *)page_private(data_page);
			page = ctx->w.control_page;
		}
#endif

		if (bio->bi_error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
			if (ctx)
				ext4_restore_control_page(data_page);
#endif
			end_page_writeback(page);
		}
	}
}

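/*
 * Release an io_end once nothing references it any more: finish and free any
 * bios still chained off it, then return the structure to the slab cache.
 */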
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	handle_t *handle = io->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	io->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_extents(handle, inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io, head, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io, inode->i_ino, io0, io1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * work on completed IO, to convert unwritten extents to extents
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		io->inode = inode;
		INIT_LIST_HEAD(&io->list);
		atomic_set(&io->count, 1);
	}
	return io;
}

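/*
 * Drop a reference to the io_end. If that was the last reference and the
 * io_end still has unwritten extents to convert, queue it on the per-inode
 * completed-IO list for the reserved conversion workqueue; otherwise release
 * it right away.
 */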
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->handle,
						io_end->inode, io_end->offset,
						io_end->size);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	BUG_ON(!io_end);
	bio->bi_end_io = NULL;

	if (bio->bi_error) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     bio->bi_error, inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping, bio->bi_error);
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

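/* Submit the bio currently being built in the io_submit structure, if any. */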
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
			    WRITE_SYNC : WRITE;
		bio_get(io->io_bio);
		submit_bio(io_op, io->io_bio);
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
	if (!bio)
		return -ENOMEM;
	wbc_init_bio(io->io_wbc, bio);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	return 0;
}

static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct page *page,
			    struct buffer_head *bh)
{
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
	}
	ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	wbc_account_io(io->io_wbc, page, bh->b_size);
	io->io_next_block++;
	return 0;
}

int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc,
			bool keep_towrite)
{
	struct page *data_page = NULL;
	struct inode *inode = page->mapping->host;
	unsigned block_start, blocksize;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;
	int nr_to_submit = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	if (keep_towrite)
		set_page_writeback_keepwrite(page);
	else
		set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page:
	 *
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_SIZE)
		zero_user_segment(page, len, PAGE_SIZE);
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}
		set_buffer_async_write(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	bh = head = page_buffers(page);

	if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) &&
	    nr_to_submit) {
		gfp_t gfp_flags = GFP_NOFS;

	retry_encrypt:
		data_page = ext4_encrypt(inode, page, gfp_flags);
		if (IS_ERR(data_page)) {
			ret = PTR_ERR(data_page);
			if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
				if (io->io_bio) {
					ext4_io_submit(io);
					congestion_wait(BLK_RW_ASYNC, HZ/50);
				}
				gfp_flags |= __GFP_NOFAIL;
				goto retry_encrypt;
			}
			data_page = NULL;
			goto out;
		}
	}

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode,
				       data_page ? data_page : page, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM. Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
	out:
		if (data_page)
			ext4_restore_control_page(data_page);
		printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
		redirty_page_for_writepage(wbc, page);
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;

	io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
	if (io_end_vec_cachep == NULL) {
		kmem_cache_destroy(io_end_cachep);
		return -ENOMEM;
	}
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
	kmem_cache_destroy(io_end_vec_cachep);
}

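/*
 * Allocate a new io_end_vec describing one contiguous range covered by this
 * io_end and add it to the tail of the io_end's list of ranges.
 */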
struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec;

	io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
	if (!io_end_vec)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&io_end_vec->list);
	list_add_tail(&io_end_vec->list, &io_end->list_vec);
	return io_end_vec;
}

static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
	struct ext4_io_end_vec *io_end_vec, *tmp;

	if (list_empty(&io_end->list_vec))
		return;
	list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
		list_del(&io_end_vec->list);
		kmem_cache_free(io_end_vec_cachep, io_end_vec);
	}
}

struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
	BUG_ON(list_empty(&io_end->list_vec));
	return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c. This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message. We really need a unified error reporting
 * structure to userspace ala Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
			   bh->b_bdev,
			   (unsigned long long)bh->b_blocknr);
}

static void ext4_finish_bio(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		struct folio *folio = fi.folio;
		struct folio *io_folio = NULL;
		struct buffer_head *bh, *head;
		size_t bio_start = fi.offset;
		size_t bio_end = bio_start + fi.length;
		unsigned under_io = 0;
		unsigned long flags;

		if (fscrypt_is_bounce_folio(folio)) {
			io_folio = folio;
			folio = fscrypt_pagecache_folio(folio);
		}

		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			mapping_set_error(folio->mapping, err);
		}
		bh = head = folio_buffers(folio);
		/*
		 * We check all buffers in the folio under b_uptodate_lock
		 * to avoid races with other end io clearing async_write flags
		 */
		spin_lock_irqsave(&head->b_uptodate_lock, flags);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (bio->bi_status) {
				set_buffer_write_io_error(bh);
				buffer_io_error(bh);
			}
		} while ((bh = bh->b_this_page) != head);
		spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
		if (!under_io) {
			fscrypt_free_bounce_page(&io_folio->page);
			folio_end_writeback(folio);
		}
	}
}

static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	struct bio *bio, *next_bio;

	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
	WARN_ON(io_end->handle);

	for (bio = io_end->bio; bio; bio = next_bio) {
		next_bio = bio->bi_private;
		ext4_finish_bio(bio);
		bio_put(bio);
	}
	ext4_free_io_end_vec(io_end);
	kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree by
 * the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;
	handle_t *handle = io_end->handle;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

	io_end->handle = NULL;	/* Following call will use up the handle */
	ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
	if (ret < 0 && !ext4_forced_shutdown(inode->i_sb)) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss! "
			 "(inode %lu, error %d)", inode->i_ino, ret);
	}
	ext4_clear_io_unwritten_flag(io_end);
	ext4_release_io_end(io_end);
	return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io_end, *io_end0, *io_end1;

	if (list_empty(head))
		return;

	ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
	list_for_each_entry(io_end, head, list) {
		cur = &io_end->list;
		before = cur->prev;
		io_end0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io_end1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			   io_end, inode->i_ino, io_end0, io_end1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
	struct workqueue_struct *wq;
	unsigned long flags;

	/* Only reserved conversions from writeback should enter here */
	WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	WARN_ON(!io_end->handle && sbi->s_journal);
	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	wq = sbi->rsv_conversion_wq;
	if (list_empty(&ei->i_rsv_conversion_list))
		queue_work(wq, &ei->i_rsv_conversion_work);
	list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

static int ext4_do_flush_completed_IO(struct inode *inode,
				      struct list_head *head)
{
	ext4_io_end_t *io_end;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode, head);
	list_replace_init(head, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io_end = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io_end->list);

		err = ext4_end_io_end(io_end);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * work on completed IO, to convert unwritten extents to extents
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_rsv_conversion_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

	if (io_end) {
		io_end->inode = inode;
		INIT_LIST_HEAD(&io_end->list);
		INIT_LIST_HEAD(&io_end->list_vec);
		refcount_set(&io_end->count, 1);
	}
	return io_end;
}

void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (refcount_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
				list_empty(&io_end->list_vec)) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (refcount_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_io_end_vec(io_end->handle,
								io_end);
			io_end->handle = NULL;
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	refcount_inc(&io_end->count);
	return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
	ext4_io_end_t *io_end = bio->bi_private;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	if (WARN_ONCE(!io_end, "io_end is NULL: %pg: sector %Lu len %u err %d\n",
		      bio->bi_bdev,
		      (long long) bio->bi_iter.bi_sector,
		      (unsigned) bio_sectors(bio),
		      bio->bi_status)) {
		ext4_finish_bio(bio);
		bio_put(bio);
		return;
	}
	bio->bi_end_io = NULL;

	if (bio->bi_status) {
		struct inode *inode = io_end->inode;

		ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
			     "(starting block %llu)",
			     bio->bi_status, inode->i_ino,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
		mapping_set_error(inode->i_mapping,
				  blk_status_to_errno(bio->bi_status));
	}

	if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
		/*
		 * Link bio into list hanging from io_end. We have to do it
		 * atomically as bio completions can be racing against each
		 * other.
		 */
		bio->bi_private = xchg(&io_end->bio, bio);
		ext4_put_io_end_defer(io_end);
	} else {
		/*
		 * Drop io_end reference early. Inode can get freed once
		 * we finish the bio.
		 */
		ext4_put_io_end_defer(io_end);
		ext4_finish_bio(bio);
		bio_put(bio);
	}
}

void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		if (io->io_wbc->sync_mode == WB_SYNC_ALL)
			io->io_bio->bi_opf |= REQ_SYNC;
		submit_bio(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_wbc = wbc;
	io->io_bio = NULL;
	io->io_end = NULL;
}

static void io_submit_init_bio(struct ext4_io_submit *io,
			       struct buffer_head *bh)
{
	struct bio *bio;

	/*
	 * bio_alloc will _always_ be able to allocate a bio if
	 * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
	 */
	bio = bio_alloc(bh->b_bdev, BIO_MAX_VECS, REQ_OP_WRITE, GFP_NOIO);
	fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	wbc_init_bio(io->io_wbc, bio);
}

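/*
 * Add a buffer to the bio being built, starting a new bio when the block is
 * not contiguous with the previous one or the fscrypt contexts cannot be
 * merged. If the data does not fit into the current bio, submit it and retry
 * with a fresh one.
 */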
static void io_submit_add_bh(struct ext4_io_submit *io,
			     struct inode *inode,
			     struct folio *folio,
			     struct folio *io_folio,
			     struct buffer_head *bh)
{
	if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
			   !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		io_submit_init_bio(io, bh);
		io->io_bio->bi_write_hint = inode->i_write_hint;
	}
	if (!bio_add_folio(io->io_bio, io_folio, bh->b_size, bh_offset(bh)))
		goto submit_and_retry;
	wbc_account_cgroup_owner(io->io_wbc, folio, bh->b_size);
	io->io_next_block++;
}

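/*
 * Write out the dirty, mapped buffers of a locked folio: mark them for async
 * write, encrypt the data into a bounce page when fs-layer encryption is
 * used, start writeback on the folio and add the buffers to the bio being
 * built. The caller unlocks the folio and eventually calls ext4_io_submit()
 * to flush the last bio.
 */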
int ext4_bio_write_folio(struct ext4_io_submit *io, struct folio *folio,
			 size_t len)
{
	struct folio *io_folio = folio;
	struct inode *inode = folio->mapping->host;
	unsigned block_start;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_to_submit = 0;
	struct writeback_control *wbc = io->io_wbc;
	bool keep_towrite = false;

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));

	/*
	 * Comments copied from block_write_full_folio:
	 *
	 * The folio straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < folio_size(folio))
		folio_zero_segment(folio, len, folio_size(folio));
	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the folio before submitting so that
	 * folio_end_writeback() cannot be called from ext4_end_bio() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = folio_buffers(folio);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			/*
			 * Keeping dirty some buffer we cannot write? Make sure
			 * to redirty the folio and keep TOWRITE tag so that
			 * racing WB_SYNC_ALL writeback does not skip the folio.
			 * This happens e.g. when doing writeout for
			 * transaction commit or when journalled data is not
			 * yet committed.
			 */
			if (buffer_dirty(bh) ||
			    (buffer_jbd(bh) && buffer_jbddirty(bh))) {
				if (!folio_test_dirty(folio))
					folio_redirty_for_writepage(wbc, folio);
				keep_towrite = true;
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		set_buffer_async_write(bh);
		clear_buffer_dirty(bh);
		nr_to_submit++;
	} while ((bh = bh->b_this_page) != head);

	/* Nothing to submit? Just unlock the folio... */
	if (!nr_to_submit)
		return 0;

	bh = head = folio_buffers(folio);

	/*
	 * If any blocks are being written to an encrypted file, encrypt them
	 * into a bounce page. For simplicity, just encrypt until the last
	 * block which might be needed. This may cause some unneeded blocks
	 * (e.g. holes) to be unnecessarily encrypted, but this is rare and
	 * can't happen in the common case of blocksize == PAGE_SIZE.
	 */
	if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		gfp_t gfp_flags = GFP_NOFS;
		unsigned int enc_bytes = round_up(len, i_blocksize(inode));
		struct page *bounce_page;

		/*
		 * Since bounce page allocation uses a mempool, we can only use
		 * a waiting mask (i.e. request guaranteed allocation) on the
		 * first page of the bio. Otherwise it can deadlock.
		 */
		if (io->io_bio)
			gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
	retry_encrypt:
		bounce_page = fscrypt_encrypt_pagecache_blocks(&folio->page,
					enc_bytes, 0, gfp_flags);
		if (IS_ERR(bounce_page)) {
			ret = PTR_ERR(bounce_page);
			if (ret == -ENOMEM &&
			    (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
				gfp_t new_gfp_flags = GFP_NOFS;

				if (io->io_bio)
					ext4_io_submit(io);
				else
					new_gfp_flags |= __GFP_NOFAIL;
				memalloc_retry_wait(gfp_flags);
				gfp_flags = new_gfp_flags;
				goto retry_encrypt;
			}

			printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
			folio_redirty_for_writepage(wbc, folio);
			do {
				if (buffer_async_write(bh)) {
					clear_buffer_async_write(bh);
					set_buffer_dirty(bh);
				}
				bh = bh->b_this_page;
			} while (bh != head);

			return ret;
		}
		io_folio = page_folio(bounce_page);
	}

	__folio_start_writeback(folio, keep_towrite);

	/* Now submit buffers to write */
	do {
		if (!buffer_async_write(bh))
			continue;
		io_submit_add_bh(io, inode, folio, io_folio, bh);
	} while ((bh = bh->b_this_page) != head);

	return 0;
}