// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"

struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_d.di_size;
}

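/*
 * Allocate a transaction for an on-disk inode size update at I/O
 * submission time and stash it in the ioend; the transaction carries
 * freeze protection across to the completion side.
 */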
STATIC int
xfs_setfilesize_trans_alloc(
	struct iomap_ioend	*ioend)
{
	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	ioend->io_private = tp;

	/*
	 * We may pass freeze protection with a transaction. So tell lockdep
	 * we released it.
	 */
	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
	/*
	 * We hand off the transaction to the completion thread now, so
	 * clear the flag here.
	 */
	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp,
	xfs_off_t		offset,
	size_t			size)
{
	xfs_fsize_t		isize;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_d.di_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

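/*
 * Variant for callers that do not have a transaction preallocated at
 * submission time: allocate one here and then perform the size update.
 */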
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	return __xfs_setfilesize(ip, tp, offset, size);
}

STATIC int
xfs_setfilesize_ioend(
	struct iomap_ioend	*ioend,
	int			error)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_trans	*tp = ioend->io_private;

	/*
	 * The transaction may have been allocated in the I/O submission thread,
	 * thus we need to mark ourselves as being in a transaction manually.
	 * Similarly for freeze protection.
	 */
	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

	/* we abort the update if there was an IO error */
	if (error) {
		xfs_trans_cancel(tp);
		return error;
	}

	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up any COW blocks on an I/O error.
	 */
	error = blk_status_to_errno(ioend->io_bio->bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED)
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);
	else
		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_private);

done:
	if (ioend->io_private)
		error = xfs_setfilesize_ioend(ioend, error);
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * If the ioend being merged has a preallocated transaction for file
 * size updates, we need to ensure the ioend it is merged into also
 * has one. If it already has one we can simply cancel the transaction
 * as it is guaranteed to be clean.
 */
static void
xfs_ioend_merge_private(
	struct iomap_ioend	*ioend,
	struct iomap_ioend	*next)
{
	if (!ioend->io_private) {
		ioend->io_private = next->io_private;
		next->io_private = NULL;
	} else {
		xfs_setfilesize_ioend(next, -ECANCELED);
	}
}

/* Finish all pending io completions. */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp, xfs_ioend_merge_private);
		xfs_end_ioend(ioend);
	}
}

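/*
 * An ioend needs the completion workqueue if finishing it requires a
 * transaction: a deferred size update, unwritten extent conversion, or
 * a COW remap.
 */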
static inline bool xfs_ioend_needs_workqueue(struct iomap_ioend *ioend)
{
	return ioend->io_private ||
		ioend->io_type == IOMAP_UNWRITTEN ||
		(ioend->io_flags & IOMAP_F_SHARED);
}

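/*
 * Bio completion handler: called from bio_endio(), potentially in
 * interrupt context, so defer the real work by queueing the ioend on
 * the per-inode list and kicking the completion workqueue.
 */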
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = bio->bi_private;
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	ASSERT(xfs_ioend_needs_workqueue(ioend));

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
				&ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	loff_t			offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq))
		return false;
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
		return false;
	return true;
}

/*
 * Take a delalloc extent and convert it to real extents, returning the real
 * extent that maps offset_fsb in wpc->iomap.
 *
 * The current page is held locked so nothing could have removed the block
 * backing offset_fsb, although it could have moved from the COW to the data
 * fork by another thread.
 */
static int
xfs_convert_blocks(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset)
{
	int			error;
	unsigned		*seq;

	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	/*
	 * Attempt to allocate whatever delalloc extent currently backs offset
	 * and put the result into wpc->iomap. Allocate in a loop because it
	 * may take several attempts to allocate real blocks for a contiguous
	 * delalloc extent if free space is sufficiently fragmented.
	 */
	do {
		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
				&wpc->iomap, seq);
		if (error)
			return error;
	} while (wpc->iomap.offset + wpc->iomap.length <= offset);

	return 0;
}

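/*
 * ->map_blocks callback for iomap writeback: find, revalidate or build
 * the extent mapping that backs the block at @offset and store it in
 * wpc->iomap.
 */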
static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
	int			whichfork = XFS_DATA_FORK;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared. COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page. Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents. If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
	       (ip->i_df.if_flags & XFS_IFEXTENTS));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one. This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one. Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

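/*
 * Prepare an ioend for submission: convert any COW extents to regular
 * ones, preallocate a size-update transaction if the ioend may extend
 * the on-disk size, and route the completion through the workqueue if
 * it needs transactional work.
 */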
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	/* Reserve log space if we might write beyond the on-disk inode size. */
	if (!status &&
	    ((ioend->io_flags & IOMAP_F_SHARED) ||
	     ioend->io_type != IOMAP_UNWRITTEN) &&
	    xfs_ioend_is_append(ioend) &&
	    !ioend->io_private)
		status = xfs_setfilesize_trans_alloc(ioend);

	memalloc_nofs_restore(nofs_flag);

	if (xfs_ioend_needs_workqueue(ioend))
		ioend->io_bio->bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the page has delalloc blocks on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip up a later direct I/O read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the page. Because
 * they are delalloc, we can do this without needing a transaction. Indeed - if
 * we get ENOSPC errors, we have to be able to do this truncation without a
 * transaction as there is no space left for block reservation (typically why
 * we see an ENOSPC in writeback).
 */
static void
xfs_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			offset = page_offset(page);
	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_invalidate;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
			PAGE_SIZE / i_blocksize(inode));
	if (error && !XFS_FORCED_SHUTDOWN(mp))
		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
	iomap_invalidatepage(page, 0, PAGE_SIZE);
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_page		= xfs_discard_page,
};

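/*
 * Writeback entry points: thin wrappers that set up a writepage context
 * and hand the real work to the generic iomap writeback code.
 */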
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	return iomap_writepage(page, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on reflink inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return iomap_readpage(page, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control *rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

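/*
 * Tell the swap code which block device backs this file and let the
 * generic iomap helper walk the extent map to build the swap map.
 */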
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readahead		= xfs_vm_readahead,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.set_page_dirty		= iomap_set_page_dirty,
	.releasepage		= iomap_releasepage,
	.invalidatepage		= iomap_invalidatepage,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= noop_direct_IO,
	.migratepage		= iomap_migrate_page,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.direct_IO		= noop_direct_IO,
	.set_page_dirty		= noop_set_page_dirty,
	.invalidatepage		= noop_invalidatepage,
	.swap_activate		= xfs_iomap_swapfile_activate,
};
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_reflink.h"
#include "xfs_errortag.h"
#include "xfs_error.h"

struct xfs_writepage_ctx {
	struct iomap_writepage_ctx ctx;
	unsigned int		data_seq;
	unsigned int		cow_seq;
};

static inline struct xfs_writepage_ctx *
XFS_WPC(struct iomap_writepage_ctx *ctx)
{
	return container_of(ctx, struct xfs_writepage_ctx, ctx);
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
{
	return ioend->io_offset + ioend->io_size >
		XFS_I(ioend->io_inode)->i_disk_size;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
int
xfs_setfilesize(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	size_t			size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_fsize_t		isize;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	isize = xfs_new_eof(ip, offset + size);
	if (!isize) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_trans_cancel(tp);
		return 0;
	}

	trace_xfs_setfilesize(ip, offset, size);

	ip->i_disk_size = isize;
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	return xfs_trans_commit(tp);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_ioend(
	struct iomap_ioend	*ioend)
{
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_off_t		offset = ioend->io_offset;
	size_t			size = ioend->io_size;
	unsigned int		nofs_flag;
	int			error;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/*
	 * Just clean up the in-memory structures if the fs has been shut down.
	 */
	if (xfs_is_shutdown(mp)) {
		error = -EIO;
		goto done;
	}

	/*
	 * Clean up all COW blocks and underlying data fork delalloc blocks on
	 * I/O error. The delalloc punch is required because this ioend was
	 * mapped to blocks in the COW fork and the associated pages are no
	 * longer dirty. If we don't remove delalloc blocks here, they become
	 * stale and can corrupt free space accounting on unmount.
	 */
	error = blk_status_to_errno(ioend->io_bio.bi_status);
	if (unlikely(error)) {
		if (ioend->io_flags & IOMAP_F_SHARED) {
			xfs_reflink_cancel_cow_range(ip, offset, size, true);
			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, offset,
					offset + size);
		}
		goto done;
	}

	/*
	 * Success: commit the COW or unwritten blocks if needed.
	 */
	if (ioend->io_flags & IOMAP_F_SHARED)
		error = xfs_reflink_end_cow(ip, offset, size);
	else if (ioend->io_type == IOMAP_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size, false);

	if (!error && xfs_ioend_is_append(ioend))
		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
done:
	iomap_finish_ioends(ioend, error);
	memalloc_nofs_restore(nofs_flag);
}

/*
 * Finish all pending IO completions that require transactional modifications.
 *
 * We try to merge physically and logically contiguous ioends before completion
 * to minimise the number of transactions we need to perform during IO
 * completion. Both unwritten extent conversion and COW remapping need to
 * iterate and modify one physical extent at a time, so we gain nothing by
 * merging physically discontiguous extents here.
 *
 * The ioend chain length that we can be processing here is largely unbound in
 * length and we may have to perform significant amounts of work on each ioend
 * to complete it. Hence we have to be careful about holding the CPU for too
 * long in this loop.
 */
void
xfs_end_io(
	struct work_struct	*work)
{
	struct xfs_inode	*ip =
		container_of(work, struct xfs_inode, i_ioend_work);
	struct iomap_ioend	*ioend;
	struct list_head	tmp;
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	list_replace_init(&ip->i_ioend_list, &tmp);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);

	iomap_sort_ioends(&tmp);
	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
			io_list))) {
		list_del_init(&ioend->io_list);
		iomap_ioend_try_merge(ioend, &tmp);
		xfs_end_ioend(ioend);
		cond_resched();
	}
}

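/*
 * Bio completion handler: called from bio_endio(), potentially in
 * interrupt context, so defer the real work by queueing the ioend on
 * the per-inode list and kicking the completion workqueue.
 */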
STATIC void
xfs_end_bio(
	struct bio		*bio)
{
	struct iomap_ioend	*ioend = iomap_ioend_from_bio(bio);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	unsigned long		flags;

	spin_lock_irqsave(&ip->i_ioend_lock, flags);
	if (list_empty(&ip->i_ioend_list))
		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
				&ip->i_ioend_work));
	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
}

/*
 * Fast revalidation of the cached writeback mapping. Return true if the current
 * mapping is valid, false otherwise.
 */
static bool
xfs_imap_valid(
	struct iomap_writepage_ctx *wpc,
	struct xfs_inode	*ip,
	loff_t			offset)
{
	if (offset < wpc->iomap.offset ||
	    offset >= wpc->iomap.offset + wpc->iomap.length)
		return false;
	/*
	 * If this is a COW mapping, it is sufficient to check that the mapping
	 * covers the offset. Be careful to check this first because the caller
	 * can revalidate a COW mapping without updating the data seqno.
	 */
	if (wpc->iomap.flags & IOMAP_F_SHARED)
		return true;

	/*
	 * This is not a COW mapping. Check the sequence number of the data fork
	 * because concurrent changes could have invalidated the extent. Check
	 * the COW fork because concurrent changes since the last time we
	 * checked (and found nothing at this offset) could have added
	 * overlapping blocks.
	 */
	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
		trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
				XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
		return false;
	}
	if (xfs_inode_has_cow_data(ip) &&
	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
		trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
				XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
		return false;
	}
	return true;
}

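/*
 * ->map_blocks callback for iomap writeback: find, revalidate or build
 * the extent mapping that backs the folio block at @offset and store it
 * in wpc->iomap.
 */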
static int
xfs_map_blocks(
	struct iomap_writepage_ctx *wpc,
	struct inode		*inode,
	loff_t			offset,
	unsigned int		len)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			count = i_blocksize(inode);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_fileoff_t		cow_fsb;
	int			whichfork;
	struct xfs_bmbt_irec	imap;
	struct xfs_iext_cursor	icur;
	int			retries = 0;
	int			error = 0;
	unsigned int		*seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);

	/*
	 * COW fork blocks can overlap data fork blocks even if the blocks
	 * aren't shared. COW I/O always takes precedence, so we must always
	 * check for overlap on reflink inodes unless the mapping is already a
	 * COW one, or the COW fork hasn't changed from the last time we looked
	 * at it.
	 *
	 * It's safe to check the COW fork if_seq here without the ILOCK because
	 * we've indirectly protected against concurrent updates: writeback has
	 * the page locked, which prevents concurrent invalidations by reflink
	 * and directio and prevents concurrent buffered writes to the same
	 * page. Changes to if_seq always happen under i_lock, which protects
	 * against concurrent updates and provides a memory barrier on the way
	 * out that ensures that we always see the current value.
	 */
	if (xfs_imap_valid(wpc, ip, offset))
		return 0;

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents. If we return without a valid map, it means we
	 * landed in a hole and we skip the block.
	 */
retry:
	cow_fsb = NULLFILEOFF;
	whichfork = XFS_DATA_FORK;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	ASSERT(!xfs_need_iread_extents(&ip->i_df));

	/*
	 * Check if this offset is covered by a COW extent, and if so use
	 * it directly instead of looking up anything in the data fork.
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
		cow_fsb = imap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
		xfs_iunlock(ip, XFS_ILOCK_SHARED);

		whichfork = XFS_COW_FORK;
		goto allocate_blocks;
	}

	/*
	 * No COW extent overlap. Revalidate now that we may have updated
	 * ->cow_seq. If the data mapping is still valid, we're done.
	 */
	if (xfs_imap_valid(wpc, ip, offset)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return 0;
	}

	/*
	 * If we don't have a valid map, now it's time to get a new one for this
	 * offset. This will convert delayed allocations (including COW ones)
	 * into real extents.
	 */
	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* landed in a hole or beyond EOF? */
	if (imap.br_startoff > offset_fsb) {
		imap.br_blockcount = imap.br_startoff - offset_fsb;
		imap.br_startoff = offset_fsb;
		imap.br_startblock = HOLESTARTBLOCK;
		imap.br_state = XFS_EXT_NORM;
	}

	/*
	 * Truncate to the next COW extent if there is one. This is the only
	 * opportunity to do this because we can skip COW fork lookups for the
	 * subsequent blocks in the mapping; however, the requirement to treat
	 * the COW range separately remains.
	 */
	if (cow_fsb != NULLFILEOFF &&
	    cow_fsb < imap.br_startoff + imap.br_blockcount)
		imap.br_blockcount = cow_fsb - imap.br_startoff;

	/* got a delalloc extent? */
	if (imap.br_startblock != HOLESTARTBLOCK &&
	    isnullstartblock(imap.br_startblock))
		goto allocate_blocks;

	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
	return 0;
allocate_blocks:
	/*
	 * Convert a delalloc extent to a real one. The current page is held
	 * locked so nothing could have removed the block backing offset_fsb,
	 * although it could have moved from the COW to the data fork by another
	 * thread.
	 */
	if (whichfork == XFS_COW_FORK)
		seq = &XFS_WPC(wpc)->cow_seq;
	else
		seq = &XFS_WPC(wpc)->data_seq;

	error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
			&wpc->iomap, seq);
	if (error) {
		/*
		 * If we failed to find the extent in the COW fork we might have
		 * raced with a COW to data fork conversion or truncate.
		 * Restart the lookup to catch the extent in the data fork for
		 * the former case, but prevent additional retries to avoid
		 * looping forever for the latter case.
		 */
		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
			goto retry;
		ASSERT(error != -EAGAIN);
		return error;
	}

	/*
	 * Due to merging the returned real extent might be larger than the
	 * original delalloc one. Trim the returned extent to the next COW
	 * boundary again to force a re-lookup.
	 */
	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);

		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
			wpc->iomap.length = cow_offset - wpc->iomap.offset;
	}

	ASSERT(wpc->iomap.offset <= offset);
	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
	return 0;
}

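/*
 * Prepare an ioend for submission: convert any COW extents to regular
 * ones and route ioends that need transactional completion work through
 * the workqueue.
 */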
static int
xfs_prepare_ioend(
	struct iomap_ioend	*ioend,
	int			status)
{
	unsigned int		nofs_flag;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim. To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	/* Convert CoW extents to regular */
	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
				ioend->io_offset, ioend->io_size);
	}

	memalloc_nofs_restore(nofs_flag);

	/* send ioends that might require a transaction to the completion wq */
	if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
	    (ioend->io_flags & IOMAP_F_SHARED))
		ioend->io_bio.bi_end_io = xfs_end_bio;
	return status;
}

/*
 * If the folio has delalloc blocks on it, the caller is asking us to punch
 * them out. If we don't, we can leave a stale delalloc mapping covered by a
 * clean page that needs to be dirtied again before the delalloc mapping can
 * be converted. This stale delalloc mapping can trip up a later direct I/O
 * read operation on the same region.
 *
 * We prevent this by truncating away the delalloc regions on the folio.
 * Because they are delalloc, we can do this without needing a transaction.
 * Indeed - if we get ENOSPC errors, we have to be able to do this truncation
 * without a transaction as there is no space left for block reservation
 * (typically why we see an ENOSPC in writeback).
 */
static void
xfs_discard_folio(
	struct folio		*folio,
	loff_t			pos)
{
	struct xfs_inode	*ip = XFS_I(folio->mapping->host);
	struct xfs_mount	*mp = ip->i_mount;

	if (xfs_is_shutdown(mp))
		return;

	xfs_alert_ratelimited(mp,
		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
			folio, ip->i_ino, pos);

	/*
	 * The end of the punch range is always the offset of the first
	 * byte of the next folio. Hence the end offset is only dependent on the
	 * folio itself and not the start offset that is passed in.
	 */
	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, pos,
			folio_pos(folio) + folio_size(folio));
}

static const struct iomap_writeback_ops xfs_writeback_ops = {
	.map_blocks		= xfs_map_blocks,
	.prepare_ioend		= xfs_prepare_ioend,
	.discard_folio		= xfs_discard_folio,
};

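/*
 * Writeback entry point: set up a writepage context and hand the real
 * work to the generic iomap writeback code.
 */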
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_writepage_ctx wpc = { };

	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
}

STATIC int
xfs_dax_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);
	return dax_writeback_mapping_range(mapping,
			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}

STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct xfs_inode	*ip = XFS_I(mapping->host);

	trace_xfs_vm_bmap(ip);

	/*
	 * The swap code (ab-)uses ->bmap to get a block mapping and then
	 * bypasses the file system for actual I/O. We really can't allow
	 * that on reflink inodes, so we have to skip out here. And yes,
	 * 0 is the magic code for a bmap error.
	 *
	 * Since we don't pass back blockdev info, we can't return bmap
	 * information for rt files either.
	 */
	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
		return 0;
	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
}

STATIC int
xfs_vm_read_folio(
	struct file		*unused,
	struct folio		*folio)
{
	return iomap_read_folio(folio, &xfs_read_iomap_ops);
}

STATIC void
xfs_vm_readahead(
	struct readahead_control *rac)
{
	iomap_readahead(rac, &xfs_read_iomap_ops);
}

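/*
 * Tell the swap code which block device backs this file and let the
 * generic iomap helper walk the extent map to build the swap map.
 */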
static int
xfs_iomap_swapfile_activate(
	struct swap_info_struct		*sis,
	struct file			*swap_file,
	sector_t			*span)
{
	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
	return iomap_swapfile_activate(sis, swap_file, span,
			&xfs_read_iomap_ops);
}

const struct address_space_operations xfs_address_space_operations = {
	.read_folio		= xfs_vm_read_folio,
	.readahead		= xfs_vm_readahead,
	.writepages		= xfs_vm_writepages,
	.dirty_folio		= iomap_dirty_folio,
	.release_folio		= iomap_release_folio,
	.invalidate_folio	= iomap_invalidate_folio,
	.bmap			= xfs_vm_bmap,
	.migrate_folio		= filemap_migrate_folio,
	.is_partially_uptodate	= iomap_is_partially_uptodate,
	.error_remove_folio	= generic_error_remove_folio,
	.swap_activate		= xfs_iomap_swapfile_activate,
};

const struct address_space_operations xfs_dax_aops = {
	.writepages		= xfs_dax_writepages,
	.dirty_folio		= noop_dirty_folio,
	.swap_activate		= xfs_iomap_swapfile_activate,
};