v6.8 (fs/xfs/xfs_aops.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  4 * Copyright (c) 2016-2018 Christoph Hellwig.
  5 * All Rights Reserved.
  6 */
  7#include "xfs.h"
  8#include "xfs_shared.h"
  9#include "xfs_format.h"
 10#include "xfs_log_format.h"
 11#include "xfs_trans_resv.h"
 12#include "xfs_mount.h"
 13#include "xfs_inode.h"
 14#include "xfs_trans.h"
 15#include "xfs_iomap.h"
 16#include "xfs_trace.h"
 17#include "xfs_bmap.h"
 18#include "xfs_bmap_util.h"
 19#include "xfs_reflink.h"
 20#include "xfs_errortag.h"
 21#include "xfs_error.h"
 22
 23struct xfs_writepage_ctx {
 24	struct iomap_writepage_ctx ctx;
 25	unsigned int		data_seq;
 26	unsigned int		cow_seq;
 27};
 28
 29static inline struct xfs_writepage_ctx *
 30XFS_WPC(struct iomap_writepage_ctx *ctx)
 31{
 32	return container_of(ctx, struct xfs_writepage_ctx, ctx);
 33}
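/*
 * An illustrative aside, not part of the kernel source: XFS_WPC() above is
 * the classic container_of() pattern.  The generic iomap context is embedded
 * as a member, and the enclosing XFS structure is recovered from a pointer
 * to that member by subtracting the member's offset.  A minimal userspace
 * sketch of the same idea follows; my_container_of and the struct names are
 * made up for the example.
 */
#include <stddef.h>
#include <stdio.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_ctx { int unused; };

struct outer_ctx {
	struct generic_ctx	ctx;		/* embedded generic part */
	unsigned int		data_seq;	/* private extension */
};

int main(void)
{
	struct outer_ctx o = { .data_seq = 42 };
	struct generic_ctx *g = &o.ctx;	/* callers see only the generic part */

	/* recover the enclosing structure, exactly as XFS_WPC() does */
	printf("%u\n", my_container_of(g, struct outer_ctx, ctx)->data_seq);
	return 0;
}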
 34
 35/*
 36 * Fast and loose check if this write could update the on-disk inode size.
 37 */
 38static inline bool xfs_ioend_is_append(struct iomap_ioend *ioend)
 39{
 40	return ioend->io_offset + ioend->io_size >
 41		XFS_I(ioend->io_inode)->i_disk_size;
 42}
 43
 44/*
 45 * Update on-disk file size now that data has been written to disk.
 46 */
 47int
 48xfs_setfilesize(
 49	struct xfs_inode	*ip,
 50	xfs_off_t		offset,
 51	size_t			size)
 52{
 53	struct xfs_mount	*mp = ip->i_mount;
 54	struct xfs_trans	*tp;
 55	xfs_fsize_t		isize;
 56	int			error;
 57
 58	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
 59	if (error)
 60		return error;
 61
 62	xfs_ilock(ip, XFS_ILOCK_EXCL);
 63	isize = xfs_new_eof(ip, offset + size);
 64	if (!isize) {
 65		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 66		xfs_trans_cancel(tp);
 67		return 0;
 68	}
 69
 70	trace_xfs_setfilesize(ip, offset, size);
 71
 72	ip->i_disk_size = isize;
 73	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 74	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 75
 76	return xfs_trans_commit(tp);
 77}
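/*
 * An illustrative aside, not part of the kernel source: xfs_setfilesize()
 * follows the common XFS transaction shape: allocate the transaction first,
 * take the lock, cancel cleanly if there is nothing to do, otherwise modify,
 * log and commit.  A hedged userspace sketch of just that control flow;
 * every fake_* name is an invented stand-in, not a real XFS or kernel API.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_trans { int reserved; };		/* stand-in for struct xfs_trans */

static int fake_trans_alloc(struct fake_trans **tpp)
{
	*tpp = calloc(1, sizeof(**tpp));
	return *tpp ? 0 : -1;			/* -ENOMEM stand-in */
}

static void fake_trans_cancel(struct fake_trans *tp)
{
	free(tp);				/* nothing was dirtied */
}

static int fake_trans_commit(struct fake_trans *tp)
{
	free(tp);				/* pretend the log write succeeded */
	return 0;
}

static int fake_setfilesize(long long *disk_size, long long offset,
			    long long size)
{
	struct fake_trans *tp;
	int error = fake_trans_alloc(&tp);

	if (error)
		return error;
	if (offset + size <= *disk_size) {	/* not an extending write */
		fake_trans_cancel(tp);
		return 0;
	}
	*disk_size = offset + size;		/* update, then commit */
	return fake_trans_commit(tp);
}

int main(void)
{
	long long disk_size = 100;

	fake_setfilesize(&disk_size, 90, 5);	/* 95 <= 100: cancelled */
	fake_setfilesize(&disk_size, 90, 20);	/* extends on-disk size to 110 */
	printf("%lld\n", disk_size);		/* prints 110 */
	return 0;
}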
 78
 79/*
 80 * IO write completion.
 81 */
 82STATIC void
 83xfs_end_ioend(
 84	struct iomap_ioend	*ioend)
 85{
 86	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 87	struct xfs_mount	*mp = ip->i_mount;
 88	xfs_off_t		offset = ioend->io_offset;
 89	size_t			size = ioend->io_size;
 90	unsigned int		nofs_flag;
 91	int			error;
 92
 93	/*
 94	 * We can allocate memory here while doing writeback on behalf of
 95	 * memory reclaim.  To avoid memory allocation deadlocks set the
 96	 * task-wide nofs context for the following operations.
 97	 */
 98	nofs_flag = memalloc_nofs_save();
 99
100	/*
101	 * Just clean up the in-memory structures if the fs has been shut down.
102	 */
103	if (xfs_is_shutdown(mp)) {
104		error = -EIO;
105		goto done;
106	}
107
108	/*
109	 * Clean up all COW blocks and underlying data fork delalloc blocks on
110	 * I/O error. The delalloc punch is required because this ioend was
111	 * mapped to blocks in the COW fork and the associated pages are no
112	 * longer dirty. If we don't remove delalloc blocks here, they become
113	 * stale and can corrupt free space accounting on unmount.
114	 */
115	error = blk_status_to_errno(ioend->io_bio->bi_status);
116	if (unlikely(error)) {
117		if (ioend->io_flags & IOMAP_F_SHARED) {
118			xfs_reflink_cancel_cow_range(ip, offset, size, true);
119			xfs_bmap_punch_delalloc_range(ip, offset,
120					offset + size);
121		}
122		goto done;
123	}
124
125	/*
126	 * Success: commit the COW or unwritten blocks if needed.
127	 */
128	if (ioend->io_flags & IOMAP_F_SHARED)
129		error = xfs_reflink_end_cow(ip, offset, size);
130	else if (ioend->io_type == IOMAP_UNWRITTEN)
131		error = xfs_iomap_write_unwritten(ip, offset, size, false);
132
133	if (!error && xfs_ioend_is_append(ioend))
134		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
135done:
136	iomap_finish_ioends(ioend, error);
137	memalloc_nofs_restore(nofs_flag);
138}
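/*
 * An illustrative aside, not part of the kernel source: the
 * memalloc_nofs_save()/memalloc_nofs_restore() pair above scopes a per-task
 * flag so that every allocation made in between implicitly behaves as
 * GFP_NOFS, without threading a gfp argument through each callee.  A
 * userspace sketch of that save/modify/restore idiom using a thread-local
 * flag word; FAKE_NOFS and the fake_* helpers are invented for the example.
 */
#include <stdio.h>

#define FAKE_NOFS	0x1u

static __thread unsigned int task_flags;	/* per-thread, like task flags */

static unsigned int fake_nofs_save(void)
{
	unsigned int old = task_flags & FAKE_NOFS;

	task_flags |= FAKE_NOFS;
	return old;			/* lets save/restore pairs nest */
}

static void fake_nofs_restore(unsigned int old)
{
	task_flags = (task_flags & ~FAKE_NOFS) | old;
}

static void deep_allocation(void)
{
	/* a deep callee consults the ambient flag instead of a gfp argument */
	printf("fs recursion %s\n",
	       (task_flags & FAKE_NOFS) ? "forbidden" : "allowed");
}

int main(void)
{
	unsigned int cookie = fake_nofs_save();

	deep_allocation();		/* "forbidden" */
	fake_nofs_restore(cookie);
	deep_allocation();		/* "allowed" */
	return 0;
}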
139
140/*
141 * Finish all pending IO completions that require transactional modifications.
142 *
143 * We try to merge physically and logically contiguous ioends before completion to
144 * minimise the number of transactions we need to perform during IO completion.
145 * Both unwritten extent conversion and COW remapping need to iterate and modify
146 * one physical extent at a time, so we gain nothing by merging physically
147 * discontiguous extents here.
148 *
149 * The ioend chain we can be processing here is largely unbounded in length,
150 * and we may have to perform significant amounts of work on each ioend to
151 * complete it. Hence we have to be careful about holding the CPU for too
152 * long in this loop.
153 */
154void
155xfs_end_io(
156	struct work_struct	*work)
157{
158	struct xfs_inode	*ip =
159		container_of(work, struct xfs_inode, i_ioend_work);
160	struct iomap_ioend	*ioend;
161	struct list_head	tmp;
162	unsigned long		flags;
163
164	spin_lock_irqsave(&ip->i_ioend_lock, flags);
165	list_replace_init(&ip->i_ioend_list, &tmp);
166	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
167
168	iomap_sort_ioends(&tmp);
169	while ((ioend = list_first_entry_or_null(&tmp, struct iomap_ioend,
170			io_list))) {
171		list_del_init(&ioend->io_list);
172		iomap_ioend_try_merge(ioend, &tmp);
173		xfs_end_ioend(ioend);
174		cond_resched();
175	}
176}
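/*
 * An illustrative aside, not part of the kernel source: xfs_end_io() uses
 * the "splice under the lock, process outside it" idiom.
 * list_replace_init() detaches the whole pending list while the irq-safe
 * spinlock is held, so interrupt-time completions can keep queueing new
 * ioends while the snapshot is sorted, merged and completed unlocked.  A
 * hedged userspace sketch of that idiom with a mutex and a hand-rolled
 * singly linked list (not the kernel list API):
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; int val; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;

static void producer_add(int val)		/* e.g. called from completions */
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	pthread_mutex_lock(&lock);
	n->next = pending;			/* queue under the lock */
	pending = n;
	pthread_mutex_unlock(&lock);
}

static void consumer_run(void)			/* e.g. the workqueue function */
{
	struct node *snapshot, *n;

	pthread_mutex_lock(&lock);
	snapshot = pending;			/* detach everything at once... */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while ((n = snapshot)) {		/* ...and process it unlocked */
		snapshot = n->next;
		printf("completing %d\n", n->val);
		free(n);
	}
}

int main(void)
{
	producer_add(1);
	producer_add(2);
	consumer_run();
	return 0;
}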
177
178STATIC void
179xfs_end_bio(
180	struct bio		*bio)
181{
182	struct iomap_ioend	*ioend = bio->bi_private;
183	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
184	unsigned long		flags;
185
186	spin_lock_irqsave(&ip->i_ioend_lock, flags);
187	if (list_empty(&ip->i_ioend_list))
188		WARN_ON_ONCE(!queue_work(ip->i_mount->m_unwritten_workqueue,
189					 &ip->i_ioend_work));
190	list_add_tail(&ioend->io_list, &ip->i_ioend_list);
191	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
192}
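/*
 * An illustrative aside, not part of the kernel source: note the ordering in
 * xfs_end_bio() above.  The worker is queued only when the per-inode list
 * goes from empty to non-empty, because a single xfs_end_io() pass drains
 * everything added up to the moment it splices the list.  A sketch of just
 * that decision; fake_schedule_worker() is an invented stand-in for
 * queue_work().
 */
#include <stdio.h>

static int pending_count;	/* protected by a lock in the real code */

static void fake_schedule_worker(void)
{
	printf("worker scheduled\n");
}

static void complete_one(void)
{
	if (pending_count == 0)		/* empty to non-empty transition */
		fake_schedule_worker();
	pending_count++;
}

int main(void)
{
	complete_one();		/* schedules the worker */
	complete_one();		/* worker already pending: no reschedule */
	return 0;
}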
193
194/*
195 * Fast revalidation of the cached writeback mapping. Return true if the current
196 * mapping is valid, false otherwise.
197 */
198static bool
199xfs_imap_valid(
200	struct iomap_writepage_ctx	*wpc,
201	struct xfs_inode		*ip,
202	loff_t				offset)
203{
204	if (offset < wpc->iomap.offset ||
205	    offset >= wpc->iomap.offset + wpc->iomap.length)
206		return false;
207	/*
208	 * If this is a COW mapping, it is sufficient to check that the mapping
209	 * covers the offset. Be careful to check this first because the caller
210	 * can revalidate a COW mapping without updating the data seqno.
211	 */
212	if (wpc->iomap.flags & IOMAP_F_SHARED)
213		return true;
214
215	/*
216	 * This is not a COW mapping. Check the sequence number of the data fork
217	 * because concurrent changes could have invalidated the extent. Check
218	 * the COW fork because concurrent changes since the last time we
219	 * checked (and found nothing at this offset) could have added
220	 * overlapping blocks.
221	 */
222	if (XFS_WPC(wpc)->data_seq != READ_ONCE(ip->i_df.if_seq)) {
223		trace_xfs_wb_data_iomap_invalid(ip, &wpc->iomap,
224				XFS_WPC(wpc)->data_seq, XFS_DATA_FORK);
225		return false;
226	}
227	if (xfs_inode_has_cow_data(ip) &&
228	    XFS_WPC(wpc)->cow_seq != READ_ONCE(ip->i_cowfp->if_seq)) {
229		trace_xfs_wb_cow_iomap_invalid(ip, &wpc->iomap,
230				XFS_WPC(wpc)->cow_seq, XFS_COW_FORK);
231		return false;
232	}
233	return true;
234}
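/*
 * An illustrative aside, not part of the kernel source: the data_seq/cow_seq
 * checks above implement sequence-counter cache validation.  When a mapping
 * is cached, the owning fork's modification counter is sampled; the cache is
 * trusted later only if that counter is unchanged.  A minimal userspace
 * sketch with invented names:
 */
#include <stdbool.h>
#include <stdio.h>

struct fork { unsigned int seq; };	/* bumped on every extent-map change */

struct cached_map {
	long long	offset, length;	/* cached byte range */
	unsigned int	seq;		/* fork->seq sampled when cached */
};

static bool map_valid(const struct cached_map *map, const struct fork *f,
		      long long pos)
{
	if (pos < map->offset || pos >= map->offset + map->length)
		return false;		/* outside the cached range */
	return map->seq == f->seq;	/* stale if the fork changed since */
}

int main(void)
{
	struct fork f = { .seq = 7 };
	struct cached_map m = { .offset = 0, .length = 4096, .seq = 7 };

	printf("%d\n", map_valid(&m, &f, 1024));	/* 1: still valid */
	f.seq++;					/* concurrent change */
	printf("%d\n", map_valid(&m, &f, 1024));	/* 0: must re-look up */
	return 0;
}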
235
236/*
237 * Pass in a delalloc extent and convert it to real extents, returning the real
238 * extent that maps offset_fsb in wpc->iomap.
239 *
240 * The current page is held locked so nothing could have removed the block
241 * backing offset_fsb, although it could have moved from the COW to the data
242 * fork by another thread.
243 */
244static int
245xfs_convert_blocks(
246	struct iomap_writepage_ctx *wpc,
247	struct xfs_inode	*ip,
248	int			whichfork,
249	loff_t			offset)
250{
251	int			error;
252	unsigned		*seq;
253
254	if (whichfork == XFS_COW_FORK)
255		seq = &XFS_WPC(wpc)->cow_seq;
256	else
257		seq = &XFS_WPC(wpc)->data_seq;
258
259	/*
260	 * Attempt to allocate whatever delalloc extent currently backs offset
261	 * and put the result into wpc->iomap.  Allocate in a loop because it
262	 * may take several attempts to allocate real blocks for a contiguous
263	 * delalloc extent if free space is sufficiently fragmented.
264	 */
265	do {
266		error = xfs_bmapi_convert_delalloc(ip, whichfork, offset,
267				&wpc->iomap, seq);
268		if (error)
269			return error;
270	} while (wpc->iomap.offset + wpc->iomap.length <= offset);
271
272	return 0;
273}
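/*
 * An illustrative aside, not part of the kernel source: the do/while above
 * loops because one conversion call may only allocate the front of a long
 * delalloc extent when free space is fragmented, so it retries until the
 * resulting mapping finally covers the offset being written back.  A sketch
 * of that termination condition; fake_convert_front() is an invented
 * stand-in for the allocator.
 */
#include <stdio.h>

struct range { long long start, len; };

/* pretend the allocator can only satisfy 4 units per call */
static void fake_convert_front(struct range *out, long long done)
{
	out->start = done;
	out->len = 4;
}

int main(void)
{
	struct range map = { 0, 0 };
	long long target = 10, done = 0;

	do {
		fake_convert_front(&map, done);
		done = map.start + map.len;
		printf("mapped [%lld,%lld)\n", map.start, done);
	} while (map.start + map.len <= target);	/* until target covered */
	return 0;
}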
274
275static int
276xfs_map_blocks(
277	struct iomap_writepage_ctx *wpc,
278	struct inode		*inode,
279	loff_t			offset)
280{
281	struct xfs_inode	*ip = XFS_I(inode);
282	struct xfs_mount	*mp = ip->i_mount;
283	ssize_t			count = i_blocksize(inode);
284	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
285	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
286	xfs_fileoff_t		cow_fsb;
287	int			whichfork;
288	struct xfs_bmbt_irec	imap;
289	struct xfs_iext_cursor	icur;
290	int			retries = 0;
291	int			error = 0;
292
293	if (xfs_is_shutdown(mp))
294		return -EIO;
295
296	XFS_ERRORTAG_DELAY(mp, XFS_ERRTAG_WB_DELAY_MS);
297
298	/*
299	 * COW fork blocks can overlap data fork blocks even if the blocks
300	 * aren't shared.  COW I/O always takes precedence, so we must always
301	 * check for overlap on reflink inodes unless the mapping is already a
302	 * COW one, or the COW fork hasn't changed from the last time we looked
303	 * at it.
304	 *
305	 * It's safe to check the COW fork if_seq here without the ILOCK because
306	 * we've indirectly protected against concurrent updates: writeback has
307	 * the page locked, which prevents concurrent invalidations by reflink
308	 * and directio and prevents concurrent buffered writes to the same
309	 * page.  Changes to if_seq always happen under i_lock, which protects
310	 * against concurrent updates and provides a memory barrier on the way
311	 * out that ensures that we always see the current value.
312	 */
313	if (xfs_imap_valid(wpc, ip, offset))
314		return 0;
315
316	/*
317	 * If we don't have a valid map, now it's time to get a new one for this
318	 * offset.  This will convert delayed allocations (including COW ones)
319	 * into real extents.  If we return without a valid map, it means we
320	 * landed in a hole and we skip the block.
321	 */
322retry:
323	cow_fsb = NULLFILEOFF;
324	whichfork = XFS_DATA_FORK;
325	xfs_ilock(ip, XFS_ILOCK_SHARED);
326	ASSERT(!xfs_need_iread_extents(&ip->i_df));
327
328	/*
329	 * Check if this offset is covered by a COW extent, and if so use
330	 * it directly instead of looking up anything in the data fork.
331	 */
332	if (xfs_inode_has_cow_data(ip) &&
333	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
334		cow_fsb = imap.br_startoff;
335	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
336		XFS_WPC(wpc)->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
337		xfs_iunlock(ip, XFS_ILOCK_SHARED);
338
339		whichfork = XFS_COW_FORK;
340		goto allocate_blocks;
341	}
342
343	/*
344	 * No COW extent overlap. Revalidate now that we may have updated
345	 * ->cow_seq. If the data mapping is still valid, we're done.
346	 */
347	if (xfs_imap_valid(wpc, ip, offset)) {
348		xfs_iunlock(ip, XFS_ILOCK_SHARED);
349		return 0;
350	}
351
352	/*
353	 * If we don't have a valid map, now it's time to get a new one for this
354	 * offset.  This will convert delayed allocations (including COW ones)
355	 * into real extents.
356	 */
357	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
358		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
359	XFS_WPC(wpc)->data_seq = READ_ONCE(ip->i_df.if_seq);
360	xfs_iunlock(ip, XFS_ILOCK_SHARED);
361
362	/* landed in a hole or beyond EOF? */
363	if (imap.br_startoff > offset_fsb) {
364		imap.br_blockcount = imap.br_startoff - offset_fsb;
365		imap.br_startoff = offset_fsb;
366		imap.br_startblock = HOLESTARTBLOCK;
367		imap.br_state = XFS_EXT_NORM;
368	}
369
370	/*
371	 * Truncate to the next COW extent if there is one.  This is the only
372	 * opportunity to do this because we can skip COW fork lookups for the
373	 * subsequent blocks in the mapping; however, the requirement to treat
374	 * the COW range separately remains.
375	 */
376	if (cow_fsb != NULLFILEOFF &&
377	    cow_fsb < imap.br_startoff + imap.br_blockcount)
378		imap.br_blockcount = cow_fsb - imap.br_startoff;
379
380	/* got a delalloc extent? */
381	if (imap.br_startblock != HOLESTARTBLOCK &&
382	    isnullstartblock(imap.br_startblock))
383		goto allocate_blocks;
384
385	xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0, XFS_WPC(wpc)->data_seq);
386	trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
387	return 0;
388allocate_blocks:
389	error = xfs_convert_blocks(wpc, ip, whichfork, offset);
390	if (error) {
391		/*
392		 * If we failed to find the extent in the COW fork we might have
393		 * raced with a COW to data fork conversion or truncate.
394		 * Restart the lookup to catch the extent in the data fork for
395		 * the former case, but prevent additional retries to avoid
396		 * looping forever for the latter case.
397		 */
398		if (error == -EAGAIN && whichfork == XFS_COW_FORK && !retries++)
399			goto retry;
400		ASSERT(error != -EAGAIN);
401		return error;
402	}
403
404	/*
405	 * Due to merging, the returned real extent might be larger than the
406	 * original delalloc one.  Trim the returned extent to the next COW
407	 * boundary again to force a re-lookup.
408	 */
409	if (whichfork != XFS_COW_FORK && cow_fsb != NULLFILEOFF) {
410		loff_t		cow_offset = XFS_FSB_TO_B(mp, cow_fsb);
411
412		if (cow_offset < wpc->iomap.offset + wpc->iomap.length)
413			wpc->iomap.length = cow_offset - wpc->iomap.offset;
414	}
415
416	ASSERT(wpc->iomap.offset <= offset);
417	ASSERT(wpc->iomap.offset + wpc->iomap.length > offset);
418	trace_xfs_map_blocks_alloc(ip, offset, count, whichfork, &imap);
419	return 0;
420}
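/*
 * An illustrative aside, not part of the kernel source: the trimming above
 * is plain interval arithmetic.  If a COW extent starts inside the mapping
 * just built, the mapping is shortened so that the next block forces a
 * fresh lookup which will then pick the COW fork.  A worked sketch in bytes
 * with invented values:
 */
#include <stdio.h>

int main(void)
{
	long long map_off = 0, map_len = 65536;	/* data fork mapping */
	long long cow_off = 16384;		/* next COW extent starts here */

	if (cow_off < map_off + map_len)
		map_len = cow_off - map_off;	/* stop short of the COW range */

	/* prints "use data mapping [0,16384)" */
	printf("use data mapping [%lld,%lld)\n", map_off, map_off + map_len);
	return 0;
}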
421
422static int
423xfs_prepare_ioend(
424	struct iomap_ioend	*ioend,
425	int			status)
426{
427	unsigned int		nofs_flag;
428
429	/*
430	 * We can allocate memory here while doing writeback on behalf of
431	 * memory reclaim.  To avoid memory allocation deadlocks set the
432	 * task-wide nofs context for the following operations.
433	 */
434	nofs_flag = memalloc_nofs_save();
435
436	/* Convert CoW extents to regular */
437	if (!status && (ioend->io_flags & IOMAP_F_SHARED)) {
438		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
439				ioend->io_offset, ioend->io_size);
440	}
441
442	memalloc_nofs_restore(nofs_flag);
443
444	/* send ioends that might require a transaction to the completion wq */
445	if (xfs_ioend_is_append(ioend) || ioend->io_type == IOMAP_UNWRITTEN ||
446	    (ioend->io_flags & IOMAP_F_SHARED))
447		ioend->io_bio->bi_end_io = xfs_end_bio;
448	return status;
449}
450
451/*
452 * If the folio has delalloc blocks on it, the caller is asking us to punch them
453 * out. If we don't, we can leave a stale delalloc mapping covered by a clean
454 * page that needs to be dirtied again before the delalloc mapping can be
455 * converted. This stale delalloc mapping can trip up a later direct I/O read
456 * operation on the same region.
457 *
458 * We prevent this by truncating away the delalloc regions on the folio. Because
459 * they are delalloc, we can do this without needing a transaction. Indeed - if
460 * we get ENOSPC errors, we have to be able to do this truncation without a
461 * transaction as there is no space left for block reservation (typically why
462 * we see an ENOSPC in writeback).
463 */
464static void
465xfs_discard_folio(
466	struct folio		*folio,
467	loff_t			pos)
468{
469	struct xfs_inode	*ip = XFS_I(folio->mapping->host);
470	struct xfs_mount	*mp = ip->i_mount;
471	int			error;
472
473	if (xfs_is_shutdown(mp))
474		return;
475
476	xfs_alert_ratelimited(mp,
477		"page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
478			folio, ip->i_ino, pos);
479
480	/*
481	 * The end of the punch range is always the offset of the first
482	 * byte of the next folio. Hence the end offset is only dependent on the
483	 * folio itself and not the start offset that is passed in.
484	 */
485	error = xfs_bmap_punch_delalloc_range(ip, pos,
486				folio_pos(folio) + folio_size(folio));
487
488	if (error && !xfs_is_shutdown(mp))
489		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
490}
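/*
 * An illustrative aside, not part of the kernel source: the punch range
 * above always ends at the first byte of the next folio, so the end depends
 * only on the folio while the start is the caller's position.  A worked
 * sketch with an invented 16k folio:
 */
#include <stdio.h>

int main(void)
{
	long long folio_pos = 32768, folio_size = 16384;
	long long pos = 36864;			/* somewhere inside the folio */
	long long punch_end = folio_pos + folio_size;

	/* prints "punch delalloc in [36864,49152)" */
	printf("punch delalloc in [%lld,%lld)\n", pos, punch_end);
	return 0;
}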
491
492static const struct iomap_writeback_ops xfs_writeback_ops = {
493	.map_blocks		= xfs_map_blocks,
494	.prepare_ioend		= xfs_prepare_ioend,
495	.discard_folio		= xfs_discard_folio,
496};
497
498STATIC int
499xfs_vm_writepages(
500	struct address_space	*mapping,
501	struct writeback_control *wbc)
502{
503	struct xfs_writepage_ctx wpc = { };
504
505	/*
506	 * Writing back data in a transaction context can result in recursive
507	 * transactions. This is bad, so issue a warning and get out of here.
508	 */
509	if (WARN_ON_ONCE(current->journal_info))
510		return 0;
511
512	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
513	return iomap_writepages(mapping, wbc, &wpc.ctx, &xfs_writeback_ops);
514}
515
516STATIC int
517xfs_dax_writepages(
518	struct address_space	*mapping,
519	struct writeback_control *wbc)
520{
521	struct xfs_inode	*ip = XFS_I(mapping->host);
522
523	xfs_iflags_clear(ip, XFS_ITRUNCATED);
524	return dax_writeback_mapping_range(mapping,
525			xfs_inode_buftarg(ip)->bt_daxdev, wbc);
526}
527
528STATIC sector_t
529xfs_vm_bmap(
530	struct address_space	*mapping,
531	sector_t		block)
532{
533	struct xfs_inode	*ip = XFS_I(mapping->host);
534
535	trace_xfs_vm_bmap(ip);
536
537	/*
538	 * The swap code (ab-)uses ->bmap to get a block mapping and then
539	 * bypasses the file system for actual I/O.  We really can't allow
540	 * that on reflink inodes, so we have to skip out here.  And yes,
541	 * 0 is the magic code for a bmap error.
542	 *
543	 * Since we don't pass back blockdev info, we can't return bmap
544	 * information for rt files either.
545	 */
546	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
547		return 0;
548	return iomap_bmap(mapping, block, &xfs_read_iomap_ops);
549}
550
551STATIC int
552xfs_vm_read_folio(
553	struct file		*unused,
554	struct folio		*folio)
555{
556	return iomap_read_folio(folio, &xfs_read_iomap_ops);
557}
558
559STATIC void
560xfs_vm_readahead(
561	struct readahead_control	*rac)
562{
563	iomap_readahead(rac, &xfs_read_iomap_ops);
564}
565
566static int
567xfs_iomap_swapfile_activate(
568	struct swap_info_struct		*sis,
569	struct file			*swap_file,
570	sector_t			*span)
571{
572	sis->bdev = xfs_inode_buftarg(XFS_I(file_inode(swap_file)))->bt_bdev;
573	return iomap_swapfile_activate(sis, swap_file, span,
574			&xfs_read_iomap_ops);
575}
576
577const struct address_space_operations xfs_address_space_operations = {
578	.read_folio		= xfs_vm_read_folio,
579	.readahead		= xfs_vm_readahead,
580	.writepages		= xfs_vm_writepages,
581	.dirty_folio		= iomap_dirty_folio,
582	.release_folio		= iomap_release_folio,
583	.invalidate_folio	= iomap_invalidate_folio,
584	.bmap			= xfs_vm_bmap,
585	.migrate_folio		= filemap_migrate_folio,
586	.is_partially_uptodate  = iomap_is_partially_uptodate,
587	.error_remove_folio	= generic_error_remove_folio,
588	.swap_activate		= xfs_iomap_swapfile_activate,
589};
590
591const struct address_space_operations xfs_dax_aops = {
592	.writepages		= xfs_dax_writepages,
593	.dirty_folio		= noop_dirty_folio,
594	.swap_activate		= xfs_iomap_swapfile_activate,
595};
v5.4 (fs/xfs/xfs_aops.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
   4 * Copyright (c) 2016-2018 Christoph Hellwig.
   5 * All Rights Reserved.
   6 */
   7#include "xfs.h"
   8#include "xfs_shared.h"
   9#include "xfs_format.h"
  10#include "xfs_log_format.h"
  11#include "xfs_trans_resv.h"
  12#include "xfs_mount.h"
  13#include "xfs_inode.h"
  14#include "xfs_trans.h"
  15#include "xfs_iomap.h"
  16#include "xfs_trace.h"
  17#include "xfs_bmap.h"
  18#include "xfs_bmap_util.h"
  19#include "xfs_reflink.h"
  20
  21/*
  22 * structure owned by writepages, passed to individual writepage calls
  23 */
  24struct xfs_writepage_ctx {
  25	struct xfs_bmbt_irec    imap;
  26	int			fork;
  27	unsigned int		data_seq;
  28	unsigned int		cow_seq;
  29	struct xfs_ioend	*ioend;
  30};
  31
  32struct block_device *
  33xfs_find_bdev_for_inode(
  34	struct inode		*inode)
  35{
  36	struct xfs_inode	*ip = XFS_I(inode);
  37	struct xfs_mount	*mp = ip->i_mount;
  38
  39	if (XFS_IS_REALTIME_INODE(ip))
  40		return mp->m_rtdev_targp->bt_bdev;
  41	else
  42		return mp->m_ddev_targp->bt_bdev;
  43}
  44
  45struct dax_device *
  46xfs_find_daxdev_for_inode(
  47	struct inode		*inode)
  48{
  49	struct xfs_inode	*ip = XFS_I(inode);
  50	struct xfs_mount	*mp = ip->i_mount;
  51
  52	if (XFS_IS_REALTIME_INODE(ip))
  53		return mp->m_rtdev_targp->bt_daxdev;
  54	else
  55		return mp->m_ddev_targp->bt_daxdev;
  56}
  57
  58static void
  59xfs_finish_page_writeback(
  60	struct inode		*inode,
  61	struct bio_vec		*bvec,
  62	int			error)
  63{
  64	struct iomap_page	*iop = to_iomap_page(bvec->bv_page);
  65
  66	if (error) {
  67		SetPageError(bvec->bv_page);
  68		mapping_set_error(inode->i_mapping, -EIO);
  69	}
  70
  71	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
  72	ASSERT(!iop || atomic_read(&iop->write_count) > 0);
  73
  74	if (!iop || atomic_dec_and_test(&iop->write_count))
  75		end_page_writeback(bvec->bv_page);
  76}
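/*
 * An illustrative aside, not part of the kernel source: the write_count
 * handling above is the usual "last reference completes the object" idiom.
 * Every block under I/O on the page holds one count, and only the completion
 * that drops it to zero ends writeback.  A userspace sketch with C11
 * atomics; struct fake_page and fake_block_done() are invented names.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_page { atomic_int write_count; };

/* returns true for exactly one caller: the one dropping the last count */
static bool fake_block_done(struct fake_page *p)
{
	return atomic_fetch_sub(&p->write_count, 1) == 1;
}

int main(void)
{
	struct fake_page page = { .write_count = 3 };	/* 3 blocks in flight */

	for (int i = 0; i < 3; i++)
		if (fake_block_done(&page))
			printf("completion %d ends page writeback\n", i);
	return 0;
}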
  77
  78/*
  79 * We're now finished for good with this ioend structure.  Update the page
  80 * state, release holds on bios, and finally free up memory.  Do not use the
  81 * ioend after this.
  82 */
  83STATIC void
  84xfs_destroy_ioend(
  85	struct xfs_ioend	*ioend,
  86	int			error)
  87{
  88	struct inode		*inode = ioend->io_inode;
  89	struct bio		*bio = &ioend->io_inline_bio;
  90	struct bio		*last = ioend->io_bio, *next;
  91	u64			start = bio->bi_iter.bi_sector;
  92	bool			quiet = bio_flagged(bio, BIO_QUIET);
  93
  94	for (bio = &ioend->io_inline_bio; bio; bio = next) {
  95		struct bio_vec	*bvec;
  96		struct bvec_iter_all iter_all;
  97
  98		/*
  99		 * For the last bio, bi_private points to the ioend, so we
 100		 * need to explicitly end the iteration here.
 101		 */
 102		if (bio == last)
 103			next = NULL;
 104		else
 105			next = bio->bi_private;
 106
 107		/* walk each page on bio, ending page IO on them */
 108		bio_for_each_segment_all(bvec, bio, iter_all)
 109			xfs_finish_page_writeback(inode, bvec, error);
 110		bio_put(bio);
 111	}
 112
 113	if (unlikely(error && !quiet)) {
 114		xfs_err_ratelimited(XFS_I(inode)->i_mount,
 115			"writeback error on sector %llu", start);
 116	}
 117}
 118
 119/*
 120 * Fast and loose check if this write could update the on-disk inode size.
 121 */
 122static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 123{
 124	return ioend->io_offset + ioend->io_size >
 125		XFS_I(ioend->io_inode)->i_d.di_size;
 126}
 127
 128STATIC int
 129xfs_setfilesize_trans_alloc(
 130	struct xfs_ioend	*ioend)
 131{
 132	struct xfs_mount	*mp = XFS_I(ioend->io_inode)->i_mount;
 133	struct xfs_trans	*tp;
 134	int			error;
 135
 136	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
 137	if (error)
 138		return error;
 139
 140	ioend->io_append_trans = tp;
 141
 142	/*
 143	 * We may pass freeze protection with a transaction.  So tell lockdep
 144	 * we released it.
 145	 */
 146	__sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
 147	/*
 148	 * We hand off the transaction to the completion thread now, so
 149	 * clear the flag here.
 150	 */
 151	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 152	return 0;
 153}
 154
 155/*
 156 * Update on-disk file size now that data has been written to disk.
 157 */
 158STATIC int
 159__xfs_setfilesize(
 160	struct xfs_inode	*ip,
 161	struct xfs_trans	*tp,
 162	xfs_off_t		offset,
 163	size_t			size)
 164{
 165	xfs_fsize_t		isize;
 166
 167	xfs_ilock(ip, XFS_ILOCK_EXCL);
 168	isize = xfs_new_eof(ip, offset + size);
 169	if (!isize) {
 170		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 171		xfs_trans_cancel(tp);
 172		return 0;
 173	}
 174
 175	trace_xfs_setfilesize(ip, offset, size);
 176
 177	ip->i_d.di_size = isize;
 178	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 179	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 180
 181	return xfs_trans_commit(tp);
 182}
 183
 184int
 185xfs_setfilesize(
 186	struct xfs_inode	*ip,
 187	xfs_off_t		offset,
 188	size_t			size)
 189{
 190	struct xfs_mount	*mp = ip->i_mount;
 191	struct xfs_trans	*tp;
 192	int			error;
 193
 194	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
 195	if (error)
 196		return error;
 197
 198	return __xfs_setfilesize(ip, tp, offset, size);
 199}
 200
 201STATIC int
 202xfs_setfilesize_ioend(
 203	struct xfs_ioend	*ioend,
 204	int			error)
 205{
 206	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 207	struct xfs_trans	*tp = ioend->io_append_trans;
 208
 209	/*
 210	 * The transaction may have been allocated in the I/O submission thread,
 211	 * thus we need to mark ourselves as being in a transaction manually.
 212	 * Similarly for freeze protection.
 213	 */
 214	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
 215	__sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);
 216
 217	/* we abort the update if there was an IO error */
 218	if (error) {
 219		xfs_trans_cancel(tp);
 220		return error;
 221	}
 222
 223	return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
 224}
 225
 226/*
 227 * IO write completion.
 228 */
 229STATIC void
 230xfs_end_ioend(
 231	struct xfs_ioend	*ioend)
 232{
 233	struct list_head	ioend_list;
 234	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 235	xfs_off_t		offset = ioend->io_offset;
 236	size_t			size = ioend->io_size;
 237	unsigned int		nofs_flag;
 238	int			error;
 239
 240	/*
 241	 * We can allocate memory here while doing writeback on behalf of
 242	 * memory reclaim.  To avoid memory allocation deadlocks set the
 243	 * task-wide nofs context for the following operations.
 244	 */
 245	nofs_flag = memalloc_nofs_save();
 246
 247	/*
 248	 * Just clean up the in-memory structures if the fs has been shut down.
 249	 */
 250	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 251		error = -EIO;
 252		goto done;
 253	}
 254
 255	/*
 256	 * Clean up any COW blocks on an I/O error.
 257	 */
 258	error = blk_status_to_errno(ioend->io_bio->bi_status);
 259	if (unlikely(error)) {
 260		if (ioend->io_fork == XFS_COW_FORK)
 261			xfs_reflink_cancel_cow_range(ip, offset, size, true);
 262		goto done;
 263	}
 264
 265	/*
 266	 * Success: commit the COW or unwritten blocks if needed.
 267	 */
 268	if (ioend->io_fork == XFS_COW_FORK)
 269		error = xfs_reflink_end_cow(ip, offset, size);
 270	else if (ioend->io_state == XFS_EXT_UNWRITTEN)
 271		error = xfs_iomap_write_unwritten(ip, offset, size, false);
 272	else
 273		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
 274
 275done:
 276	if (ioend->io_append_trans)
 277		error = xfs_setfilesize_ioend(ioend, error);
 278	list_replace_init(&ioend->io_list, &ioend_list);
 279	xfs_destroy_ioend(ioend, error);
 280
 281	while (!list_empty(&ioend_list)) {
 282		ioend = list_first_entry(&ioend_list, struct xfs_ioend,
 283				io_list);
 284		list_del_init(&ioend->io_list);
 285		xfs_destroy_ioend(ioend, error);
 286	}
 287
 288	memalloc_nofs_restore(nofs_flag);
 289}
 290
 291/*
 292 * We can merge two adjacent ioends if they have the same set of work to do.
 293 */
 294static bool
 295xfs_ioend_can_merge(
 296	struct xfs_ioend	*ioend,
 297	struct xfs_ioend	*next)
 298{
 299	if (ioend->io_bio->bi_status != next->io_bio->bi_status)
 300		return false;
 301	if ((ioend->io_fork == XFS_COW_FORK) ^ (next->io_fork == XFS_COW_FORK))
 302		return false;
 303	if ((ioend->io_state == XFS_EXT_UNWRITTEN) ^
 304	    (next->io_state == XFS_EXT_UNWRITTEN))
 305		return false;
 306	if (ioend->io_offset + ioend->io_size != next->io_offset)
 307		return false;
 308	return true;
 309}
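/*
 * An illustrative aside, not part of the kernel source: the ^ tests above
 * XOR two boolean conditions, which reads as "the two ioends disagree on
 * this property", and any disagreement forbids merging.  A tiny sketch:
 */
#include <stdio.h>

int main(void)
{
	int a_is_cow = 1, b_is_cow = 0;

	/* 1 when the flags differ (cannot merge), 0 when they match */
	printf("%d\n", a_is_cow ^ b_is_cow);
	return 0;
}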
 310
 311/*
 312 * If the ioend to be merged has a preallocated transaction for file
 313 * size updates, we need to ensure the ioend it is merged into also
 314 * has one.  If it already has one we can simply cancel the transaction
 315 * as it is guaranteed to be clean.
 316 */
 317static void
 318xfs_ioend_merge_append_transactions(
 319	struct xfs_ioend	*ioend,
 320	struct xfs_ioend	*next)
 321{
 322	if (!ioend->io_append_trans) {
 323		ioend->io_append_trans = next->io_append_trans;
 324		next->io_append_trans = NULL;
 325	} else {
 326		xfs_setfilesize_ioend(next, -ECANCELED);
 327	}
 328}
 329
 330/* Try to merge adjacent completions. */
 331STATIC void
 332xfs_ioend_try_merge(
 333	struct xfs_ioend	*ioend,
 334	struct list_head	*more_ioends)
 335{
 336	struct xfs_ioend	*next_ioend;
 337
 338	while (!list_empty(more_ioends)) {
 339		next_ioend = list_first_entry(more_ioends, struct xfs_ioend,
 340				io_list);
 341		if (!xfs_ioend_can_merge(ioend, next_ioend))
 342			break;
 343		list_move_tail(&next_ioend->io_list, &ioend->io_list);
 344		ioend->io_size += next_ioend->io_size;
 345		if (next_ioend->io_append_trans)
 346			xfs_ioend_merge_append_transactions(ioend, next_ioend);
 347	}
 348}
 349
 350/* list_sort compare function for ioends */
 351static int
 352xfs_ioend_compare(
 353	void			*priv,
 354	struct list_head	*a,
 355	struct list_head	*b)
 356{
 357	struct xfs_ioend	*ia;
 358	struct xfs_ioend	*ib;
 359
 360	ia = container_of(a, struct xfs_ioend, io_list);
 361	ib = container_of(b, struct xfs_ioend, io_list);
 362	if (ia->io_offset < ib->io_offset)
 363		return -1;
 364	else if (ia->io_offset > ib->io_offset)
 365		return 1;
 366	return 0;
 367}
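/*
 * An illustrative aside, not part of the kernel source: xfs_ioend_compare()
 * is a standard three-way comparator keyed on io_offset, which is what lets
 * list_sort() place mergeable neighbours next to each other.  The same shape
 * with userspace qsort() and an invented struct:
 */
#include <stdio.h>
#include <stdlib.h>

struct io { long long offset; };

static int io_cmp(const void *a, const void *b)
{
	const struct io *ia = a, *ib = b;

	if (ia->offset < ib->offset)
		return -1;
	if (ia->offset > ib->offset)
		return 1;
	return 0;
}

int main(void)
{
	struct io v[] = { { 8192 }, { 0 }, { 4096 } };

	qsort(v, 3, sizeof(v[0]), io_cmp);
	for (int i = 0; i < 3; i++)
		printf("%lld\n", v[i].offset);	/* 0 4096 8192 */
	return 0;
}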
 368
 369/* Finish all pending io completions. */
 370void
 371xfs_end_io(
 372	struct work_struct	*work)
 373{
 374	struct xfs_inode	*ip;
 375	struct xfs_ioend	*ioend;
 376	struct list_head	completion_list;
 377	unsigned long		flags;
 378
 379	ip = container_of(work, struct xfs_inode, i_ioend_work);
 380
 381	spin_lock_irqsave(&ip->i_ioend_lock, flags);
 382	list_replace_init(&ip->i_ioend_list, &completion_list);
 383	spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
 384
 385	list_sort(NULL, &completion_list, xfs_ioend_compare);
 386
 387	while (!list_empty(&completion_list)) {
 388		ioend = list_first_entry(&completion_list, struct xfs_ioend,
 389				io_list);
 390		list_del_init(&ioend->io_list);
 391		xfs_ioend_try_merge(ioend, &completion_list);
 392		xfs_end_ioend(ioend);
 393	}
 394}
 395
 396STATIC void
 397xfs_end_bio(
 398	struct bio		*bio)
 399{
 400	struct xfs_ioend	*ioend = bio->bi_private;
 401	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
 402	struct xfs_mount	*mp = ip->i_mount;
 403	unsigned long		flags;
 404
 405	if (ioend->io_fork == XFS_COW_FORK ||
 406	    ioend->io_state == XFS_EXT_UNWRITTEN ||
 407	    ioend->io_append_trans != NULL) {
 408		spin_lock_irqsave(&ip->i_ioend_lock, flags);
 409		if (list_empty(&ip->i_ioend_list))
 410			WARN_ON_ONCE(!queue_work(mp->m_unwritten_workqueue,
 411						 &ip->i_ioend_work));
 412		list_add_tail(&ioend->io_list, &ip->i_ioend_list);
 413		spin_unlock_irqrestore(&ip->i_ioend_lock, flags);
 414	} else
 415		xfs_destroy_ioend(ioend, blk_status_to_errno(bio->bi_status));
 416}
 417
 418/*
 419 * Fast revalidation of the cached writeback mapping. Return true if the current
 420 * mapping is valid, false otherwise.
 421 */
 422static bool
 423xfs_imap_valid(
 424	struct xfs_writepage_ctx	*wpc,
 425	struct xfs_inode		*ip,
 426	xfs_fileoff_t			offset_fsb)
 427{
 428	if (offset_fsb < wpc->imap.br_startoff ||
 429	    offset_fsb >= wpc->imap.br_startoff + wpc->imap.br_blockcount)
 430		return false;
 431	/*
 432	 * If this is a COW mapping, it is sufficient to check that the mapping
 433	 * covers the offset. Be careful to check this first because the caller
 434	 * can revalidate a COW mapping without updating the data seqno.
 435	 */
 436	if (wpc->fork == XFS_COW_FORK)
 437		return true;
 438
 439	/*
 440	 * This is not a COW mapping. Check the sequence number of the data fork
 441	 * because concurrent changes could have invalidated the extent. Check
 442	 * the COW fork because concurrent changes since the last time we
 443	 * checked (and found nothing at this offset) could have added
 444	 * overlapping blocks.
 445	 */
 446	if (wpc->data_seq != READ_ONCE(ip->i_df.if_seq))
 447		return false;
 448	if (xfs_inode_has_cow_data(ip) &&
 449	    wpc->cow_seq != READ_ONCE(ip->i_cowfp->if_seq))
 450		return false;
 451	return true;
 452}
 453
 454/*
 455 * Pass in a delalloc extent and convert it to real extents, returning the real
 456 * extent that maps offset_fsb in wpc->imap.
 457 *
 458 * The current page is held locked so nothing could have removed the block
 459 * backing offset_fsb, although it could have moved from the COW to the data
 460 * fork by another thread.
 461 */
 462static int
 463xfs_convert_blocks(
 464	struct xfs_writepage_ctx *wpc,
 465	struct xfs_inode	*ip,
 466	xfs_fileoff_t		offset_fsb)
 467{
 468	int			error;
 469
 470	/*
 471	 * Attempt to allocate whatever delalloc extent currently backs
 472	 * offset_fsb and put the result into wpc->imap.  Allocate in a loop
 473	 * because it may take several attempts to allocate real blocks for a
 474	 * contiguous delalloc extent if free space is sufficiently fragmented.
 475	 */
 476	do {
 477		error = xfs_bmapi_convert_delalloc(ip, wpc->fork, offset_fsb,
 478				&wpc->imap, wpc->fork == XFS_COW_FORK ?
 479					&wpc->cow_seq : &wpc->data_seq);
 480		if (error)
 481			return error;
 482	} while (wpc->imap.br_startoff + wpc->imap.br_blockcount <= offset_fsb);
 483
 484	return 0;
 485}
 486
 487STATIC int
 488xfs_map_blocks(
 489	struct xfs_writepage_ctx *wpc,
 490	struct inode		*inode,
 491	loff_t			offset)
 492{
 493	struct xfs_inode	*ip = XFS_I(inode);
 494	struct xfs_mount	*mp = ip->i_mount;
 495	ssize_t			count = i_blocksize(inode);
 496	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
 497	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
 498	xfs_fileoff_t		cow_fsb = NULLFILEOFF;
 499	struct xfs_bmbt_irec	imap;
 500	struct xfs_iext_cursor	icur;
 501	int			retries = 0;
 502	int			error = 0;
 503
 504	if (XFS_FORCED_SHUTDOWN(mp))
 505		return -EIO;
 506
 507	/*
 508	 * COW fork blocks can overlap data fork blocks even if the blocks
 509	 * aren't shared.  COW I/O always takes precedence, so we must always
 510	 * check for overlap on reflink inodes unless the mapping is already a
 511	 * COW one, or the COW fork hasn't changed from the last time we looked
 512	 * at it.
 513	 *
 514	 * It's safe to check the COW fork if_seq here without the ILOCK because
 515	 * we've indirectly protected against concurrent updates: writeback has
 516	 * the page locked, which prevents concurrent invalidations by reflink
 517	 * and directio and prevents concurrent buffered writes to the same
 518	 * page.  Changes to if_seq always happen under i_lock, which protects
 519	 * against concurrent updates and provides a memory barrier on the way
 520	 * out that ensures that we always see the current value.
 521	 */
 522	if (xfs_imap_valid(wpc, ip, offset_fsb))
 523		return 0;
 524
 525	/*
 526	 * If we don't have a valid map, now it's time to get a new one for this
 527	 * offset.  This will convert delayed allocations (including COW ones)
 528	 * into real extents.  If we return without a valid map, it means we
 529	 * landed in a hole and we skip the block.
 530	 */
 531retry:
 532	xfs_ilock(ip, XFS_ILOCK_SHARED);
 533	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
 534	       (ip->i_df.if_flags & XFS_IFEXTENTS));
 535
 536	/*
 537	 * Check if this offset is covered by a COW extent, and if so use
 538	 * it directly instead of looking up anything in the data fork.
 539	 */
 540	if (xfs_inode_has_cow_data(ip) &&
 541	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &imap))
 542		cow_fsb = imap.br_startoff;
 543	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
 544		wpc->cow_seq = READ_ONCE(ip->i_cowfp->if_seq);
 545		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 546
 547		wpc->fork = XFS_COW_FORK;
 548		goto allocate_blocks;
 549	}
 550
 551	/*
 552	 * No COW extent overlap. Revalidate now that we may have updated
 553	 * ->cow_seq. If the data mapping is still valid, we're done.
 554	 */
 555	if (xfs_imap_valid(wpc, ip, offset_fsb)) {
 556		xfs_iunlock(ip, XFS_ILOCK_SHARED);
 557		return 0;
 558	}
 559
 560	/*
 561	 * If we don't have a valid map, now it's time to get a new one for this
 562	 * offset.  This will convert delayed allocations (including COW ones)
 563	 * into real extents.
 564	 */
 565	if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap))
 566		imap.br_startoff = end_fsb;	/* fake a hole past EOF */
 567	wpc->data_seq = READ_ONCE(ip->i_df.if_seq);
 568	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 569
 570	wpc->fork = XFS_DATA_FORK;
 571
 572	/* landed in a hole or beyond EOF? */
 573	if (imap.br_startoff > offset_fsb) {
 574		imap.br_blockcount = imap.br_startoff - offset_fsb;
 575		imap.br_startoff = offset_fsb;
 576		imap.br_startblock = HOLESTARTBLOCK;
 577		imap.br_state = XFS_EXT_NORM;
 578	}
 579
 580	/*
 581	 * Truncate to the next COW extent if there is one.  This is the only
 582	 * opportunity to do this because we can skip COW fork lookups for the
 583	 * subsequent blocks in the mapping; however, the requirement to treat
 584	 * the COW range separately remains.
 585	 */
 586	if (cow_fsb != NULLFILEOFF &&
 587	    cow_fsb < imap.br_startoff + imap.br_blockcount)
 588		imap.br_blockcount = cow_fsb - imap.br_startoff;
 589
 590	/* got a delalloc extent? */
 591	if (imap.br_startblock != HOLESTARTBLOCK &&
 592	    isnullstartblock(imap.br_startblock))
 593		goto allocate_blocks;
 594
 595	wpc->imap = imap;
 596	trace_xfs_map_blocks_found(ip, offset, count, wpc->fork, &imap);
 597	return 0;
 598allocate_blocks:
 599	error = xfs_convert_blocks(wpc, ip, offset_fsb);
 600	if (error) {
 601		/*
 602		 * If we failed to find the extent in the COW fork we might have
 603		 * raced with a COW to data fork conversion or truncate.
 604		 * Restart the lookup to catch the extent in the data fork for
 605		 * the former case, but prevent additional retries to avoid
 606		 * looping forever for the latter case.
 607		 */
 608		if (error == -EAGAIN && wpc->fork == XFS_COW_FORK && !retries++)
 609			goto retry;
 610		ASSERT(error != -EAGAIN);
 611		return error;
 612	}
 613
 614	/*
 615	 * Due to merging, the returned real extent might be larger than the
 616	 * original delalloc one.  Trim the returned extent to the next COW
 617	 * boundary again to force a re-lookup.
 618	 */
 619	if (wpc->fork != XFS_COW_FORK && cow_fsb != NULLFILEOFF &&
 620	    cow_fsb < wpc->imap.br_startoff + wpc->imap.br_blockcount)
 621		wpc->imap.br_blockcount = cow_fsb - wpc->imap.br_startoff;
 622
 623	ASSERT(wpc->imap.br_startoff <= offset_fsb);
 624	ASSERT(wpc->imap.br_startoff + wpc->imap.br_blockcount > offset_fsb);
 625	trace_xfs_map_blocks_alloc(ip, offset, count, wpc->fork, &imap);
 626	return 0;
 627}
 628
 629/*
 630 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 631 * it, and we submit that bio. The ioend may be used for multiple bio
 632 * submissions, so we only want to allocate an append transaction for the ioend
 633 * once. In the case of multiple bio submission, each bio will take an IO
 634 * reference to the ioend to ensure that the ioend completion is only done once
 635 * all bios have been submitted and the ioend is really done.
 636 *
 637 * If @status is non-zero, it means that we have a situation where some part of
 638 * the submission process has failed after we have marked pages for writeback
 639 * and unlocked them. In this situation, we need to fail the bio and ioend
 640 * rather than submit it to IO. This typically only happens on a filesystem
 641 * shutdown.
 642 */
 643STATIC int
 644xfs_submit_ioend(
 645	struct writeback_control *wbc,
 646	struct xfs_ioend	*ioend,
 647	int			status)
 648{
 649	unsigned int		nofs_flag;
 650
 651	/*
 652	 * We can allocate memory here while doing writeback on behalf of
 653	 * memory reclaim.  To avoid memory allocation deadlocks set the
 654	 * task-wide nofs context for the following operations.
 655	 */
 656	nofs_flag = memalloc_nofs_save();
 657
 658	/* Convert CoW extents to regular */
 659	if (!status && ioend->io_fork == XFS_COW_FORK) {
 660		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
 661				ioend->io_offset, ioend->io_size);
 662	}
 663
 664	/* Reserve log space if we might write beyond the on-disk inode size. */
 665	if (!status &&
 666	    (ioend->io_fork == XFS_COW_FORK ||
 667	     ioend->io_state != XFS_EXT_UNWRITTEN) &&
 668	    xfs_ioend_is_append(ioend) &&
 669	    !ioend->io_append_trans)
 670		status = xfs_setfilesize_trans_alloc(ioend);
 671
 672	memalloc_nofs_restore(nofs_flag);
 673
 674	ioend->io_bio->bi_private = ioend;
 675	ioend->io_bio->bi_end_io = xfs_end_bio;
 676
 677	/*
 678	 * If we are failing the IO now, just mark the ioend with an
 679	 * error and finish it. This will run IO completion immediately
 680	 * as there is only one reference to the ioend at this point in
 681	 * time.
 682	 */
 683	if (status) {
 684		ioend->io_bio->bi_status = errno_to_blk_status(status);
 685		bio_endio(ioend->io_bio);
 686		return status;
 687	}
 688
 689	submit_bio(ioend->io_bio);
 690	return 0;
 691}
 692
 693static struct xfs_ioend *
 694xfs_alloc_ioend(
 695	struct inode		*inode,
 696	int			fork,
 697	xfs_exntst_t		state,
 698	xfs_off_t		offset,
 699	struct block_device	*bdev,
 700	sector_t		sector,
 701	struct writeback_control *wbc)
 702{
 703	struct xfs_ioend	*ioend;
 704	struct bio		*bio;
 705
 706	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, &xfs_ioend_bioset);
 707	bio_set_dev(bio, bdev);
 708	bio->bi_iter.bi_sector = sector;
 709	bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
 710	bio->bi_write_hint = inode->i_write_hint;
 711	wbc_init_bio(wbc, bio);
 712
 713	ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
 714	INIT_LIST_HEAD(&ioend->io_list);
 715	ioend->io_fork = fork;
 716	ioend->io_state = state;
 717	ioend->io_inode = inode;
 718	ioend->io_size = 0;
 719	ioend->io_offset = offset;
 720	ioend->io_append_trans = NULL;
 721	ioend->io_bio = bio;
 722	return ioend;
 723}
 724
 725/*
 726 * Allocate a new bio, and chain the old bio to the new one.
 727 *
 728 * Note that we have to perform the chaining in this unintuitive order
 729 * so that the bi_private linkage is set up in the right direction for the
 730 * traversal in xfs_destroy_ioend().
 731 */
 732static struct bio *
 733xfs_chain_bio(
 734	struct bio		*prev)
 735{
 736	struct bio *new;
 737
 738	new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
 739	bio_copy_dev(new, prev);/* also copies over blkcg information */
 740	new->bi_iter.bi_sector = bio_end_sector(prev);
 741	new->bi_opf = prev->bi_opf;
 742	new->bi_write_hint = prev->bi_write_hint;
 743
 744	bio_chain(prev, new);
 745	bio_get(prev);		/* for xfs_destroy_ioend */
 746	submit_bio(prev);
 747	return new;
 748}
 749
 750/*
 751 * Test to see if we have an existing ioend structure that we could append to
 752 * first, otherwise finish off the current ioend and start another.
 753 */
 754STATIC void
 755xfs_add_to_ioend(
 756	struct inode		*inode,
 757	xfs_off_t		offset,
 758	struct page		*page,
 759	struct iomap_page	*iop,
 760	struct xfs_writepage_ctx *wpc,
 761	struct writeback_control *wbc,
 762	struct list_head	*iolist)
 763{
 764	struct xfs_inode	*ip = XFS_I(inode);
 765	struct xfs_mount	*mp = ip->i_mount;
 766	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
 767	unsigned		len = i_blocksize(inode);
 768	unsigned		poff = offset & (PAGE_SIZE - 1);
 769	bool			merged, same_page = false;
 770	sector_t		sector;
 771
 772	sector = xfs_fsb_to_db(ip, wpc->imap.br_startblock) +
 773		((offset - XFS_FSB_TO_B(mp, wpc->imap.br_startoff)) >> 9);
 774
 775	if (!wpc->ioend ||
 776	    wpc->fork != wpc->ioend->io_fork ||
 777	    wpc->imap.br_state != wpc->ioend->io_state ||
 778	    sector != bio_end_sector(wpc->ioend->io_bio) ||
 779	    offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
 780		if (wpc->ioend)
 781			list_add(&wpc->ioend->io_list, iolist);
 782		wpc->ioend = xfs_alloc_ioend(inode, wpc->fork,
 783				wpc->imap.br_state, offset, bdev, sector, wbc);
 784	}
 785
 786	merged = __bio_try_merge_page(wpc->ioend->io_bio, page, len, poff,
 787			&same_page);
 788
 789	if (iop && !same_page)
 790		atomic_inc(&iop->write_count);
 791
 792	if (!merged) {
 793		if (bio_full(wpc->ioend->io_bio, len))
 794			wpc->ioend->io_bio = xfs_chain_bio(wpc->ioend->io_bio);
 795		bio_add_page(wpc->ioend->io_bio, page, len, poff);
 796	}
 797
 798	wpc->ioend->io_size += len;
 799	wbc_account_cgroup_owner(wbc, page, len);
 800}
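/*
 * An illustrative aside, not part of the kernel source: the sector
 * computation above maps a file byte offset to a disk sector by taking the
 * disk address of the extent's first block and adding the byte distance
 * from the extent start shifted down to 512-byte sectors.  A worked sketch
 * with invented numbers:
 */
#include <stdio.h>

int main(void)
{
	long long extent_start_sector = 2048;	/* disk sector of br_startblock */
	long long extent_start_byte = 1048576;	/* file byte of br_startoff */
	long long offset = 1056768;		/* file byte being written back */
	long long sector = extent_start_sector +
			   ((offset - extent_start_byte) >> 9);

	printf("sector %lld\n", sector);	/* 2048 + 8192/512 = 2064 */
	return 0;
}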
 801
 802STATIC void
 803xfs_vm_invalidatepage(
 804	struct page		*page,
 805	unsigned int		offset,
 806	unsigned int		length)
 807{
 808	trace_xfs_invalidatepage(page->mapping->host, page, offset, length);
 809	iomap_invalidatepage(page, offset, length);
 810}
 811
 812/*
 813 * If the page has delalloc blocks on it, we need to punch them out before we
 814 * invalidate the page.  If we don't, we leave a stale delalloc mapping on the
 815 * inode that can trip up a later direct I/O read operation on the same region.
 816 *
 817 * We prevent this by truncating away the delalloc regions on the page.  Because
 818 * they are delalloc, we can do this without needing a transaction. Indeed - if
 819 * we get ENOSPC errors, we have to be able to do this truncation without a
 820 * transaction as there is no space left for block reservation (typically why we
 821 * see an ENOSPC in writeback).
 822 */
 823STATIC void
 824xfs_aops_discard_page(
 825	struct page		*page)
 826{
 827	struct inode		*inode = page->mapping->host;
 828	struct xfs_inode	*ip = XFS_I(inode);
 829	struct xfs_mount	*mp = ip->i_mount;
 830	loff_t			offset = page_offset(page);
 831	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, offset);
 832	int			error;
 833
 834	if (XFS_FORCED_SHUTDOWN(mp))
 835		goto out_invalidate;
 836
 837	xfs_alert(mp,
 838		"page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
 839			page, ip->i_ino, offset);
 840
 841	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 842			PAGE_SIZE / i_blocksize(inode));
 843	if (error && !XFS_FORCED_SHUTDOWN(mp))
 844		xfs_alert(mp, "page discard unable to remove delalloc mapping.");
 845out_invalidate:
 846	xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
 847}
 848
 849/*
 850 * We implement an immediate ioend submission policy here to avoid needing to
 851 * chain multiple ioends and hence nest mempool allocations which can violate
 852 * forward progress guarantees we need to provide. The current ioend we are
 853 * adding blocks to is cached on the writepage context, and if the new block
 854 * does not append to the cached ioend it will create a new ioend and cache that
 855 * instead.
 856 *
 857 * If a new ioend is created and cached, the old ioend is returned and queued
 858 * locally for submission once the entire page is processed or an error has been
 859 * detected.  While ioends are submitted immediately after they are completed,
 860 * batching optimisations are provided by higher level block plugging.
 861 *
 862 * At the end of a writeback pass, there will be a cached ioend remaining on the
 863 * writepage context that the caller will need to submit.
 864 */
 865static int
 866xfs_writepage_map(
 867	struct xfs_writepage_ctx *wpc,
 868	struct writeback_control *wbc,
 869	struct inode		*inode,
 870	struct page		*page,
 871	uint64_t		end_offset)
 872{
 873	LIST_HEAD(submit_list);
 874	struct iomap_page	*iop = to_iomap_page(page);
 875	unsigned		len = i_blocksize(inode);
 876	struct xfs_ioend	*ioend, *next;
 877	uint64_t		file_offset;	/* file offset of page */
 878	int			error = 0, count = 0, i;
 879
 880	ASSERT(iop || i_blocksize(inode) == PAGE_SIZE);
 881	ASSERT(!iop || atomic_read(&iop->write_count) == 0);
 882
 883	/*
 884	 * Walk through the page to find areas to write back. If we run off the
 885	 * end of the current map or find the current map invalid, grab a new
 886	 * one.
 887	 */
 888	for (i = 0, file_offset = page_offset(page);
 889	     i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
 890	     i++, file_offset += len) {
 891		if (iop && !test_bit(i, iop->uptodate))
 892			continue;
 893
 894		error = xfs_map_blocks(wpc, inode, file_offset);
 895		if (error)
 896			break;
 897		if (wpc->imap.br_startblock == HOLESTARTBLOCK)
 898			continue;
 899		xfs_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
 900				 &submit_list);
 901		count++;
 902	}
 903
 904	ASSERT(wpc->ioend || list_empty(&submit_list));
 905	ASSERT(PageLocked(page));
 906	ASSERT(!PageWriteback(page));
 907
 908	/*
 909	 * On error, we have to fail the ioend here because we may have set
 910	 * pages under writeback, we have to make sure we run IO completion to
 911	 * mark the error state of the IO appropriately, so we can't cancel the
 912	 * ioend directly here.  That means we have to mark this page as under
 913	 * writeback if we included any blocks from it in the ioend chain so
 914	 * that completion treats it correctly.
 915	 *
 916	 * If we didn't include the page in the ioend, then on error we can
 917	 * simply discard and unlock it as there are no other users of the page
 918	 * now.  The caller will still need to trigger submission of outstanding
 919	 * ioends on the writepage context so they are treated correctly on
 920	 * error.
 921	 */
 922	if (unlikely(error)) {
 923		if (!count) {
 924			xfs_aops_discard_page(page);
 925			ClearPageUptodate(page);
 926			unlock_page(page);
 927			goto done;
 928		}
 929
 930		/*
 931		 * If the page was not fully cleaned, we need to ensure that the
 932		 * higher layers come back to it correctly.  That means we need
 933		 * to keep the page dirty, and for WB_SYNC_ALL writeback we need
 934		 * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed
 935		 * so another attempt to write this page in this writeback sweep
 936		 * will be made.
 937		 */
 938		set_page_writeback_keepwrite(page);
 939	} else {
 940		clear_page_dirty_for_io(page);
 941		set_page_writeback(page);
 942	}
 943
 944	unlock_page(page);
 945
 946	/*
 947	 * Preserve the original error if there was one, otherwise catch
 948	 * submission errors here and propagate into subsequent ioend
 949	 * submissions.
 950	 */
 951	list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
 952		int error2;
 953
 954		list_del_init(&ioend->io_list);
 955		error2 = xfs_submit_ioend(wbc, ioend, error);
 956		if (error2 && !error)
 957			error = error2;
 958	}
 959
 960	/*
 961	 * We can end up here with no error and nothing to write only if we race
 962	 * with a partial page truncate on a sub-page block sized filesystem.
 963	 */
 964	if (!count)
 965		end_page_writeback(page);
 966done:
 967	mapping_set_error(page->mapping, error);
 968	return error;
 969}
 970
 971/*
 972 * Write out a dirty page.
 973 *
 974 * For delalloc space on the page we need to allocate space and flush it.
 975 * For unwritten space on the page we need to start the conversion to
 976 * regular allocated space.
 977 */
 978STATIC int
 979xfs_do_writepage(
 980	struct page		*page,
 981	struct writeback_control *wbc,
 982	void			*data)
 983{
 984	struct xfs_writepage_ctx *wpc = data;
 985	struct inode		*inode = page->mapping->host;
 986	loff_t			offset;
 987	uint64_t		end_offset;
 988	pgoff_t			end_index;
 989
 990	trace_xfs_writepage(inode, page, 0, 0);
 991
 992	/*
 993	 * Refuse to write the page out if we are called from reclaim context.
 994	 *
 995	 * This avoids stack overflows when called from deeply used stacks in
 996	 * random callers for direct reclaim or memcg reclaim.  We explicitly
 997	 * allow reclaim from kswapd as the stack usage there is relatively low.
 998	 *
 999	 * This should never happen except in the case of a VM regression so
1000	 * warn about it.
1001	 */
1002	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
1003			PF_MEMALLOC))
1004		goto redirty;
1005
1006	/*
1007	 * Given that we do not allow direct reclaim to call us, we should
1008	 * never be called while in a filesystem transaction.
1009	 */
1010	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC_NOFS))
1011		goto redirty;
1012
1013	/*
1014	 * Is this page beyond the end of the file?
1015	 *
1016	 * The page index is less than the end_index, adjust the end_offset
1017	 * to the highest offset that this page should represent.
1018	 * -----------------------------------------------------
1019	 * |			file mapping	       | <EOF> |
1020	 * -----------------------------------------------------
1021	 * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
1022	 * ^--------------------------------^----------|--------
1023	 * |     desired writeback range    |      see else    |
1024	 * ---------------------------------^------------------|
1025	 */
1026	offset = i_size_read(inode);
1027	end_index = offset >> PAGE_SHIFT;
1028	if (page->index < end_index)
1029		end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
1030	else {
1031		/*
1032		 * Check whether the page to write out is beyond or straddles
1033		 * i_size or not.
1034		 * -------------------------------------------------------
1035		 * |		file mapping		        | <EOF>  |
1036		 * -------------------------------------------------------
1037		 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
1038		 * ^--------------------------------^-----------|---------
1039		 * |				    |      Straddles     |
1040		 * ---------------------------------^-----------|--------|
1041		 */
1042		unsigned offset_into_page = offset & (PAGE_SIZE - 1);
1043
1044		/*
1045		 * Skip the page if it is fully outside i_size, e.g. due to a
1046		 * truncate operation that is in progress. We must redirty the
1047		 * page so that reclaim stops reclaiming it. Otherwise
1048		 * xfs_vm_releasepage() is called on it and gets confused.
1049		 *
1050		 * Note that the end_index is unsigned long, it would overflow
1051		 * if the given offset is greater than 16TB on 32-bit system
1052		 * and if we do check the page is fully outside i_size or not
1053		 * via "if (page->index >= end_index + 1)" as "end_index + 1"
1054		 * will be evaluated to 0.  Hence this page will be redirtied
1055		 * and be written out repeatedly which would result in an
1056		 * infinite loop, and the user program that performs this
1057		 * operation will hang.  Instead, we can verify this situation
1058		 * by checking if the page to write is totally beyond the
1059		 * i_size or if its offset is just equal to the EOF.
1060		 */
1061		if (page->index > end_index ||
1062		    (page->index == end_index && offset_into_page == 0))
1063			goto redirty;
1064
1065		/*
1066		 * The page straddles i_size.  It must be zeroed out on each
1067		 * and every writepage invocation because it may be mmapped.
1068		 * "A file is mapped in multiples of the page size.  For a file
1069		 * that is not a multiple of the page size, the remaining
1070		 * memory is zeroed when mapped, and writes to that region are
1071		 * not written out to the file."
1072		 */
1073		zero_user_segment(page, offset_into_page, PAGE_SIZE);
1074
1075		/* Adjust the end_offset to the end of file */
1076		end_offset = offset;
1077	}
1078
1079	return xfs_writepage_map(wpc, wbc, inode, page, end_offset);
1080
1081redirty:
1082	redirty_page_for_writepage(wbc, page);
1083	unlock_page(page);
1084	return 0;
1085}
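/*
 * An illustrative aside, not part of the kernel source: the i_size handling
 * above reduces to page-granularity arithmetic.  Pages strictly below the
 * EOF page are written in full, the page containing EOF is zeroed beyond
 * offset_into_page, and pages wholly past EOF are redirtied and skipped.  A
 * worked sketch with 4k pages and an invented 10000-byte file:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long isize = 10000, page_size = 4096;
	unsigned long long end_index = isize / page_size;	/* page 2 */

	for (unsigned long long index = 0; index <= 3; index++) {
		if (index < end_index)
			printf("page %llu: write whole page\n", index);
		else if (index == end_index && isize % page_size)
			/* straddles EOF: zero from 10000 % 4096 == 1808 */
			printf("page %llu: zero from byte %llu onward\n",
			       index, isize % page_size);
		else
			printf("page %llu: wholly beyond EOF, skip\n", index);
	}
	return 0;
}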
1086
1087STATIC int
1088xfs_vm_writepage(
1089	struct page		*page,
1090	struct writeback_control *wbc)
1091{
1092	struct xfs_writepage_ctx wpc = { };
1093	int			ret;
1094
1095	ret = xfs_do_writepage(page, wbc, &wpc);
1096	if (wpc.ioend)
1097		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1098	return ret;
1099}
1100
1101STATIC int
1102xfs_vm_writepages(
1103	struct address_space	*mapping,
1104	struct writeback_control *wbc)
1105{
1106	struct xfs_writepage_ctx wpc = { };
1107	int			ret;
1108
1109	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1110	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
1111	if (wpc.ioend)
1112		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
1113	return ret;
1114}
1115
1116STATIC int
1117xfs_dax_writepages(
1118	struct address_space	*mapping,
1119	struct writeback_control *wbc)
1120{
1121	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
1122	return dax_writeback_mapping_range(mapping,
1123			xfs_find_bdev_for_inode(mapping->host), wbc);
1124}
1125
1126STATIC int
1127xfs_vm_releasepage(
1128	struct page		*page,
1129	gfp_t			gfp_mask)
1130{
1131	trace_xfs_releasepage(page->mapping->host, page, 0, 0);
1132	return iomap_releasepage(page, gfp_mask);
1133}
1134
1135STATIC sector_t
1136xfs_vm_bmap(
1137	struct address_space	*mapping,
1138	sector_t		block)
1139{
1140	struct xfs_inode	*ip = XFS_I(mapping->host);
1141
1142	trace_xfs_vm_bmap(ip);
1143
1144	/*
1145	 * The swap code (ab-)uses ->bmap to get a block mapping and then
1146	 * bypasses the file system for actual I/O.  We really can't allow
1147	 * that on reflink inodes, so we have to skip out here.  And yes,
1148	 * 0 is the magic code for a bmap error.
1149	 *
1150	 * Since we don't pass back blockdev info, we can't return bmap
1151	 * information for rt files either.
1152	 */
1153	if (xfs_is_cow_inode(ip) || XFS_IS_REALTIME_INODE(ip))
1154		return 0;
1155	return iomap_bmap(mapping, block, &xfs_iomap_ops);
1156}
1157
1158STATIC int
1159xfs_vm_readpage(
1160	struct file		*unused,
1161	struct page		*page)
1162{
1163	trace_xfs_vm_readpage(page->mapping->host, 1);
1164	return iomap_readpage(page, &xfs_iomap_ops);
1165}
1166
1167STATIC int
1168xfs_vm_readpages(
1169	struct file		*unused,
1170	struct address_space	*mapping,
1171	struct list_head	*pages,
1172	unsigned		nr_pages)
1173{
1174	trace_xfs_vm_readpages(mapping->host, nr_pages);
1175	return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops);
1176}
1177
1178static int
1179xfs_iomap_swapfile_activate(
1180	struct swap_info_struct		*sis,
1181	struct file			*swap_file,
1182	sector_t			*span)
1183{
1184	sis->bdev = xfs_find_bdev_for_inode(file_inode(swap_file));
1185	return iomap_swapfile_activate(sis, swap_file, span, &xfs_iomap_ops);
1186}
1187
1188const struct address_space_operations xfs_address_space_operations = {
1189	.readpage		= xfs_vm_readpage,
1190	.readpages		= xfs_vm_readpages,
1191	.writepage		= xfs_vm_writepage,
1192	.writepages		= xfs_vm_writepages,
1193	.set_page_dirty		= iomap_set_page_dirty,
1194	.releasepage		= xfs_vm_releasepage,
1195	.invalidatepage		= xfs_vm_invalidatepage,
1196	.bmap			= xfs_vm_bmap,
1197	.direct_IO		= noop_direct_IO,
1198	.migratepage		= iomap_migrate_page,
1199	.is_partially_uptodate  = iomap_is_partially_uptodate,
1200	.error_remove_page	= generic_error_remove_page,
1201	.swap_activate		= xfs_iomap_swapfile_activate,
1202};
1203
1204const struct address_space_operations xfs_dax_aops = {
1205	.writepages		= xfs_dax_writepages,
1206	.direct_IO		= noop_direct_IO,
1207	.set_page_dirty		= noop_set_page_dirty,
1208	.invalidatepage		= noop_invalidatepage,
1209	.swap_activate		= xfs_iomap_swapfile_activate,
1210};