/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>

/*
 * structure owned by writepages passed to individual writepage calls
 */
struct xfs_writepage_ctx {
        struct xfs_bmbt_irec imap;
        bool imap_valid;
        unsigned int io_type;
        struct xfs_ioend *ioend;
        sector_t last_block;
};

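/*
 * Walk the buffers on a page and report whether any of them are in the
 * delalloc or unwritten state.
 */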
void
xfs_count_page_state(
        struct page *page,
        int *delalloc,
        int *unwritten)
{
        struct buffer_head *bh, *head;

        *delalloc = *unwritten = 0;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh))
                        (*unwritten) = 1;
                else if (buffer_delay(bh))
                        (*delalloc) = 1;
        } while ((bh = bh->b_this_page) != head);
}

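/*
 * Return the block device backing this inode's data: the realtime device
 * for realtime inodes, the main data device otherwise.
 */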
struct block_device *
xfs_find_bdev_for_inode(
        struct inode *inode)
{
        struct xfs_inode *ip = XFS_I(inode);
        struct xfs_mount *mp = ip->i_mount;

        if (XFS_IS_REALTIME_INODE(ip))
                return mp->m_rtdev_targp->bt_bdev;
        else
                return mp->m_ddev_targp->bt_bdev;
}

/*
 * We're now finished for good with this page. Update the page state via the
 * associated buffer_heads, paying attention to the start and end offsets that
 * we need to process on the page.
 *
 * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
 * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
 * the page at all, as we may be racing with memory reclaim and it can free both
 * the bufferhead chain and the page as it will see the page as clean and
 * unused.
 */
static void
xfs_finish_page_writeback(
        struct inode *inode,
        struct bio_vec *bvec,
        int error)
{
        unsigned int end = bvec->bv_offset + bvec->bv_len - 1;
        struct buffer_head *head, *bh, *next;
        unsigned int off = 0;
        unsigned int bsize;

        ASSERT(bvec->bv_offset < PAGE_SIZE);
        ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
        ASSERT(end < PAGE_SIZE);
        ASSERT((bvec->bv_len & ((1 << inode->i_blkbits) - 1)) == 0);

        bh = head = page_buffers(bvec->bv_page);

        bsize = bh->b_size;
        do {
                next = bh->b_this_page;
                if (off < bvec->bv_offset)
                        goto next_bh;
                if (off > end)
                        break;
                bh->b_end_io(bh, !error);
next_bh:
                off += bsize;
        } while ((bh = next) != head);
}

/*
 * We're now finished for good with this ioend structure. Update the page
 * state, release holds on bios, and finally free up memory. Do not use the
 * ioend after this.
 */
STATIC void
xfs_destroy_ioend(
        struct xfs_ioend *ioend,
        int error)
{
        struct inode *inode = ioend->io_inode;
        struct bio *last = ioend->io_bio;
        struct bio *bio, *next;

        for (bio = &ioend->io_inline_bio; bio; bio = next) {
                struct bio_vec *bvec;
                int i;

                /*
                 * For the last bio, bi_private points to the ioend, so we
                 * need to explicitly end the iteration here.
                 */
                if (bio == last)
                        next = NULL;
                else
                        next = bio->bi_private;

                /* walk each page on bio, ending page IO on them */
                bio_for_each_segment_all(bvec, bio, i)
                        xfs_finish_page_writeback(inode, bvec, error);

                bio_put(bio);
        }
}

/*
 * Fast and loose check if this write could update the on-disk inode size.
 */
static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
{
        return ioend->io_offset + ioend->io_size >
                XFS_I(ioend->io_inode)->i_d.di_size;
}

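/*
 * Reserve a transaction up front for updating the on-disk inode size at
 * I/O completion, and hand it off to the completion context along with
 * freeze protection.
 */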
STATIC int
xfs_setfilesize_trans_alloc(
        struct xfs_ioend *ioend)
{
        struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
        struct xfs_trans *tp;
        int error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        if (error)
                return error;

        ioend->io_append_trans = tp;

        /*
         * We may pass freeze protection with a transaction. So tell lockdep
         * we released it.
         */
        __sb_writers_release(ioend->io_inode->i_sb, SB_FREEZE_FS);
        /*
         * We hand off the transaction to the completion thread now, so
         * clear the flag here.
         */
        current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
        return 0;
}

/*
 * Update on-disk file size now that data has been written to disk.
 */
STATIC int
__xfs_setfilesize(
        struct xfs_inode *ip,
        struct xfs_trans *tp,
        xfs_off_t offset,
        size_t size)
{
        xfs_fsize_t isize;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        isize = xfs_new_eof(ip, offset + size);
        if (!isize) {
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                xfs_trans_cancel(tp);
                return 0;
        }

        trace_xfs_setfilesize(ip, offset, size);

        ip->i_d.di_size = isize;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        return xfs_trans_commit(tp);
}

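/*
 * Update the on-disk inode size, allocating the transaction here because
 * the caller did not reserve one in advance.
 */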
int
xfs_setfilesize(
        struct xfs_inode *ip,
        xfs_off_t offset,
        size_t size)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_trans *tp;
        int error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_fsyncts, 0, 0, 0, &tp);
        if (error)
                return error;

        return __xfs_setfilesize(ip, tp, offset, size);
}

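/*
 * Complete the deferred on-disk inode size update for an ioend, using the
 * transaction that was reserved at submission time.
 */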
STATIC int
xfs_setfilesize_ioend(
        struct xfs_ioend *ioend,
        int error)
{
        struct xfs_inode *ip = XFS_I(ioend->io_inode);
        struct xfs_trans *tp = ioend->io_append_trans;

        /*
         * The transaction may have been allocated in the I/O submission thread,
         * thus we need to mark ourselves as being in a transaction manually.
         * Similarly for freeze protection.
         */
        current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
        __sb_writers_acquired(VFS_I(ip)->i_sb, SB_FREEZE_FS);

        /* we abort the update if there was an IO error */
        if (error) {
                xfs_trans_cancel(tp);
                return error;
        }

        return __xfs_setfilesize(ip, tp, ioend->io_offset, ioend->io_size);
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
        struct work_struct *work)
{
        struct xfs_ioend *ioend =
                container_of(work, struct xfs_ioend, io_work);
        struct xfs_inode *ip = XFS_I(ioend->io_inode);
        xfs_off_t offset = ioend->io_offset;
        size_t size = ioend->io_size;
        int error = ioend->io_bio->bi_error;

        /*
         * Just clean up the in-memory structures if the fs has been shut down.
         */
        if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                error = -EIO;
                goto done;
        }

        /*
         * Clean up any COW blocks on an I/O error.
         */
        if (unlikely(error)) {
                switch (ioend->io_type) {
                case XFS_IO_COW:
                        xfs_reflink_cancel_cow_range(ip, offset, size, true);
                        break;
                }

                goto done;
        }

        /*
         * Success: commit the COW or unwritten blocks if needed.
         */
        switch (ioend->io_type) {
        case XFS_IO_COW:
                error = xfs_reflink_end_cow(ip, offset, size);
                break;
        case XFS_IO_UNWRITTEN:
                error = xfs_iomap_write_unwritten(ip, offset, size);
                break;
        default:
                ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
                break;
        }

done:
        if (ioend->io_append_trans)
                error = xfs_setfilesize_ioend(ioend, error);
        xfs_destroy_ioend(ioend, error);
}

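/*
 * Bio completion handler. Defer the heavyweight completion work to a
 * workqueue when a transaction context is needed, otherwise tear down the
 * ioend directly.
 */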
STATIC void
xfs_end_bio(
        struct bio *bio)
{
        struct xfs_ioend *ioend = bio->bi_private;
        struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;

        if (ioend->io_type == XFS_IO_UNWRITTEN || ioend->io_type == XFS_IO_COW)
                queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
        else if (ioend->io_append_trans)
                queue_work(mp->m_data_workqueue, &ioend->io_work);
        else
                xfs_destroy_ioend(ioend, bio->bi_error);
}

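/*
 * Look up the extent mapping covering @offset for writeback, allocating
 * real blocks for delalloc extents in the data fork where necessary.
 */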
STATIC int
xfs_map_blocks(
        struct inode *inode,
        loff_t offset,
        struct xfs_bmbt_irec *imap,
        int type)
{
        struct xfs_inode *ip = XFS_I(inode);
        struct xfs_mount *mp = ip->i_mount;
        ssize_t count = 1 << inode->i_blkbits;
        xfs_fileoff_t offset_fsb, end_fsb;
        int error = 0;
        int bmapi_flags = XFS_BMAPI_ENTIRE;
        int nimaps = 1;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        ASSERT(type != XFS_IO_COW);
        if (type == XFS_IO_UNWRITTEN)
                bmapi_flags |= XFS_BMAPI_IGSTATE;

        xfs_ilock(ip, XFS_ILOCK_SHARED);
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               (ip->i_df.if_flags & XFS_IFEXTENTS));
        ASSERT(offset <= mp->m_super->s_maxbytes);

        if (offset + count > mp->m_super->s_maxbytes)
                count = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
                               imap, &nimaps, bmapi_flags);
        /*
         * Truncate an overwrite extent if there's a pending CoW
         * reservation before the end of this extent. This forces us
         * to come back to writepage to take care of the CoW.
         */
        if (nimaps && type == XFS_IO_OVERWRITE)
                xfs_reflink_trim_irec_to_next_cow(ip, offset_fsb, imap);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (error)
                return error;

        if (type == XFS_IO_DELALLOC &&
            (!nimaps || isnullstartblock(imap->br_startblock))) {
                error = xfs_iomap_write_allocate(ip, XFS_DATA_FORK, offset,
                                                 imap);
                if (!error)
                        trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
                return error;
        }

#ifdef DEBUG
        if (type == XFS_IO_UNWRITTEN) {
                ASSERT(nimaps);
                ASSERT(imap->br_startblock != HOLESTARTBLOCK);
                ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
        }
#endif
        if (nimaps)
                trace_xfs_map_blocks_found(ip, offset, count, type, imap);
        return 0;
}

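/* Return true if the cached mapping covers the block containing @offset. */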
STATIC bool
xfs_imap_valid(
        struct inode *inode,
        struct xfs_bmbt_irec *imap,
        xfs_off_t offset)
{
        offset >>= inode->i_blkbits;

        return offset >= imap->br_startoff &&
                offset < imap->br_startoff + imap->br_blockcount;
}

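/* Transition a buffer to the async-write state and clear its dirty bit. */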
STATIC void
xfs_start_buffer_writeback(
        struct buffer_head *bh)
{
        ASSERT(buffer_mapped(bh));
        ASSERT(buffer_locked(bh));
        ASSERT(!buffer_delay(bh));
        ASSERT(!buffer_unwritten(bh));

        mark_buffer_async_write(bh);
        set_buffer_uptodate(bh);
        clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
        struct page *page,
        int clear_dirty)
{
        ASSERT(PageLocked(page));
        ASSERT(!PageWriteback(page));

        /*
         * if the page was not fully cleaned, we need to ensure that the higher
         * layers come back to it correctly. That means we need to keep the page
         * dirty, and for WB_SYNC_ALL writeback we need to ensure the
         * PAGECACHE_TAG_TOWRITE index mark is not removed so another attempt to
         * write this page in this writeback sweep will be made.
         */
        if (clear_dirty) {
                clear_page_dirty_for_io(page);
                set_page_writeback(page);
        } else
                set_page_writeback_keepwrite(page);

        unlock_page(page);
}

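/* Add the page backing @bh to the current bio. */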
static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
        return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit the bio for an ioend. We are passed an ioend with a bio attached to
 * it, and we submit that bio. The ioend may be used for multiple bio
 * submissions, so we only want to allocate an append transaction for the ioend
 * once. In the case of multiple bio submission, each bio will take an IO
 * reference to the ioend to ensure that the ioend completion is only done once
 * all bios have been submitted and the ioend is really done.
 *
 * If @status is non-zero, it means that we have a situation where some part of
 * the submission process has failed after we have marked pages for writeback
 * and unlocked them. In this situation, we need to fail the bio and ioend
 * rather than submit it to IO. This typically only happens on a filesystem
 * shutdown.
 */
STATIC int
xfs_submit_ioend(
        struct writeback_control *wbc,
        struct xfs_ioend *ioend,
        int status)
{
        /* Convert CoW extents to regular */
        if (!status && ioend->io_type == XFS_IO_COW) {
                status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
                                ioend->io_offset, ioend->io_size);
        }

        /* Reserve log space if we might write beyond the on-disk inode size. */
        if (!status &&
            ioend->io_type != XFS_IO_UNWRITTEN &&
            xfs_ioend_is_append(ioend) &&
            !ioend->io_append_trans)
                status = xfs_setfilesize_trans_alloc(ioend);

        ioend->io_bio->bi_private = ioend;
        ioend->io_bio->bi_end_io = xfs_end_bio;
        ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);

        /*
         * If we are failing the IO now, just mark the ioend with an
         * error and finish it. This will run IO completion immediately
         * as there is only one reference to the ioend at this point in
         * time.
         */
        if (status) {
                ioend->io_bio->bi_error = status;
                bio_endio(ioend->io_bio);
                return status;
        }

        submit_bio(ioend->io_bio);
        return 0;
}

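/* Point a freshly allocated bio at the disk block backing @bh. */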
static void
xfs_init_bio_from_bh(
        struct bio *bio,
        struct buffer_head *bh)
{
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio->bi_bdev = bh->b_bdev;
}

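/*
 * Allocate an ioend for the given type and offset. The ioend is embedded
 * in the first bio used for the I/O, so the two are allocated together.
 */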
static struct xfs_ioend *
xfs_alloc_ioend(
        struct inode *inode,
        unsigned int type,
        xfs_off_t offset,
        struct buffer_head *bh)
{
        struct xfs_ioend *ioend;
        struct bio *bio;

        bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, xfs_ioend_bioset);
        xfs_init_bio_from_bh(bio, bh);

        ioend = container_of(bio, struct xfs_ioend, io_inline_bio);
        INIT_LIST_HEAD(&ioend->io_list);
        ioend->io_type = type;
        ioend->io_inode = inode;
        ioend->io_size = 0;
        ioend->io_offset = offset;
        INIT_WORK(&ioend->io_work, xfs_end_io);
        ioend->io_append_trans = NULL;
        ioend->io_bio = bio;
        return ioend;
}

/*
 * Allocate a new bio, and chain the old bio to the new one.
 *
 * Note that we have to perform the chaining in this unintuitive order
 * so that the bi_private linkage is set up in the right direction for the
 * traversal in xfs_destroy_ioend().
 */
static void
xfs_chain_bio(
        struct xfs_ioend *ioend,
        struct writeback_control *wbc,
        struct buffer_head *bh)
{
        struct bio *new;

        new = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);
        xfs_init_bio_from_bh(new, bh);

        bio_chain(ioend->io_bio, new);
        bio_get(ioend->io_bio);         /* for xfs_destroy_ioend */
        ioend->io_bio->bi_opf = REQ_OP_WRITE | wbc_to_write_flags(wbc);
        submit_bio(ioend->io_bio);
        ioend->io_bio = new;
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * Any ioend we finish off is placed on @iolist so that the caller can
 * submit it once it has finished processing the dirty page.
 */
STATIC void
xfs_add_to_ioend(
        struct inode *inode,
        struct buffer_head *bh,
        xfs_off_t offset,
        struct xfs_writepage_ctx *wpc,
        struct writeback_control *wbc,
        struct list_head *iolist)
{
        if (!wpc->ioend || wpc->io_type != wpc->ioend->io_type ||
            bh->b_blocknr != wpc->last_block + 1 ||
            offset != wpc->ioend->io_offset + wpc->ioend->io_size) {
                if (wpc->ioend)
                        list_add(&wpc->ioend->io_list, iolist);
                wpc->ioend = xfs_alloc_ioend(inode, wpc->io_type, offset, bh);
        }

        /*
         * If the buffer doesn't fit into the bio we need to allocate a new
         * one. This shouldn't happen more than once for a given buffer.
         */
        while (xfs_bio_add_buffer(wpc->ioend->io_bio, bh) != bh->b_size)
                xfs_chain_bio(wpc->ioend, wbc, bh);

        wpc->ioend->io_size += bh->b_size;
        wpc->last_block = bh->b_blocknr;
        xfs_start_buffer_writeback(bh);
}

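/*
 * Translate the file offset and extent mapping into a disk block address
 * and attach it to the buffer_head.
 */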
STATIC void
xfs_map_buffer(
        struct inode *inode,
        struct buffer_head *bh,
        struct xfs_bmbt_irec *imap,
        xfs_off_t offset)
{
        sector_t bn;
        struct xfs_mount *m = XFS_I(inode)->i_mount;
        xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
        xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
              ((offset - iomap_offset) >> inode->i_blkbits);

        ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

        bh->b_blocknr = bn;
        set_buffer_mapped(bh);
}

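/*
 * Map a buffer for I/O at @offset and clear its delalloc/unwritten state,
 * since real blocks now back it.
 */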
STATIC void
xfs_map_at_offset(
        struct inode *inode,
        struct buffer_head *bh,
        struct xfs_bmbt_irec *imap,
        xfs_off_t offset)
{
        ASSERT(imap->br_startblock != HOLESTARTBLOCK);
        ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

        xfs_map_buffer(inode, bh, imap, offset);
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
        clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
        struct page *page,
        unsigned int type,
        bool check_all_buffers)
{
        struct buffer_head *bh;
        struct buffer_head *head;

        if (PageWriteback(page))
                return false;
        if (!page->mapping)
                return false;
        if (!page_has_buffers(page))
                return false;

        bh = head = page_buffers(page);
        do {
                if (buffer_unwritten(bh)) {
                        if (type == XFS_IO_UNWRITTEN)
                                return true;
                } else if (buffer_delay(bh)) {
                        if (type == XFS_IO_DELALLOC)
                                return true;
                } else if (buffer_dirty(bh) && buffer_mapped(bh)) {
                        if (type == XFS_IO_OVERWRITE)
                                return true;
                }

                /* If we are only checking the first buffer, we are done now. */
                if (!check_all_buffers)
                        break;
        } while ((bh = bh->b_this_page) != head);

        return false;
}

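/*
 * ->invalidatepage handler: trace the invalidation, then let the generic
 * buffer_head code tear down the page's buffer state.
 */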
STATIC void
xfs_vm_invalidatepage(
        struct page *page,
        unsigned int offset,
        unsigned int length)
{
        trace_xfs_invalidatepage(page->mapping->host, page, offset,
                                 length);
        block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
        struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct xfs_inode *ip = XFS_I(inode);
        struct buffer_head *bh, *head;
        loff_t offset = page_offset(page);

        if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
                goto out_invalidate;

        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                goto out_invalidate;

        xfs_alert(ip->i_mount,
                "page discard on page %p, inode 0x%llx, offset %llu.",
                        page, ip->i_ino, offset);

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        bh = head = page_buffers(page);
        do {
                int error;
                xfs_fileoff_t start_fsb;

                if (!buffer_delay(bh))
                        goto next_buffer;

                start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
                error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
                if (error) {
                        /* something screwed, just bail */
                        if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
                                xfs_alert(ip->i_mount,
                        "page discard unable to remove delalloc mapping.");
                        }
                        break;
                }
next_buffer:
                offset += 1 << inode->i_blkbits;

        } while ((bh = bh->b_this_page) != head);

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
        xfs_vm_invalidatepage(page, 0, PAGE_SIZE);
        return;
}

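/*
 * Check whether @offset is covered by a COW mapping and, if so, point the
 * writepage context at it, allocating real blocks for a delayed COW
 * reservation if necessary.
 */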
static int
xfs_map_cow(
        struct xfs_writepage_ctx *wpc,
        struct inode *inode,
        loff_t offset,
        unsigned int *new_type)
{
        struct xfs_inode *ip = XFS_I(inode);
        struct xfs_bmbt_irec imap;
        bool is_cow = false;
        int error;

        /*
         * If we already have a valid COW mapping keep using it.
         */
        if (wpc->io_type == XFS_IO_COW) {
                wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset);
                if (wpc->imap_valid) {
                        *new_type = XFS_IO_COW;
                        return 0;
                }
        }

        /*
         * Else we need to check if there is a COW mapping at this offset.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        is_cow = xfs_reflink_find_cow_mapping(ip, offset, &imap);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);

        if (!is_cow)
                return 0;

        /*
         * And if the COW mapping has a delayed extent here we need to
         * allocate real space for it now.
         */
        if (isnullstartblock(imap.br_startblock)) {
                error = xfs_iomap_write_allocate(ip, XFS_COW_FORK, offset,
                                                 &imap);
                if (error)
                        return error;
        }

        wpc->io_type = *new_type = XFS_IO_COW;
        wpc->imap_valid = true;
        wpc->imap = imap;
        return 0;
}

/*
 * We implement an immediate ioend submission policy here to avoid needing to
 * chain multiple ioends and hence nest mempool allocations which can violate
 * forward progress guarantees we need to provide. The current ioend we are
 * adding buffers to is cached on the writepage context, and if the new buffer
 * does not append to the cached ioend, it will create a new ioend and cache
 * that instead.
 *
 * If a new ioend is created and cached, the old ioend is returned and queued
 * locally for submission once the entire page is processed or an error has
 * been detected. While ioends are submitted immediately after they are
 * completed, batching optimisations are provided by higher level block
 * plugging.
 *
 * At the end of a writeback pass, there will be a cached ioend remaining on
 * the writepage context that the caller will need to submit.
 */
static int
xfs_writepage_map(
        struct xfs_writepage_ctx *wpc,
        struct writeback_control *wbc,
        struct inode *inode,
        struct page *page,
        loff_t offset,
        __uint64_t end_offset)
{
        LIST_HEAD(submit_list);
        struct xfs_ioend *ioend, *next;
        struct buffer_head *bh, *head;
        ssize_t len = 1 << inode->i_blkbits;
        int error = 0;
        int count = 0;
        int uptodate = 1;
        unsigned int new_type;

        bh = head = page_buffers(page);
        offset = page_offset(page);
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;

                /*
                 * set_page_dirty dirties all buffers in a page, independent
                 * of their state. The dirty state however is entirely
                 * meaningless for holes (!mapped && uptodate), so skip
                 * buffers covering holes here.
                 */
                if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
                        wpc->imap_valid = false;
                        continue;
                }

                if (buffer_unwritten(bh))
                        new_type = XFS_IO_UNWRITTEN;
                else if (buffer_delay(bh))
                        new_type = XFS_IO_DELALLOC;
                else if (buffer_uptodate(bh))
                        new_type = XFS_IO_OVERWRITE;
                else {
                        if (PageUptodate(page))
                                ASSERT(buffer_mapped(bh));
                        /*
                         * This buffer is not uptodate and will not be
                         * written to disk. Ensure that we will put any
                         * subsequent writeable buffers into a new
                         * ioend.
                         */
                        wpc->imap_valid = false;
                        continue;
                }

                if (xfs_is_reflink_inode(XFS_I(inode))) {
                        error = xfs_map_cow(wpc, inode, offset, &new_type);
                        if (error)
                                goto out;
                }

                if (wpc->io_type != new_type) {
                        wpc->io_type = new_type;
                        wpc->imap_valid = false;
                }

                if (wpc->imap_valid)
                        wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
                                                         offset);
                if (!wpc->imap_valid) {
                        error = xfs_map_blocks(inode, offset, &wpc->imap,
                                               wpc->io_type);
                        if (error)
                                goto out;
                        wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap,
                                                         offset);
                }
                if (wpc->imap_valid) {
                        lock_buffer(bh);
                        if (wpc->io_type != XFS_IO_OVERWRITE)
                                xfs_map_at_offset(inode, bh, &wpc->imap, offset);
                        xfs_add_to_ioend(inode, bh, offset, wpc, wbc, &submit_list);
                        count++;
                }

        } while (offset += len, ((bh = bh->b_this_page) != head));

        if (uptodate && bh == head)
                SetPageUptodate(page);

        ASSERT(wpc->ioend || list_empty(&submit_list));

out:
        /*
         * On error, we have to fail the ioend here because we have locked
         * buffers in the ioend. If we don't do this, we'll deadlock
         * invalidating the page as that tries to lock the buffers on the page.
         * Also, because we may have set pages under writeback, we have to make
         * sure we run IO completion to mark the error state of the IO
         * appropriately, so we can't cancel the ioend directly here. That means
         * we have to mark this page as under writeback if we included any
         * buffers from it in the ioend chain so that completion treats it
         * correctly.
         *
         * If we didn't include the page in the ioend, then on error we can
         * simply discard and unlock it as there are no other users of the page
         * or its buffers right now. The caller will still need to trigger
         * submission of outstanding ioends on the writepage context so they
         * are treated correctly on error.
         */
        if (count) {
                xfs_start_page_writeback(page, !error);

                /*
                 * Preserve the original error if there was one, otherwise catch
                 * submission errors here and propagate into subsequent ioend
                 * submissions.
                 */
                list_for_each_entry_safe(ioend, next, &submit_list, io_list) {
                        int error2;

                        list_del_init(&ioend->io_list);
                        error2 = xfs_submit_ioend(wbc, ioend, error);
                        if (error2 && !error)
                                error = error2;
                }
        } else if (error) {
                xfs_aops_discard_page(page);
                ClearPageUptodate(page);
                unlock_page(page);
        } else {
                /*
                 * We can end up here with no error and nothing to write if we
                 * race with a partial page truncate on a sub-page block sized
                 * filesystem. In that case we need to mark the page clean.
                 */
                xfs_start_page_writeback(page, 1);
                end_page_writeback(page);
        }

        mapping_set_error(page->mapping, error);
        return error;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_do_writepage(
        struct page *page,
        struct writeback_control *wbc,
        void *data)
{
        struct xfs_writepage_ctx *wpc = data;
        struct inode *inode = page->mapping->host;
        loff_t offset;
        __uint64_t end_offset;
        pgoff_t end_index;

        trace_xfs_writepage(inode, page, 0, 0);

        ASSERT(page_has_buffers(page));

        /*
         * Refuse to write the page out if we are called from reclaim context.
         *
         * This avoids stack overflows when called from deeply used stacks in
         * random callers for direct reclaim or memcg reclaim. We explicitly
         * allow reclaim from kswapd as the stack usage there is relatively low.
         *
         * This should never happen except in the case of a VM regression so
         * warn about it.
         */
        if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
                        PF_MEMALLOC))
                goto redirty;

        /*
         * Given that we do not allow direct reclaim to call us, we should
         * never be called while in a filesystem transaction.
         */
        if (WARN_ON_ONCE(current->flags & PF_FSTRANS))
                goto redirty;

        /*
         * Is this page beyond the end of the file?
         *
         * The page index is less than the end_index, adjust the end_offset
         * to the highest offset that this page should represent.
         * -----------------------------------------------------
         * |                   file mapping            | <EOF> |
         * -----------------------------------------------------
         * | Page ... | Page N-2 | Page N-1 |  Page N  |       |
         * ^--------------------------------^----------|--------
         * |     desired writeback range    |      see else    |
         * ---------------------------------^------------------|
         */
        offset = i_size_read(inode);
        end_index = offset >> PAGE_SHIFT;
        if (page->index < end_index)
                end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT;
        else {
                /*
                 * Check whether the page to write out is beyond or straddles
                 * i_size or not.
                 * -------------------------------------------------------
                 * |                file mapping                | <EOF>  |
                 * -------------------------------------------------------
                 * | Page ... | Page N-2 | Page N-1 |  Page N   | Beyond |
                 * ^--------------------------------^-----------|---------
                 * |                                | Straddles |
                 * ---------------------------------^-----------|--------|
                 */
                unsigned offset_into_page = offset & (PAGE_SIZE - 1);

                /*
                 * Skip the page if it is fully outside i_size, e.g. due to a
                 * truncate operation that is in progress. We must redirty the
                 * page so that reclaim stops reclaiming it. Otherwise
                 * xfs_vm_releasepage() is called on it and gets confused.
                 *
                 * Note that the end_index is unsigned long, it would overflow
                 * if the given offset is greater than 16TB on 32-bit system
                 * and if we do check the page is fully outside i_size or not
                 * via "if (page->index >= end_index + 1)" as "end_index + 1"
                 * will be evaluated to 0. Hence this page will be redirtied
                 * and be written out repeatedly which would result in an
                 * infinite loop, the user program that performs this operation
                 * will hang. Instead, we can verify this situation by checking
                 * if the page to write is totally beyond the i_size or if its
                 * offset is just equal to the EOF.
                 */
                if (page->index > end_index ||
                    (page->index == end_index && offset_into_page == 0))
                        goto redirty;

                /*
                 * The page straddles i_size. It must be zeroed out on each
                 * and every writepage invocation because it may be mmapped.
                 * "A file is mapped in multiples of the page size. For a file
                 * that is not a multiple of the page size, the remaining
                 * memory is zeroed when mapped, and writes to that region are
                 * not written out to the file."
                 */
                zero_user_segment(page, offset_into_page, PAGE_SIZE);

                /* Adjust the end_offset to the end of file */
                end_offset = offset;
        }

        return xfs_writepage_map(wpc, wbc, inode, page, offset, end_offset);

redirty:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

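/*
 * ->writepage handler: write a single dirty page through a local writepage
 * context, then submit the ioend left cached on that context.
 */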
STATIC int
xfs_vm_writepage(
        struct page *page,
        struct writeback_control *wbc)
{
        struct xfs_writepage_ctx wpc = {
                .io_type = XFS_IO_INVALID,
        };
        int ret;

        ret = xfs_do_writepage(page, wbc, &wpc);
        if (wpc.ioend)
                ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
        return ret;
}

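/*
 * ->writepages handler: write back a range of dirty pages, sharing a single
 * writepage context across all of them so that ioends can span pages.
 */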
STATIC int
xfs_vm_writepages(
        struct address_space *mapping,
        struct writeback_control *wbc)
{
        struct xfs_writepage_ctx wpc = {
                .io_type = XFS_IO_INVALID,
        };
        int ret;

        xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
        if (dax_mapping(mapping))
                return dax_writeback_mapping_range(mapping,
                                xfs_find_bdev_for_inode(mapping->host), wbc);

        ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
        if (wpc.ioend)
                ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
        return ret;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
        struct page *page,
        gfp_t gfp_mask)
{
        int delalloc, unwritten;

        trace_xfs_releasepage(page->mapping->host, page, 0, 0);

        /*
         * mm accommodates an old ext3 case where clean pages might not have had
         * the dirty bit cleared. Thus, it can send actual dirty pages to
         * ->releasepage() via shrink_active_list(). Conversely,
         * block_invalidatepage() can send pages that are still marked dirty
         * but otherwise have invalidated buffers.
         *
         * We want to release the latter to avoid unnecessary buildup of the
         * LRU, skip the former and warn if we've left any lingering
         * delalloc/unwritten buffers on clean pages. Skip pages with delalloc
         * or unwritten buffers and warn if the page is not dirty. Otherwise
         * try to release the buffers.
         */
        xfs_count_page_state(page, &delalloc, &unwritten);

        if (delalloc) {
                WARN_ON_ONCE(!PageDirty(page));
                return 0;
        }
        if (unwritten) {
                WARN_ON_ONCE(!PageDirty(page));
                return 0;
        }

        return try_to_free_buffers(page);
}

/*
 * If this is O_DIRECT or the mpage code calling, tell them how large the
 * mapping is, so that we can avoid repeated get_blocks calls.
 *
 * If the mapping spans EOF, then we have to break the mapping up as the
 * mapping for blocks beyond EOF must be marked new so that sub block regions
 * can be correctly zeroed. We can't do this for mappings within EOF unless
 * the mapping was just allocated or is unwritten, otherwise the callers would
 * overwrite existing data with zeros. Hence we have to split the mapping into
 * a range up to and including EOF, and a second mapping for beyond EOF.
 */
static void
xfs_map_trim_size(
        struct inode *inode,
        sector_t iblock,
        struct buffer_head *bh_result,
        struct xfs_bmbt_irec *imap,
        xfs_off_t offset,
        ssize_t size)
{
        xfs_off_t mapping_size;

        mapping_size = imap->br_startoff + imap->br_blockcount - iblock;
        mapping_size <<= inode->i_blkbits;

        ASSERT(mapping_size > 0);
        if (mapping_size > size)
                mapping_size = size;
        if (offset < i_size_read(inode) &&
            offset + mapping_size >= i_size_read(inode)) {
                /* limit mapping to block that spans EOF */
                mapping_size = roundup_64(i_size_read(inode) - offset,
                                          1 << inode->i_blkbits);
        }
        if (mapping_size > LONG_MAX)
                mapping_size = LONG_MAX;

        bh_result->b_size = mapping_size;
}

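/*
 * get_blocks callback used by the buffered read and bmap paths. This path
 * never allocates blocks, hence the BUG_ON(create).
 */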
static int
xfs_get_blocks(
        struct inode *inode,
        sector_t iblock,
        struct buffer_head *bh_result,
        int create)
{
        struct xfs_inode *ip = XFS_I(inode);
        struct xfs_mount *mp = ip->i_mount;
        xfs_fileoff_t offset_fsb, end_fsb;
        int error = 0;
        int lockmode = 0;
        struct xfs_bmbt_irec imap;
        int nimaps = 1;
        xfs_off_t offset;
        ssize_t size;

        BUG_ON(create);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        offset = (xfs_off_t)iblock << inode->i_blkbits;
        ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
        size = bh_result->b_size;

        if (offset >= i_size_read(inode))
                return 0;

        /*
         * Direct I/O is usually done on preallocated files, so try getting
         * a block mapping without an exclusive lock first.
         */
        lockmode = xfs_ilock_data_map_shared(ip);

        ASSERT(offset <= mp->m_super->s_maxbytes);
        if (offset + size > mp->m_super->s_maxbytes)
                size = mp->m_super->s_maxbytes - offset;
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
        offset_fsb = XFS_B_TO_FSBT(mp, offset);

        error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
                               &imap, &nimaps, XFS_BMAPI_ENTIRE);
        if (error)
                goto out_unlock;

        if (nimaps) {
                trace_xfs_get_blocks_found(ip, offset, size,
                                ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
                                                   : XFS_IO_OVERWRITE, &imap);
                xfs_iunlock(ip, lockmode);
        } else {
                trace_xfs_get_blocks_notfound(ip, offset, size);
                goto out_unlock;
        }

        /* trim mapping down to size requested */
        xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);

        /*
         * For unwritten extents do not report a disk address in the buffered
         * read case (treat as if we're reading into a hole).
         */
        if (imap.br_startblock != HOLESTARTBLOCK &&
            imap.br_startblock != DELAYSTARTBLOCK &&
            !ISUNWRITTEN(&imap))
                xfs_map_buffer(inode, bh_result, &imap, offset);

        /*
         * If this is a realtime file, data may be on a different device to
         * that pointed to by the buffer_head b_bdev currently.
         */
        bh_result->b_bdev = xfs_find_bdev_for_inode(inode);
        return 0;

out_unlock:
        xfs_iunlock(ip, lockmode);
        return error;
}

STATIC ssize_t
xfs_vm_direct_IO(
        struct kiocb *iocb,
        struct iov_iter *iter)
{
        /*
         * We just need the method present so that open/fcntl allow direct I/O.
         */
        return -EINVAL;
}

STATIC sector_t
xfs_vm_bmap(
        struct address_space *mapping,
        sector_t block)
{
        struct inode *inode = (struct inode *)mapping->host;
        struct xfs_inode *ip = XFS_I(inode);

        trace_xfs_vm_bmap(XFS_I(inode));

        /*
         * The swap code (ab-)uses ->bmap to get a block mapping and then
         * bypasses the file system for actual I/O. We really can't allow
         * that on reflink inodes, so we have to skip out here. And yes,
         * 0 is the magic code for a bmap error.
         */
        if (xfs_is_reflink_inode(ip))
                return 0;

        filemap_write_and_wait(mapping);
        return generic_block_bmap(mapping, block, xfs_get_blocks);
}

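/* Buffered read paths: hand the pages straight to the generic mpage code. */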
STATIC int
xfs_vm_readpage(
        struct file *unused,
        struct page *page)
{
        trace_xfs_vm_readpage(page->mapping->host, 1);
        return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
        struct file *unused,
        struct address_space *mapping,
        struct list_head *pages,
        unsigned nr_pages)
{
        trace_xfs_vm_readpages(mapping->host, nr_pages);
        return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

/*
 * This is basically a copy of __set_page_dirty_buffers() with one
 * small tweak: buffers beyond EOF do not get marked dirty. If we mark them
 * dirty, we'll never be able to clean them because we don't write buffers
 * beyond EOF, and that means we can't invalidate pages that span EOF
 * that have been marked dirty. Further, the dirty state can leak into
 * the file interior if the file is extended, resulting in all sorts of
 * bad things happening as the state does not match the underlying data.
 *
 * XXX: this really indicates that bufferheads in XFS need to die. Warts like
 * this only exist because of bufferheads and how the generic code manages them.
 */
STATIC int
xfs_vm_set_page_dirty(
        struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        loff_t end_offset;
        loff_t offset;
        int newly_dirty;

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        end_offset = i_size_read(inode);
        offset = page_offset(page);

        spin_lock(&mapping->private_lock);
        if (page_has_buffers(page)) {
                struct buffer_head *head = page_buffers(page);
                struct buffer_head *bh = head;

                do {
                        if (offset < end_offset)
                                set_buffer_dirty(bh);
                        bh = bh->b_this_page;
                        offset += 1 << inode->i_blkbits;
                } while (bh != head);
        }
        /*
         * Lock out page->mem_cgroup migration to keep PageDirty
         * synchronized with per-memcg dirty page counters.
         */
        lock_page_memcg(page);
        newly_dirty = !TestSetPageDirty(page);
        spin_unlock(&mapping->private_lock);

        if (newly_dirty) {
                /* sigh - __set_page_dirty() is static, so copy it here, too */
                unsigned long flags;

                spin_lock_irqsave(&mapping->tree_lock, flags);
                if (page->mapping) {    /* Race with truncate? */
                        WARN_ON_ONCE(!PageUptodate(page));
                        account_page_dirtied(page, mapping);
                        radix_tree_tag_set(&mapping->page_tree,
                                           page_index(page), PAGECACHE_TAG_DIRTY);
                }
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
        }
        unlock_page_memcg(page);
        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return newly_dirty;
}

const struct address_space_operations xfs_address_space_operations = {
        .readpage               = xfs_vm_readpage,
        .readpages              = xfs_vm_readpages,
        .writepage              = xfs_vm_writepage,
        .writepages             = xfs_vm_writepages,
        .set_page_dirty         = xfs_vm_set_page_dirty,
        .releasepage            = xfs_vm_releasepage,
        .invalidatepage         = xfs_vm_invalidatepage,
        .bmap                   = xfs_vm_bmap,
        .direct_IO              = xfs_vm_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
        .error_remove_page      = generic_error_remove_page,
};
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_shared.h"
20#include "xfs_format.h"
21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h"
26#include "xfs_inode.h"
27#include "xfs_trans.h"
28#include "xfs_inode_item.h"
29#include "xfs_alloc.h"
30#include "xfs_error.h"
31#include "xfs_iomap.h"
32#include "xfs_trace.h"
33#include "xfs_bmap.h"
34#include "xfs_bmap_util.h"
35#include "xfs_bmap_btree.h"
36#include "xfs_dinode.h"
37#include <linux/aio.h>
38#include <linux/gfp.h>
39#include <linux/mpage.h>
40#include <linux/pagevec.h>
41#include <linux/writeback.h>
42
43void
44xfs_count_page_state(
45 struct page *page,
46 int *delalloc,
47 int *unwritten)
48{
49 struct buffer_head *bh, *head;
50
51 *delalloc = *unwritten = 0;
52
53 bh = head = page_buffers(page);
54 do {
55 if (buffer_unwritten(bh))
56 (*unwritten) = 1;
57 else if (buffer_delay(bh))
58 (*delalloc) = 1;
59 } while ((bh = bh->b_this_page) != head);
60}
61
62STATIC struct block_device *
63xfs_find_bdev_for_inode(
64 struct inode *inode)
65{
66 struct xfs_inode *ip = XFS_I(inode);
67 struct xfs_mount *mp = ip->i_mount;
68
69 if (XFS_IS_REALTIME_INODE(ip))
70 return mp->m_rtdev_targp->bt_bdev;
71 else
72 return mp->m_ddev_targp->bt_bdev;
73}
74
75/*
76 * We're now finished for good with this ioend structure.
77 * Update the page state via the associated buffer_heads,
78 * release holds on the inode and bio, and finally free
79 * up memory. Do not use the ioend after this.
80 */
81STATIC void
82xfs_destroy_ioend(
83 xfs_ioend_t *ioend)
84{
85 struct buffer_head *bh, *next;
86
87 for (bh = ioend->io_buffer_head; bh; bh = next) {
88 next = bh->b_private;
89 bh->b_end_io(bh, !ioend->io_error);
90 }
91
92 mempool_free(ioend, xfs_ioend_pool);
93}
94
95/*
96 * Fast and loose check if this write could update the on-disk inode size.
97 */
98static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
99{
100 return ioend->io_offset + ioend->io_size >
101 XFS_I(ioend->io_inode)->i_d.di_size;
102}
103
104STATIC int
105xfs_setfilesize_trans_alloc(
106 struct xfs_ioend *ioend)
107{
108 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
109 struct xfs_trans *tp;
110 int error;
111
112 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
113
114 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
115 if (error) {
116 xfs_trans_cancel(tp, 0);
117 return error;
118 }
119
120 ioend->io_append_trans = tp;
121
122 /*
123 * We may pass freeze protection with a transaction. So tell lockdep
124 * we released it.
125 */
126 rwsem_release(&ioend->io_inode->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
127 1, _THIS_IP_);
128 /*
129 * We hand off the transaction to the completion thread now, so
130 * clear the flag here.
131 */
132 current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
133 return 0;
134}
135
136/*
137 * Update on-disk file size now that data has been written to disk.
138 */
139STATIC int
140xfs_setfilesize(
141 struct xfs_ioend *ioend)
142{
143 struct xfs_inode *ip = XFS_I(ioend->io_inode);
144 struct xfs_trans *tp = ioend->io_append_trans;
145 xfs_fsize_t isize;
146
147 /*
148 * The transaction may have been allocated in the I/O submission thread,
149 * thus we need to mark ourselves as beeing in a transaction manually.
150 * Similarly for freeze protection.
151 */
152 current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
153 rwsem_acquire_read(&VFS_I(ip)->i_sb->s_writers.lock_map[SB_FREEZE_FS-1],
154 0, 1, _THIS_IP_);
155
156 xfs_ilock(ip, XFS_ILOCK_EXCL);
157 isize = xfs_new_eof(ip, ioend->io_offset + ioend->io_size);
158 if (!isize) {
159 xfs_iunlock(ip, XFS_ILOCK_EXCL);
160 xfs_trans_cancel(tp, 0);
161 return 0;
162 }
163
164 trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
165
166 ip->i_d.di_size = isize;
167 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
168 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
169
170 return xfs_trans_commit(tp, 0);
171}
172
173/*
174 * Schedule IO completion handling on the final put of an ioend.
175 *
176 * If there is no work to do we might as well call it a day and free the
177 * ioend right now.
178 */
179STATIC void
180xfs_finish_ioend(
181 struct xfs_ioend *ioend)
182{
183 if (atomic_dec_and_test(&ioend->io_remaining)) {
184 struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
185
186 if (ioend->io_type == XFS_IO_UNWRITTEN)
187 queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
188 else if (ioend->io_append_trans ||
189 (ioend->io_isdirect && xfs_ioend_is_append(ioend)))
190 queue_work(mp->m_data_workqueue, &ioend->io_work);
191 else
192 xfs_destroy_ioend(ioend);
193 }
194}
195
196/*
197 * IO write completion.
198 */
199STATIC void
200xfs_end_io(
201 struct work_struct *work)
202{
203 xfs_ioend_t *ioend = container_of(work, xfs_ioend_t, io_work);
204 struct xfs_inode *ip = XFS_I(ioend->io_inode);
205 int error = 0;
206
207 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
208 ioend->io_error = -EIO;
209 goto done;
210 }
211 if (ioend->io_error)
212 goto done;
213
214 /*
215 * For unwritten extents we need to issue transactions to convert a
216 * range to normal written extens after the data I/O has finished.
217 */
218 if (ioend->io_type == XFS_IO_UNWRITTEN) {
219 error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
220 ioend->io_size);
221 } else if (ioend->io_isdirect && xfs_ioend_is_append(ioend)) {
222 /*
223 * For direct I/O we do not know if we need to allocate blocks
224 * or not so we can't preallocate an append transaction as that
225 * results in nested reservations and log space deadlocks. Hence
226 * allocate the transaction here. While this is sub-optimal and
227 * can block IO completion for some time, we're stuck with doing
228 * it this way until we can pass the ioend to the direct IO
229 * allocation callbacks and avoid nesting that way.
230 */
231 error = xfs_setfilesize_trans_alloc(ioend);
232 if (error)
233 goto done;
234 error = xfs_setfilesize(ioend);
235 } else if (ioend->io_append_trans) {
236 error = xfs_setfilesize(ioend);
237 } else {
238 ASSERT(!xfs_ioend_is_append(ioend));
239 }
240
241done:
242 if (error)
243 ioend->io_error = -error;
244 xfs_destroy_ioend(ioend);
245}
246
247/*
248 * Call IO completion handling in caller context on the final put of an ioend.
249 */
250STATIC void
251xfs_finish_ioend_sync(
252 struct xfs_ioend *ioend)
253{
254 if (atomic_dec_and_test(&ioend->io_remaining))
255 xfs_end_io(&ioend->io_work);
256}
257
258/*
259 * Allocate and initialise an IO completion structure.
260 * We need to track unwritten extent write completion here initially.
261 * We'll need to extend this for updating the ondisk inode size later
262 * (vs. incore size).
263 */
264STATIC xfs_ioend_t *
265xfs_alloc_ioend(
266 struct inode *inode,
267 unsigned int type)
268{
269 xfs_ioend_t *ioend;
270
271 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS);
272
273 /*
274 * Set the count to 1 initially, which will prevent an I/O
275 * completion callback from happening before we have started
276 * all the I/O from calling the completion routine too early.
277 */
278 atomic_set(&ioend->io_remaining, 1);
279 ioend->io_isdirect = 0;
280 ioend->io_error = 0;
281 ioend->io_list = NULL;
282 ioend->io_type = type;
283 ioend->io_inode = inode;
284 ioend->io_buffer_head = NULL;
285 ioend->io_buffer_tail = NULL;
286 ioend->io_offset = 0;
287 ioend->io_size = 0;
288 ioend->io_append_trans = NULL;
289
290 INIT_WORK(&ioend->io_work, xfs_end_io);
291 return ioend;
292}
293
294STATIC int
295xfs_map_blocks(
296 struct inode *inode,
297 loff_t offset,
298 struct xfs_bmbt_irec *imap,
299 int type,
300 int nonblocking)
301{
302 struct xfs_inode *ip = XFS_I(inode);
303 struct xfs_mount *mp = ip->i_mount;
304 ssize_t count = 1 << inode->i_blkbits;
305 xfs_fileoff_t offset_fsb, end_fsb;
306 int error = 0;
307 int bmapi_flags = XFS_BMAPI_ENTIRE;
308 int nimaps = 1;
309
310 if (XFS_FORCED_SHUTDOWN(mp))
311 return -XFS_ERROR(EIO);
312
313 if (type == XFS_IO_UNWRITTEN)
314 bmapi_flags |= XFS_BMAPI_IGSTATE;
315
316 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
317 if (nonblocking)
318 return -XFS_ERROR(EAGAIN);
319 xfs_ilock(ip, XFS_ILOCK_SHARED);
320 }
321
322 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
323 (ip->i_df.if_flags & XFS_IFEXTENTS));
324 ASSERT(offset <= mp->m_super->s_maxbytes);
325
326 if (offset + count > mp->m_super->s_maxbytes)
327 count = mp->m_super->s_maxbytes - offset;
328 end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
329 offset_fsb = XFS_B_TO_FSBT(mp, offset);
330 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
331 imap, &nimaps, bmapi_flags);
332 xfs_iunlock(ip, XFS_ILOCK_SHARED);
333
334 if (error)
335 return -XFS_ERROR(error);
336
337 if (type == XFS_IO_DELALLOC &&
338 (!nimaps || isnullstartblock(imap->br_startblock))) {
339 error = xfs_iomap_write_allocate(ip, offset, imap);
340 if (!error)
341 trace_xfs_map_blocks_alloc(ip, offset, count, type, imap);
342 return -XFS_ERROR(error);
343 }
344
345#ifdef DEBUG
346 if (type == XFS_IO_UNWRITTEN) {
347 ASSERT(nimaps);
348 ASSERT(imap->br_startblock != HOLESTARTBLOCK);
349 ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
350 }
351#endif
352 if (nimaps)
353 trace_xfs_map_blocks_found(ip, offset, count, type, imap);
354 return 0;
355}
356
357STATIC int
358xfs_imap_valid(
359 struct inode *inode,
360 struct xfs_bmbt_irec *imap,
361 xfs_off_t offset)
362{
363 offset >>= inode->i_blkbits;
364
365 return offset >= imap->br_startoff &&
366 offset < imap->br_startoff + imap->br_blockcount;
367}
368
369/*
370 * BIO completion handler for buffered IO.
371 */
372STATIC void
373xfs_end_bio(
374 struct bio *bio,
375 int error)
376{
377 xfs_ioend_t *ioend = bio->bi_private;
378
379 ASSERT(atomic_read(&bio->bi_cnt) >= 1);
380 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
381
382 /* Toss bio and pass work off to an xfsdatad thread */
383 bio->bi_private = NULL;
384 bio->bi_end_io = NULL;
385 bio_put(bio);
386
387 xfs_finish_ioend(ioend);
388}
389
390STATIC void
391xfs_submit_ioend_bio(
392 struct writeback_control *wbc,
393 xfs_ioend_t *ioend,
394 struct bio *bio)
395{
396 atomic_inc(&ioend->io_remaining);
397 bio->bi_private = ioend;
398 bio->bi_end_io = xfs_end_bio;
399 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE, bio);
400}
401
402STATIC struct bio *
403xfs_alloc_ioend_bio(
404 struct buffer_head *bh)
405{
406 int nvecs = bio_get_nr_vecs(bh->b_bdev);
407 struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
408
409 ASSERT(bio->bi_private == NULL);
410 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
411 bio->bi_bdev = bh->b_bdev;
412 return bio;
413}
414
415STATIC void
416xfs_start_buffer_writeback(
417 struct buffer_head *bh)
418{
419 ASSERT(buffer_mapped(bh));
420 ASSERT(buffer_locked(bh));
421 ASSERT(!buffer_delay(bh));
422 ASSERT(!buffer_unwritten(bh));
423
424 mark_buffer_async_write(bh);
425 set_buffer_uptodate(bh);
426 clear_buffer_dirty(bh);
427}
428
429STATIC void
430xfs_start_page_writeback(
431 struct page *page,
432 int clear_dirty,
433 int buffers)
434{
435 ASSERT(PageLocked(page));
436 ASSERT(!PageWriteback(page));
437 if (clear_dirty)
438 clear_page_dirty_for_io(page);
439 set_page_writeback(page);
440 unlock_page(page);
441 /* If no buffers on the page are to be written, finish it here */
442 if (!buffers)
443 end_page_writeback(page);
444}
445
446static inline int xfs_bio_add_buffer(struct bio *bio, struct buffer_head *bh)
447{
448 return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
449}
450
451/*
452 * Submit all of the bios for all of the ioends we have saved up, covering the
453 * initial writepage page and also any probed pages.
454 *
455 * Because we may have multiple ioends spanning a page, we need to start
456 * writeback on all the buffers before we submit them for I/O. If we mark the
457 * buffers as we got, then we can end up with a page that only has buffers
458 * marked async write and I/O complete on can occur before we mark the other
459 * buffers async write.
460 *
461 * The end result of this is that we trip a bug in end_page_writeback() because
462 * we call it twice for the one page as the code in end_buffer_async_write()
463 * assumes that all buffers on the page are started at the same time.
464 *
465 * The fix is two passes across the ioend list - one to start writeback on the
466 * buffer_heads, and then submit them for I/O on the second pass.
467 *
468 * If @fail is non-zero, it means that we have a situation where some part of
469 * the submission process has failed after we have marked paged for writeback
470 * and unlocked them. In this situation, we need to fail the ioend chain rather
471 * than submit it to IO. This typically only happens on a filesystem shutdown.
472 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend,
	int			fail)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private)
			xfs_start_buffer_writeback(bh);
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		/*
		 * If we are failing the IO now, just mark the ioend with an
		 * error and finish it. This will run IO completion immediately
		 * as there is only one reference to the ioend at this point in
		 * time.
		 */
		if (fail) {
			ioend->io_error = -fail;
			xfs_finish_ioend(ioend);
			continue;
		}

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (xfs_bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this ioend.
 * Toss the ioend too. Only ever called for the initial page
 * in a writepage request, so only ever one page.
 */
STATIC void
xfs_cancel_ioend(
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*next;
	struct buffer_head	*bh, *next_bh;

	do {
		next = ioend->io_list;
		bh = ioend->io_buffer_head;
		do {
			next_bh = bh->b_private;
			clear_buffer_async_write(bh);
			unlock_buffer(bh);
		} while ((bh = next_bh) != NULL);

		mempool_free(ioend, xfs_ioend_pool);
	} while ((ioend = next) != NULL);
}

/*
 * Test to see if we've been building up a completion structure for
 * earlier buffers -- if so, we try to append to this ioend if we
 * can, otherwise we finish off any current ioend and start another.
 * The resulting ioend chain is handed back through @result for the
 * caller to submit once the whole page has been processed.
 */
STATIC void
xfs_add_to_ioend(
	struct inode		*inode,
	struct buffer_head	*bh,
	xfs_off_t		offset,
	unsigned int		type,
	xfs_ioend_t		**result,
	int			need_ioend)
{
	xfs_ioend_t		*ioend = *result;

	if (!ioend || need_ioend || type != ioend->io_type) {
		xfs_ioend_t	*previous = *result;

		ioend = xfs_alloc_ioend(inode, type);
		ioend->io_offset = offset;
		ioend->io_buffer_head = bh;
		ioend->io_buffer_tail = bh;
		if (previous)
			previous->io_list = ioend;
		*result = ioend;
	} else {
		ioend->io_buffer_tail->b_private = bh;
		ioend->io_buffer_tail = bh;
	}

	bh->b_private = NULL;
	ioend->io_size += bh->b_size;
}

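/*
 * Set bh->b_blocknr for @offset from the extent mapping: convert the
 * extent's start from 512 byte basic blocks into units of the inode's
 * block size, then add the distance of @offset into the extent, also in
 * inode block size units.
 */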
STATIC void
xfs_map_buffer(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	sector_t		bn;
	struct xfs_mount	*m = XFS_I(inode)->i_mount;
	xfs_off_t		iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff);
	xfs_daddr_t		iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock);

	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) +
	      ((offset - iomap_offset) >> inode->i_blkbits);

	ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode)));

	bh->b_blocknr = bn;
	set_buffer_mapped(bh);
}

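/*
 * Map the buffer to its newly allocated disk blocks and clear the delalloc
 * and unwritten flags now that real blocks back it.
 */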
STATIC void
xfs_map_at_offset(
	struct inode		*inode,
	struct buffer_head	*bh,
	struct xfs_bmbt_irec	*imap,
	xfs_off_t		offset)
{
	ASSERT(imap->br_startblock != HOLESTARTBLOCK);
	ASSERT(imap->br_startblock != DELAYSTARTBLOCK);

	xfs_map_buffer(inode, bh, imap, offset);
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
	clear_buffer_unwritten(bh);
}

/*
 * Test if a given page contains at least one buffer of a given @type.
 * If @check_all_buffers is true, then we walk all the buffers in the page to
 * try to find one of the type passed in. If it is not set, then the caller only
 * needs to check the first buffer on the page for a match.
 */
STATIC bool
xfs_check_page_type(
	struct page		*page,
	unsigned int		type,
	bool			check_all_buffers)
{
	struct buffer_head	*bh;
	struct buffer_head	*head;

	if (PageWriteback(page))
		return false;
	if (!page->mapping)
		return false;
	if (!page_has_buffers(page))
		return false;

	bh = head = page_buffers(page);
	do {
		if (buffer_unwritten(bh)) {
			if (type == XFS_IO_UNWRITTEN)
				return true;
		} else if (buffer_delay(bh)) {
			if (type == XFS_IO_DELALLOC)
				return true;
		} else if (buffer_dirty(bh) && buffer_mapped(bh)) {
			if (type == XFS_IO_OVERWRITE)
				return true;
		}

		/* If we are only checking the first buffer, we are done now. */
		if (!check_all_buffers)
			break;
	} while ((bh = bh->b_this_page) != head);

	return false;
}

/*
 * Allocate & map buffers for the page given the extent map, then write it
 * out. Except for the original page of a writepage call, this is invoked on
 * delalloc/unwritten pages only; for the original page it is possible that
 * the page has no mapping at all.
 */
STATIC int
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	loff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc)
{
	struct buffer_head	*bh, *head;
	xfs_off_t		end_offset;
	unsigned long		p_offset;
	unsigned int		type;
	int			len, page_dirty;
	int			count = 0, done = 0, uptodate = 1;
	xfs_off_t		offset = page_offset(page);

	if (page->index != tindex)
		goto fail;
	if (!trylock_page(page))
		goto fail;
	if (PageWriteback(page))
		goto fail_unlock_page;
	if (page->mapping != inode->i_mapping)
		goto fail_unlock_page;
	if (!xfs_check_page_type(page, (*ioendp)->io_type, false))
		goto fail_unlock_page;

	/*
	 * page_dirty is initially a count of buffers on the page before
	 * EOF and is decremented as we move each into a cleanable state.
	 *
	 * Derivation:
	 *
	 * End offset is the highest offset that this page should represent.
	 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
	 * will evaluate to non-zero and be less than PAGE_CACHE_SIZE and
	 * hence give us the correct page_dirty count. On any other page,
	 * it will be zero and in that case we need page_dirty to be the
	 * count of buffers on the page.
	 */
	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			i_size_read(inode));

	/*
	 * If the current map does not span the entire page we are about to try
	 * to write, then give up. The only way we can write a page that spans
	 * multiple mappings in a single writeback iteration is via the
	 * xfs_vm_writepage() function. Data integrity writeback requires the
	 * entire page to be written in a single attempt, otherwise the part of
	 * the page we don't write here doesn't get written as part of the data
	 * integrity sync.
	 *
	 * For normal writeback, we also don't attempt to write partial pages
	 * here as it simply means that write_cache_pages() will see it under
	 * writeback and ignore the page until some point in the future, at
	 * which time this will be the only page in the file that needs
	 * writeback. Hence for more optimal IO patterns, we should always
	 * avoid partial page writeback due to multiple mappings on a page here.
	 */
	if (!xfs_imap_valid(inode, imap, end_offset))
		goto fail_unlock_page;

	len = 1 << inode->i_blkbits;
	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
					PAGE_CACHE_SIZE);
	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
	page_dirty = p_offset / len;
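	/*
	 * Example, assuming a 4k page and 512 byte blocks: if EOF falls
	 * 0x600 bytes into this page, p_offset rounds up to 0x600 and
	 * page_dirty is 3, counting only the three buffers before EOF.
	 * If the page sits entirely below EOF, p_offset becomes
	 * PAGE_CACHE_SIZE and all eight buffers are counted.
	 */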

	/*
	 * The moment we find a buffer that doesn't match our current type
	 * specification or can't be written, abort the loop and start
	 * writeback. As per the above xfs_imap_valid() check, only
	 * xfs_vm_writepage() can handle partial page writeback fully - we are
	 * limited here to the buffers that are contiguous with the current
	 * ioend, and hence a buffer we can't write breaks that contiguity and
	 * we have to defer the rest of the IO to xfs_vm_writepage().
	 */
	bh = head = page_buffers(page);
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh))) {
			done = 1;
			break;
		}

		if (buffer_unwritten(bh) || buffer_delay(bh) ||
		    buffer_mapped(bh)) {
			if (buffer_unwritten(bh))
				type = XFS_IO_UNWRITTEN;
			else if (buffer_delay(bh))
				type = XFS_IO_DELALLOC;
			else
				type = XFS_IO_OVERWRITE;

			/*
			 * imap should always be valid because of the above
			 * partial page end_offset check on the imap.
			 */
			ASSERT(xfs_imap_valid(inode, imap, offset));

			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type,
					 ioendp, done);

			page_dirty--;
			count++;
		} else {
			done = 1;
			break;
		}
	} while (offset += len, (bh = bh->b_this_page) != head);

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (count) {
		if (--wbc->nr_to_write <= 0 &&
		    wbc->sync_mode == WB_SYNC_NONE)
			done = 1;
	}
	xfs_start_page_writeback(page, !page_dirty, count);

	return done;
 fail_unlock_page:
	unlock_page(page);
 fail:
	return 1;
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by imap and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	struct xfs_bmbt_irec	*imap,
	xfs_ioend_t		**ioendp,
	struct writeback_control *wbc,
	pgoff_t			tlast)
{
	struct pagevec		pvec;
	int			done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
						imap, ioendp, wbc);
			if (done)
				break;
		}

		pagevec_release(&pvec);
		cond_resched();
	}
}

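/*
 * Trace the invalidation, then let the generic buffer_head code clean up
 * any buffers that fall entirely inside the invalidated range.
 */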
STATIC void
xfs_vm_invalidatepage(
	struct page		*page,
	unsigned int		offset,
	unsigned int		length)
{
	trace_xfs_invalidatepage(page->mapping->host, page, offset,
				 length);
	block_invalidatepage(page, offset, length);
}

/*
 * If the page has delalloc buffers on it, we need to punch them out before we
 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read
 * is done on that same region - the delalloc extent is returned when none is
 * supposed to be there.
 *
 * We prevent this by truncating away the delalloc regions on the page before
 * invalidating it. Because they are delalloc, we can do this without needing a
 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this
 * truncation without a transaction as there is no space left for block
 * reservation (typically why we see an ENOSPC in writeback).
 *
 * This is not a performance critical path, so for now just do the punching a
 * buffer head at a time.
 */
STATIC void
xfs_aops_discard_page(
	struct page		*page)
{
	struct inode		*inode = page->mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct buffer_head	*bh, *head;
	loff_t			offset = page_offset(page);

	if (!xfs_check_page_type(page, XFS_IO_DELALLOC, true))
		goto out_invalidate;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_invalidate;

	xfs_alert(ip->i_mount,
		"page discard on page %p, inode 0x%llx, offset %llu.",
			page, ip->i_ino, offset);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	bh = head = page_buffers(page);
	do {
		int		error;
		xfs_fileoff_t	start_fsb;

		if (!buffer_delay(bh))
			goto next_buffer;

		start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
		error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"page discard unable to remove delalloc mapping.");
			}
			break;
		}
next_buffer:
		offset += 1 << inode->i_blkbits;

	} while ((bh = bh->b_this_page) != head);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_invalidate:
	xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE);
	return;
}

/*
 * Write out a dirty page.
 *
 * For delalloc space on the page we need to allocate space and flush it.
 * For unwritten space on the page we need to start the conversion to
 * regular allocated space.
 * For any other dirty buffer heads on the page we should flush them.
 */
STATIC int
xfs_vm_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	struct inode		*inode = page->mapping->host;
	struct buffer_head	*bh, *head;
	struct xfs_bmbt_irec	imap;
	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
	loff_t			offset;
	unsigned int		type;
	__uint64_t		end_offset;
	pgoff_t			end_index, last_index;
	ssize_t			len;
	int			err, imap_valid = 0, uptodate = 1;
	int			count = 0;
	int			nonblocking = 0;

	trace_xfs_writepage(inode, page, 0, 0);

	ASSERT(page_has_buffers(page));

	/*
	 * Refuse to write the page out if we are called from reclaim context.
	 *
	 * This avoids stack overflows when called from deeply nested stacks in
	 * random callers for direct reclaim or memcg reclaim. We explicitly
	 * allow reclaim from kswapd as the stack usage there is relatively low.
	 *
	 * This should never happen except in the case of a VM regression so
	 * warn about it.
	 */
	if (WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
			PF_MEMALLOC))
		goto redirty;

	/*
	 * Given that we do not allow direct reclaim to call us, we should
	 * never be called while in a filesystem transaction.
	 */
	if (WARN_ON(current->flags & PF_FSTRANS))
		goto redirty;

	/* Is this page beyond the end of the file? */
	offset = i_size_read(inode);
	end_index = offset >> PAGE_CACHE_SHIFT;
	last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);

		/*
		 * Skip the page if it is fully outside i_size, e.g. due to a
		 * truncate operation that is in progress. We must redirty the
		 * page so that reclaim stops reclaiming it. Otherwise
		 * xfs_vm_releasepage() is called on it and gets confused.
		 */
		if (page->index >= end_index + 1 || offset_into_page == 0)
			goto redirty;

		/*
		 * The page straddles i_size. It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size. For a file
		 * that is not a multiple of the page size, the remaining
		 * memory is zeroed when mapped, and writes to that region are
		 * not written out to the file."
		 */
		zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
	}

	end_offset = min_t(unsigned long long,
			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
			offset);
	len = 1 << inode->i_blkbits;

	bh = head = page_buffers(page);
	offset = page_offset(page);
	type = XFS_IO_OVERWRITE;

	if (wbc->sync_mode == WB_SYNC_NONE)
		nonblocking = 1;

	do {
		int new_ioend = 0;

		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;

		/*
		 * set_page_dirty dirties all buffers in a page, independent
		 * of their state. The dirty state however is entirely
		 * meaningless for holes (!mapped && uptodate), so skip
		 * buffers covering holes here.
		 */
		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
			imap_valid = 0;
			continue;
		}

		if (buffer_unwritten(bh)) {
			if (type != XFS_IO_UNWRITTEN) {
				type = XFS_IO_UNWRITTEN;
				imap_valid = 0;
			}
		} else if (buffer_delay(bh)) {
			if (type != XFS_IO_DELALLOC) {
				type = XFS_IO_DELALLOC;
				imap_valid = 0;
			}
		} else if (buffer_uptodate(bh)) {
			if (type != XFS_IO_OVERWRITE) {
				type = XFS_IO_OVERWRITE;
				imap_valid = 0;
			}
		} else {
			if (PageUptodate(page))
				ASSERT(buffer_mapped(bh));
			/*
			 * This buffer is not uptodate and will not be
			 * written to disk. Ensure that we will put any
			 * subsequent writeable buffers into a new
			 * ioend.
			 */
			imap_valid = 0;
			continue;
		}

		if (imap_valid)
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		if (!imap_valid) {
			/*
			 * If we didn't have a valid mapping then we need to
			 * put the new mapping into a separate ioend structure.
			 * This ensures non-contiguous extents always have
			 * separate ioends, which is particularly important
			 * for unwritten extent conversion at I/O completion
			 * time.
			 */
			new_ioend = 1;
			err = xfs_map_blocks(inode, offset, &imap, type,
					     nonblocking);
			if (err)
				goto error;
			imap_valid = xfs_imap_valid(inode, &imap, offset);
		}
		if (imap_valid) {
			lock_buffer(bh);
			if (type != XFS_IO_OVERWRITE)
				xfs_map_at_offset(inode, bh, &imap, offset);
			xfs_add_to_ioend(inode, bh, offset, type, &ioend,
					 new_ioend);
			count++;
		}

		if (!iohead)
			iohead = ioend;

	} while (offset += len, ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	xfs_start_page_writeback(page, 1, count);

	/* if there is no IO to be submitted for this page, we are done */
	if (!ioend)
		return 0;

	ASSERT(iohead);

	/*
	 * Any errors from this point onwards need to be reported through the
	 * IO completion path as we have marked the initial page as under
	 * writeback and unlocked it.
	 */
	if (imap_valid) {
		xfs_off_t		end_index;

		end_index = imap.br_startoff + imap.br_blockcount;

		/* to bytes */
		end_index <<= inode->i_blkbits;

		/* to pages */
		end_index = (end_index - 1) >> PAGE_CACHE_SHIFT;

		/* check against file size */
		if (end_index > last_index)
			end_index = last_index;

		xfs_cluster_write(inode, page->index + 1, &imap, &ioend,
				  wbc, end_index);
	}

	/*
	 * Reserve log space if we might write beyond the on-disk inode size.
	 */
	err = 0;
	if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
		err = xfs_setfilesize_trans_alloc(ioend);

	xfs_submit_ioend(wbc, iohead, err);

	return 0;

error:
	if (iohead)
		xfs_cancel_ioend(iohead);

	if (err == -EAGAIN)
		goto redirty;

	xfs_aops_discard_page(page);
	ClearPageUptodate(page);
	unlock_page(page);
	return err;

redirty:
	redirty_page_for_writepage(wbc, page);
	unlock_page(page);
	return 0;
}

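/*
 * Clear the XFS_ITRUNCATED flag (left over from a truncate) before letting
 * the generic code walk the dirty pages and feed them to xfs_vm_writepage().
 */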
STATIC int
xfs_vm_writepages(
	struct address_space	*mapping,
	struct writeback_control *wbc)
{
	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
	return generic_writepages(mapping, wbc);
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released. The page should already be clean. We always
 * have buffer heads in this call.
 *
 * Returns 1 if the page is ok to release, 0 otherwise.
 */
STATIC int
xfs_vm_releasepage(
	struct page		*page,
	gfp_t			gfp_mask)
{
	int			delalloc, unwritten;

	trace_xfs_releasepage(page->mapping->host, page, 0, 0);

	xfs_count_page_state(page, &delalloc, &unwritten);

	if (WARN_ON(delalloc))
		return 0;
	if (WARN_ON(unwritten))
		return 0;

	return try_to_free_buffers(page);
}

STATIC int
__xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create,
	int			direct)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb, end_fsb;
	int			error = 0;
	int			lockmode = 0;
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1;
	xfs_off_t		offset;
	ssize_t			size;
	int			new = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	offset = (xfs_off_t)iblock << inode->i_blkbits;
	ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
	size = bh_result->b_size;

	if (!create && direct && offset >= i_size_read(inode))
		return 0;

	/*
	 * Direct I/O is usually done on preallocated files, so try getting
	 * a block mapping without an exclusive lock first. For buffered
	 * writes we already have the exclusive iolock anyway, so avoiding
	 * a lock roundtrip here by taking the ilock exclusive from the
	 * beginning is a useful micro optimization.
	 */
	if (create && !direct) {
		lockmode = XFS_ILOCK_EXCL;
		xfs_ilock(ip, lockmode);
	} else {
		lockmode = xfs_ilock_data_map_shared(ip);
	}

	ASSERT(offset <= mp->m_super->s_maxbytes);
	if (offset + size > mp->m_super->s_maxbytes)
		size = mp->m_super->s_maxbytes - offset;
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
				&imap, &nimaps, XFS_BMAPI_ENTIRE);
	if (error)
		goto out_unlock;

	if (create &&
	    (!nimaps ||
	     (imap.br_startblock == HOLESTARTBLOCK ||
	      imap.br_startblock == DELAYSTARTBLOCK))) {
		if (direct || xfs_get_extsz_hint(ip)) {
			/*
			 * Drop the ilock in preparation for starting the block
			 * allocation transaction. It will be retaken
			 * exclusively inside xfs_iomap_write_direct for the
			 * actual allocation.
			 */
			xfs_iunlock(ip, lockmode);
			error = xfs_iomap_write_direct(ip, offset, size,
						       &imap, nimaps);
			if (error)
				return -error;
			new = 1;
		} else {
			/*
			 * Delalloc reservations do not require a transaction,
			 * so we can go on without dropping the lock here. If
			 * we are allocating a new delalloc block, set the new
			 * flag so the buffer is marked new and we know it is
			 * newly allocated if the write fails.
			 */
			if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
				new = 1;
			error = xfs_iomap_write_delay(ip, offset, size, &imap);
			if (error)
				goto out_unlock;

			xfs_iunlock(ip, lockmode);
		}

		trace_xfs_get_blocks_alloc(ip, offset, size, 0, &imap);
	} else if (nimaps) {
		trace_xfs_get_blocks_found(ip, offset, size, 0, &imap);
		xfs_iunlock(ip, lockmode);
	} else {
		trace_xfs_get_blocks_notfound(ip, offset, size);
		goto out_unlock;
	}

	if (imap.br_startblock != HOLESTARTBLOCK &&
	    imap.br_startblock != DELAYSTARTBLOCK) {
		/*
		 * For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct) {
				bh_result->b_private = inode;
				set_buffer_defer_completion(bh_result);
			}
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device to
	 * that currently pointed to by the buffer_head's b_bdev.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is, so that we can avoid repeated get_blocks calls.
	 *
	 * If the mapping spans EOF, then we have to break the mapping up as the
	 * mapping for blocks beyond EOF must be marked new so that sub block
	 * regions can be correctly zeroed. We can't do this for mappings within
	 * EOF unless the mapping was just allocated or is unwritten, otherwise
	 * the callers would overwrite existing data with zeros. Hence we have
	 * to split the mapping into a range up to and including EOF, and a
	 * second mapping for beyond EOF.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (offset < i_size_read(inode) &&
		    offset + mapping_size >= i_size_read(inode)) {
			/* limit mapping to block that spans EOF */
			mapping_size = roundup_64(i_size_read(inode) - offset,
						  1 << inode->i_blkbits);
		}
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;

out_unlock:
	xfs_iunlock(ip, lockmode);
	return -error;
}

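/*
 * Thin wrappers that tell __xfs_get_blocks whether the caller is the
 * buffered or the direct I/O path.
 */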
int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL, __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents. In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done. But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions. In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * While the generic direct I/O code updates the inode size, it does
	 * so only after the end_io handler is called, which means our
	 * end_io handler thinks the on-disk size is outside the in-core
	 * size. To prevent this just update it a little bit earlier here.
	 */
	if (offset + size > i_size_read(ioend->io_inode))
		i_size_write(ioend->io_inode, offset + size);

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called. Thus we need to protect
	 * against double-freeing.
	 */
	iocb->private = NULL;

	ioend->io_offset = offset;
	ioend->io_size = size;
	if (private && size > 0)
		ioend->io_type = XFS_IO_UNWRITTEN;

	xfs_finish_ioend_sync(ioend);
}

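/*
 * For direct writes we allocate the ioend that carries completion state up
 * front, as unwritten conversion and size updates must be handled at I/O
 * completion; direct reads need no completion processing and go straight
 * to the generic blockdev code.
 */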
STATIC ssize_t
xfs_vm_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct block_device	*bdev = xfs_find_bdev_for_inode(inode);
	struct xfs_ioend	*ioend = NULL;
	ssize_t			ret;

	if (rw & WRITE) {
		size_t size = iov_length(iov, nr_segs);

		/*
		 * We cannot preallocate a size update transaction here as we
		 * don't know whether allocation is necessary or not. Hence we
		 * can only tell IO completion that one is necessary if we are
		 * not doing unwritten extent conversion.
		 */
		iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
		if (offset + size > XFS_I(inode)->i_d.di_size)
			ioend->io_isdirect = 1;

		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   xfs_end_io_direct_write, NULL,
					   DIO_ASYNC_EXTEND);
		if (ret != -EIOCBQUEUED && iocb->private)
			goto out_destroy_ioend;
	} else {
		ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov,
					   offset, nr_segs,
					   xfs_get_blocks_direct,
					   NULL, NULL, 0);
	}

	return ret;

out_destroy_ioend:
	xfs_destroy_ioend(ioend);
	return ret;
}

/*
 * Punch out the delalloc blocks we have already allocated.
 *
 * Don't bother with xfs_setattr given that nothing can have made it to disk
 * yet as the page is still locked at this point.
 */
STATIC void
xfs_vm_kill_delalloc_range(
	struct inode		*inode,
	loff_t			start,
	loff_t			end)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
	if (end_fsb <= start_fsb)
		return;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						end_fsb - start_fsb);
	if (error) {
		/* something screwed, just bail */
		if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
			xfs_alert(ip->i_mount,
		"xfs_vm_write_failed: unable to clean up ino %lld",
					ip->i_ino);
		}
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}

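/*
 * Walk the buffers covering the failed range of the write and punch out
 * the delalloc reservations backing any new buffers, clearing their state
 * so nothing later mistakes them for valid data.
 */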
STATIC void
xfs_vm_write_failed(
	struct inode		*inode,
	struct page		*page,
	loff_t			pos,
	unsigned		len)
{
	loff_t			block_offset;
	loff_t			block_start;
	loff_t			block_end;
	loff_t			from = pos & (PAGE_CACHE_SIZE - 1);
	loff_t			to = from + len;
	struct buffer_head	*bh, *head;

	/*
	 * The request pos offset might be 32 or 64 bit, this is all fine
	 * on a 64-bit platform. However, for a 64-bit pos request on a
	 * 32-bit platform, the high 32 bits will be masked off if we
	 * evaluate the block_offset via (pos & PAGE_MASK) because PAGE_MASK
	 * is 0xfffff000 as an unsigned long, hence the result is incorrect
	 * and could cause the following ASSERT to fail in most cases.
	 * To avoid this, evaluate the block_offset of the start of the
	 * page by using shifts rather than masks.
	 */
	block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT;

	ASSERT(block_offset + from == pos);

	head = page_buffers(page);
	block_start = 0;
	for (bh = head; bh != head || !block_start;
	     bh = bh->b_this_page, block_start = block_end,
	     block_offset += bh->b_size) {
		block_end = block_start + bh->b_size;

		/* skip buffers before the write */
		if (block_end <= from)
			continue;

		/* if the buffer is after the write, we're done */
		if (block_start >= to)
			break;

		if (!buffer_delay(bh))
			continue;

		if (!buffer_new(bh) && block_offset < i_size_read(inode))
			continue;

		xfs_vm_kill_delalloc_range(inode, block_offset,
					   block_offset + bh->b_size);

		/*
		 * This buffer does not contain data anymore. Make sure anyone
		 * who finds it knows that for certain.
		 */
		clear_buffer_delay(bh);
		clear_buffer_uptodate(bh);
		clear_buffer_mapped(bh);
		clear_buffer_new(bh);
		clear_buffer_dirty(bh);
	}
}

/*
 * This used to call block_write_begin(), but it unlocks and releases the page
 * on error, and we need that page to be able to punch stale delalloc blocks
 * out on failure. Hence we copy-n-waste it here and call xfs_vm_write_failed()
 * at the appropriate point.
 */
STATIC int
xfs_vm_write_begin(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		flags,
	struct page		**pagep,
	void			**fsdata)
{
	pgoff_t			index = pos >> PAGE_CACHE_SHIFT;
	struct page		*page;
	int			status;

	ASSERT(len <= PAGE_CACHE_SIZE);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	status = __block_write_begin(page, pos, len, xfs_get_blocks);
	if (unlikely(status)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);

		xfs_vm_write_failed(inode, page, pos, len);
		unlock_page(page);

		/*
		 * If the write is beyond EOF, we only want to kill blocks
		 * allocated in this write, not blocks that were previously
		 * written successfully.
		 */
		if (pos + len > isize) {
			ssize_t start = max_t(ssize_t, pos, isize);

			truncate_pagecache_range(inode, start, pos + len);
		}

		page_cache_release(page);
		page = NULL;
	}

	*pagep = page;
	return status;
}

/*
 * On failure, we only need to kill delalloc blocks beyond EOF in the range of
 * this specific write because they will never be written. Previous writes
 * beyond EOF where block allocation succeeded do not need to be trashed, so
 * only new blocks from this write should be trashed. For blocks within
 * EOF, generic_write_end() zeros them so they are safe to leave alone and be
 * written with all the other valid data.
 */
STATIC int
xfs_vm_write_end(
	struct file		*file,
	struct address_space	*mapping,
	loff_t			pos,
	unsigned		len,
	unsigned		copied,
	struct page		*page,
	void			*fsdata)
{
	int			ret;

	ASSERT(len <= PAGE_CACHE_SIZE);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (unlikely(ret < len)) {
		struct inode	*inode = mapping->host;
		size_t		isize = i_size_read(inode);
		loff_t		to = pos + len;

		if (to > isize) {
			/* only kill blocks in this write beyond EOF */
			if (pos > isize)
				isize = pos;
			xfs_vm_kill_delalloc_range(inode, isize, to);
			truncate_pagecache_range(inode, isize, to);
		}
	}
	return ret;
}

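/*
 * ->bmap is used by callers like the FIBMAP ioctl that want a raw block
 * mapping, so flush dirty data first to give them a stable layout. The
 * shared iolock keeps new writes out while we flush.
 */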
STATIC sector_t
xfs_vm_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);

	trace_xfs_vm_bmap(XFS_I(inode));
	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	filemap_write_and_wait(mapping);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
	return generic_block_bmap(mapping, block, xfs_get_blocks);
}

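/*
 * The read paths are a straight layering over the generic mpage code, with
 * xfs_get_blocks() supplying the block mapping.
 */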
STATIC int
xfs_vm_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, xfs_get_blocks);
}

STATIC int
xfs_vm_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
}

const struct address_space_operations xfs_address_space_operations = {
	.readpage		= xfs_vm_readpage,
	.readpages		= xfs_vm_readpages,
	.writepage		= xfs_vm_writepage,
	.writepages		= xfs_vm_writepages,
	.releasepage		= xfs_vm_releasepage,
	.invalidatepage		= xfs_vm_invalidatepage,
	.write_begin		= xfs_vm_write_begin,
	.write_end		= xfs_vm_write_end,
	.bmap			= xfs_vm_bmap,
	.direct_IO		= xfs_vm_direct_IO,
	.migratepage		= buffer_migrate_page,
	.is_partially_uptodate	= block_is_partially_uptodate,
	.error_remove_page	= generic_error_remove_page,
};