/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"


#define XFS_WRITEIO_ALIGN(mp,off)	(((off) >> mp->m_writeio_log) \
					 << mp->m_writeio_log)
#define XFS_WRITE_IMAPS		XFS_BMAP_MAX_NMAP

STATIC int
xfs_iomap_eof_align_last_fsb(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_extlen_t	extsize,
	xfs_fileoff_t	*last_fsb)
{
	xfs_fileoff_t	new_last_fsb = 0;
	xfs_extlen_t	align = 0;
	int		eof, error;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
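		/*
		 * Example: with m_dalign = 16 blocks and no swalloc, a
		 * request whose last_fsb is 100 is rounded up to block 112.
		 */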
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
			new_last_fsb = roundup_64(*last_fsb, align);
	}

	/*
	 * Always round up the allocation request to an extent boundary
	 * (when the file is on a real-time subvolume or has a di_extsize
	 * hint).
	 */
	if (extsize) {
		if (new_last_fsb)
			align = roundup_64(new_last_fsb, extsize);
		else
			align = extsize;
		new_last_fsb = roundup_64(*last_fsb, align);
	}

	if (new_last_fsb) {
		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
		if (error)
			return error;
		if (eof)
			*last_fsb = new_last_fsb;
	}
	return 0;
}

STATIC int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x\n",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return EFSCORRUPTED;
}

int
xfs_iomap_write_direct(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nmaps)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	count_fsb, resaligned;
	xfs_fsblock_t	firstfsb;
	xfs_extlen_t	extsz, temp;
	int		nimaps;
	int		bmapi_flag;
	int		quota_flag;
	int		rt;
	xfs_trans_t	*tp;
	xfs_bmap_free_t	free_list;
	uint		qblocks, resblks, resrtextents;
	int		committed;
	int		error;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	if ((offset + count) > XFS_ISIZE(ip)) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return XFS_ERROR(error);
	} else {
		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
			last_fsb = MIN(last_fsb, (xfs_fileoff_t)
					imap->br_blockcount +
					imap->br_startoff);
	}
	count_fsb = last_fsb - offset_fsb;
	ASSERT(count_fsb > 0);

	resaligned = count_fsb;
	if (unlikely(extsz)) {
		if ((temp = do_mod(offset_fsb, extsz)))
			resaligned += temp;
		if ((temp = do_mod(resaligned, extsz)))
			resaligned += extsz - temp;
	}
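	/*
	 * Example: offset_fsb = 10, count_fsb = 20 and extsz = 8 give
	 * resaligned = 20 + 2 + 2 = 24 blocks, i.e. the extent-size
	 * aligned block range [8, 32).
	 */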

	if (unlikely(rt)) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resrtextents = 0;
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	/*
	 * Allocate and setup the transaction
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
	error = xfs_trans_reserve(tp, resblks,
			XFS_WRITE_LOG_RES(mp), resrtextents,
			XFS_TRANS_PERM_LOG_RES,
			XFS_WRITE_LOG_COUNT);
	/*
	 * Check for running out of space, note: need lock to return
	 */
	if (error) {
		xfs_trans_cancel(tp, 0);
		return XFS_ERROR(error);
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	bmapi_flag = 0;
	if (offset < XFS_ISIZE(ip) || extsz)
		bmapi_flag |= XFS_BMAPI_PREALLOC;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	xfs_bmap_init(&free_list, &firstfsb);
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flag,
				&firstfsb, 0, imap, &nimaps, &free_list);
	if (error)
		goto out_bmap_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error)
		goto out_bmap_cancel;
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = XFS_ERROR(ENOSPC);
		goto out_unlock;
	}

	if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
	goto out_unlock;
}

/*
 * If the caller is doing a write at the end of the file, then extend the
 * allocation out to the file system's write iosize.  We clean up any extra
 * space left over when the file is closed in xfs_inactive().
 *
 * If we find we already have delalloc preallocation beyond EOF, don't do more
 * preallocation as it is not needed.
 */
STATIC int
xfs_iomap_eof_want_preallocate(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap,
	int		nimaps,
	int		*prealloc)
{
	xfs_fileoff_t	start_fsb;
	xfs_filblks_t	count_fsb;
	xfs_fsblock_t	firstblock;
	int		n, error, imaps;
	int		found_delalloc = 0;

	*prealloc = 0;
	if (offset + count <= XFS_ISIZE(ip))
		return 0;

	/*
	 * If there are any real blocks past eof, then don't
	 * do any speculative allocation.
	 */
	start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	while (count_fsb > 0) {
		imaps = nimaps;
		firstblock = NULLFSBLOCK;
		error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
				       0);
		if (error)
			return error;
		for (n = 0; n < imaps; n++) {
			if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
			    (imap[n].br_startblock != DELAYSTARTBLOCK))
				return 0;
			start_fsb += imap[n].br_blockcount;
			count_fsb -= imap[n].br_blockcount;

			if (imap[n].br_startblock == DELAYSTARTBLOCK)
				found_delalloc = 1;
		}
	}
	if (!found_delalloc)
		*prealloc = 1;
	return 0;
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_mount	*mp,
	struct xfs_inode	*ip)
{
	xfs_fsblock_t		alloc_blocks = 0;

	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		int shift = 0;
		int64_t freesp;

		/*
		 * rounddown_pow_of_two() returns an undefined result
		 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
		 * ensure we always pass in a non-zero value.
		 */
		alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1;
		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
					rounddown_pow_of_two(alloc_blocks));

		xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
		freesp = mp->m_sb.sb_fdblocks;
		if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
			shift = 2;
			if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
				shift++;
			if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
				shift++;
			if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
				shift++;
			if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
				shift++;
		}
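		/*
		 * Example: free space below the 1% threshold yields
		 * shift = 6, cutting the preallocation to 1/64th.
		 */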
		if (shift)
			alloc_blocks >>= shift;
	}

	if (alloc_blocks < mp->m_writeio_blocks)
		alloc_blocks = mp->m_writeio_blocks;

	return alloc_blocks;
}

int
xfs_iomap_write_delay(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*ret_imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_off_t	aligned_offset;
	xfs_fileoff_t	ioalign;
	xfs_extlen_t	extsz;
	int		nimaps;
	xfs_bmbt_irec_t	imap[XFS_WRITE_IMAPS];
	int		prealloc, flushed = 0;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Make sure that the dquots are there.  This doesn't hold
	 * the ilock across a disk read.
	 */
	error = xfs_qm_dqattach_locked(ip, 0);
	if (error)
		return XFS_ERROR(error);

	extsz = xfs_get_extsz_hint(ip);
	offset_fsb = XFS_B_TO_FSBT(mp, offset);

	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
				imap, XFS_WRITE_IMAPS, &prealloc);
	if (error)
		return error;

retry:
	if (prealloc) {
		xfs_fsblock_t	alloc_blocks = xfs_iomap_prealloc_size(mp, ip);

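		/*
		 * Example: with m_writeio_log = 16 (64k write alignment),
		 * a byte offset of 100000 is truncated down to 65536 here.
		 */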
		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
		last_fsb = ioalign + alloc_blocks;
	} else {
		last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
	}

	if (prealloc || extsz) {
		error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
		if (error)
			return error;
	}

	/*
	 * Make sure preallocation does not create extents beyond the range we
	 * actually support in this filesystem.
	 */
	if (last_fsb > XFS_B_TO_FSB(mp, mp->m_maxioffset))
		last_fsb = XFS_B_TO_FSB(mp, mp->m_maxioffset);

	ASSERT(last_fsb > offset_fsb);

	nimaps = XFS_WRITE_IMAPS;
	error = xfs_bmapi_delay(ip, offset_fsb, last_fsb - offset_fsb,
				imap, &nimaps, XFS_BMAPI_ENTIRE);
	switch (error) {
	case 0:
	case ENOSPC:
	case EDQUOT:
		break;
	default:
		return XFS_ERROR(error);
	}

	/*
	 * If bmapi returned us nothing, we got either ENOSPC or EDQUOT.  For
	 * ENOSPC, flush all other inodes with delalloc blocks to free up
	 * some of the excess reserved metadata space.  For both cases, retry
	 * without EOF preallocation.
	 */
	if (nimaps == 0) {
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (flushed)
			return XFS_ERROR(error ? error : ENOSPC);

		if (error == ENOSPC) {
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
			xfs_flush_inodes(ip);
			xfs_ilock(ip, XFS_ILOCK_EXCL);
		}

		flushed = 1;
		error = 0;
		prealloc = 0;
		goto retry;
	}

	if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
		return xfs_alert_fsblock_zero(ip, &imap[0]);

	*ret_imap = imap[0];
	return 0;
}

/*
 * Pass in a delayed allocate extent, convert it to real extents;
 * return to the caller the extent we create which maps on top of
 * the originating caller's request.
 *
 * Called without a lock on the inode.
 *
 * We no longer bother to look at the incoming map - all we have to
 * guarantee is that whatever we allocate fills the required range.
 */
int
xfs_iomap_write_allocate(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count,
	xfs_bmbt_irec_t	*imap)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb, last_block;
	xfs_fileoff_t	end_fsb, map_start_fsb;
	xfs_fsblock_t	first_block;
	xfs_bmap_free_t	free_list;
	xfs_filblks_t	count_fsb;
	xfs_trans_t	*tp;
	int		nimaps, committed;
	int		error = 0;
	int		nres;

	/*
	 * Make sure that the dquots are there.
	 */
	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return XFS_ERROR(error);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = imap->br_blockcount;
	map_start_fsb = imap->br_startoff;

	XFS_STATS_ADD(xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));

	while (count_fsb != 0) {
		/*
		 * Set up a transaction with which to allocate the
		 * backing store for the file.  Do allocations in a
		 * loop until we get some space in the range we are
		 * interested in.  The other space that might be allocated
		 * is in the delayed allocation extent on which we sit
		 * but before our buffer starts.
		 */

		nimaps = 0;
		while (nimaps == 0) {
			tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
			tp->t_flags |= XFS_TRANS_RESERVE;
			nres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
			error = xfs_trans_reserve(tp, nres,
					XFS_WRITE_LOG_RES(mp),
					0, XFS_TRANS_PERM_LOG_RES,
					XFS_WRITE_LOG_COUNT);
			if (error) {
				xfs_trans_cancel(tp, 0);
				return XFS_ERROR(error);
			}
			xfs_ilock(ip, XFS_ILOCK_EXCL);
			xfs_trans_ijoin(tp, ip, 0);

			xfs_bmap_init(&free_list, &first_block);

			/*
			 * It is possible that the extents have changed since
			 * we did the read call as we dropped the ilock for a
			 * while.  We have to be careful about truncates or
			 * hole punches here - we are not allowed to allocate
			 * non-delalloc blocks here.
			 *
			 * The only protection against truncation is the pages
			 * for the range we are being asked to convert are
			 * locked and hence a truncate will block on them
			 * first.
			 *
			 * As a result, if we go beyond the range we really
			 * need and hit a delalloc extent boundary followed by
			 * a hole while we have excess blocks in the map, we
			 * will fill the hole incorrectly and overrun the
			 * transaction reservation.
			 *
			 * Using a single map prevents this as we are forced to
			 * check each map for overlap with the desired range
			 * and abort as soon as we find it.  Also, given that
			 * we only return a single map, having one beyond what
			 * we can return is probably a bit silly.
			 *
			 * We also need to check that we don't go beyond EOF;
			 * this is a truncate optimisation as a truncate sets
			 * the new file size before blocking on the pages we
			 * currently have locked under writeback.  Because they
			 * are about to be tossed, we don't need to write them
			 * back....
			 */
			nimaps = 1;
			end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
			error = xfs_bmap_last_offset(NULL, ip, &last_block,
						     XFS_DATA_FORK);
			if (error)
				goto trans_cancel;

			last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
			if ((map_start_fsb + count_fsb) > last_block) {
				count_fsb = last_block - map_start_fsb;
				if (count_fsb == 0) {
					error = EAGAIN;
					goto trans_cancel;
				}
			}

			/*
			 * From this point onwards we overwrite the imap
			 * pointer that the caller gave to us.
			 */
			error = xfs_bmapi_write(tp, ip, map_start_fsb,
						count_fsb, 0, &first_block, 1,
						imap, &nimaps, &free_list);
			if (error)
				goto trans_cancel;

			error = xfs_bmap_finish(&tp, &free_list, &committed);
			if (error)
				goto trans_cancel;

			error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
			if (error)
				goto error0;

			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}

		/*
		 * See if we were able to allocate an extent that
		 * covers at least part of the caller's request
		 */
		if (!(imap->br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, imap);

		if ((offset_fsb >= imap->br_startoff) &&
		    (offset_fsb < (imap->br_startoff +
				   imap->br_blockcount))) {
			XFS_STATS_INC(xs_xstrat_quick);
			return 0;
		}

		/*
		 * So far we have not mapped the requested part of the
		 * file, just surrounding data, try again.
		 */
		count_fsb -= imap->br_blockcount;
		map_start_fsb = imap->br_startoff + imap->br_blockcount;
	}

trans_cancel:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	size_t		count)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	xfs_fsblock_t	firstfsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	xfs_bmap_free_t	free_list;
	xfs_fsize_t	i_size;
	uint		resblks;
	int		committed;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
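	/*
	 * Example: with 4k blocks, offset = 10000 and count = 3000 give
	 * offset_fsb = 2 (rounded down) and count_fsb = 4 - 2 = 2, so the
	 * conversion covers every block the byte range touches.
	 */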

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we open code the transaction allocation here
		 * to pass KM_NOFS: we can't risk recursing back into
		 * the filesystem here as we might be asked to write out
		 * the same inode that we complete here and might deadlock
		 * on the iolock.
		 */
		xfs_wait_for_freeze(mp, SB_FREEZE_TRANS);
		tp = _xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE, KM_NOFS);
		tp->t_flags |= XFS_TRANS_RESERVE;
		error = xfs_trans_reserve(tp, resblks,
				XFS_WRITE_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES,
				XFS_WRITE_LOG_COUNT);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return XFS_ERROR(error);
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		xfs_bmap_init(&free_list, &firstfsb);
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, &firstfsb,
					1, &imap, &nimaps, &free_list);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;

		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_bmap_finish(&tp, &free_list, &committed);
		if (error)
			goto error_on_bmapi_transaction;

		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return XFS_ERROR(error);

		if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return XFS_ERROR(error);
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"


#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	return -EFSCORRUPTED;
}

int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	u16			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		return xfs_alert_fsblock_zero(ip, imap);

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
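		/*
		 * Real extent: xfs_fsb_to_db() converts the filesystem
		 * block number to 512-byte basic blocks, and BBTOB()
		 * scales that to the byte address iomap expects.
		 */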
		iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
	iomap->flags = flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;
	return 0;
}

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if last_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}

int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	struct xfs_bmbt_irec	*imap)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	int			quota_flag;
	uint			qblocks, resblks;
	unsigned int		resrtextents = 0;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	uint			tflags = 0;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		resrtextents = qblocks = resaligned;
		resrtextents /= mp->m_sb.sb_rextsize;
		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		quota_flag = XFS_QMOPT_RES_RTBLKS;
	} else {
		resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		quota_flag = XFS_QMOPT_RES_REGBLKS;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then
	 * crash we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten
	 * extent callback for DAX.  This also means that we need to be able to
	 * dip into the reserve block pool for bmbt block allocation if there
	 * is no space left but we need to do unwritten extent conversion.
	 */
	if (IS_DAX(VFS_I(ip))) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			tflags |= XFS_TRANS_RESERVE;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}
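	/*
	 * Note: the doubled reservation above is the same two-btree-split
	 * reservation that xfs_iomap_write_unwritten() uses below.
	 */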
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, resrtextents,
			tflags, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_res_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	/*
	 * Copy any maps to caller's array and return any error.
	 */
	if (nimaps == 0) {
		error = -ENOSPC;
		goto out_unlock;
	}

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock)))
		error = xfs_alert_fsblock_zero(ip, imap);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_res_cancel:
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	/* no hi watermark, no throttle */
	if (!dq->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	int64_t			freesp;
	int			shift = 0;

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved;
	if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < dq->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}
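	/*
	 * Example: quota headroom below the 3% low-space watermark (but
	 * above 1%) gives shift = 4, cutting the prealloc candidate to
	 * 1/16th.
	 */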

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full.  The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size.  Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > MAXEXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}
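	/*
	 * Example: three contiguous preceding extents of 100, 50 and 25
	 * blocks give plen = 175, and hence a preallocation basis of
	 * 350 blocks below.
	 */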

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to MAXEXTLEN
	 * rather than falling short due to things like stripe unit/width
	 * alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * MAXEXTLEN is not a power of two value but we round the prealloc down
	 * to the nearest power of two value after throttling.  To prevent the
	 * round down from unconditionally reducing the maximum supported
	 * prealloc size, we round up first, apply appropriate throttling,
	 * round down and cap the value to MAXEXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
				       alloc_blocks);

	freesp = percpu_counter_read_positive(&mp->m_fdblocks);
	if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_3_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_2_PCNT])
			shift++;
		if (freesp < mp->m_low_space[XFS_LOWSP_1_PCNT])
			shift++;
	}

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > MAXEXTLEN)
		alloc_blocks = MAXEXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard.  This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than MAXEXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real.  Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that
		 * we complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
				XFS_TRANS_RESERVE, &tp);
		if (error)
			return error;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		error = xfs_trans_reserve_quota_nblks(tp, ip, resblks, 0,
				XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_d.di_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock)))
			return xfs_alert_fsblock_zero(ip, &imap);

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & IOMAP_ZERO) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

static int
xfs_ilock_for_iomap(
	struct xfs_inode	*ip,
	unsigned		flags,
	unsigned		*lockmode)
{
	unsigned		mode = XFS_ILOCK_SHARED;
	bool			is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);

	/*
	 * COW writes may allocate delalloc space or convert unwritten COW
	 * extents, so we need to make sure to take the lock exclusively here.
	 */
	if (xfs_is_cow_inode(ip) && is_write)
		mode = XFS_ILOCK_EXCL;

	/*
	 * Extents not yet cached require exclusive access, don't block.  This
	 * is an opencoded xfs_ilock_data_map_shared() call but with
	 * non-blocking behaviour.
	 */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		if (flags & IOMAP_NOWAIT)
			return -EAGAIN;
		mode = XFS_ILOCK_EXCL;
	}

relock:
	if (flags & IOMAP_NOWAIT) {
		if (!xfs_ilock_nowait(ip, mode))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, mode);
	}

	/*
	 * The reflink iflag could have changed since the earlier unlocked
	 * check, so if we got ILOCK_SHARED for a write but we're now a
	 * reflink inode we have to switch to ILOCK_EXCL and relock.
	 */
	if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_cow_inode(ip)) {
		xfs_iunlock(ip, mode);
		mode = XFS_ILOCK_EXCL;
		goto relock;
	}

	*lockmode = mode;
	return 0;
}

static int
xfs_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap, cmap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	u16			iomap_flags = 0;
	unsigned		lockmode;

	ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Writes that span EOF might trigger an IO size update on completion,
	 * so consider them to be dirty for the purposes of O_DSYNC even if
	 * no other metadata changes are pending or have been made here.
	 */
	if (offset + length > i_size_read(inode))
		iomap_flags |= IOMAP_F_DIRTY;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode, flags & IOMAP_DIRECT);
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);

allocate_blocks:
	error = -EAGAIN;
	if (flags & IOMAP_NOWAIT)
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done somewhat symmetric with the work writeback does.  This
	 * is a completely arbitrary number pulled out of thin air as a best
	 * guess for initial testing.
	 *
	 * Note that the value needs to be less than 32-bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			&imap);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);

out_found_cow:
	xfs_iunlock(ip, lockmode);
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
		if (error)
			return error;
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};

static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	ASSERT(!XFS_IS_REALTIME_INODE(ip));

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing a hole. */
	if ((flags & IOMAP_ZERO) && imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done somewhat symmetric
		 * with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32-bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}

	error = xfs_qm_dqattach_locked(ip, false);
	if (error)
		goto out_unlock;

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
			prealloc_blocks = mp->m_allocsize_blocks;
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

retry:
	error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks,
			allocfork == XFS_DATA_FORK ? &imap : &cmap,
			allocfork == XFS_DATA_FORK ? &icur : &ccur,
			allocfork == XFS_DATA_FORK ? eof : cow_eof);
	switch (error) {
	case 0:
		break;
	case -ENOSPC:
	case -EDQUOT:
		/* retry without any preallocation */
		trace_xfs_delalloc_enospc(ip, offset, count);
		if (prealloc_blocks) {
			prealloc_blocks = 0;
			goto retry;
		}
		/*FALLTHRU*/
	default:
		goto out_unlock;
	}

	if (allocfork == XFS_COW_FORK) {
		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);

found_imap:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);

found_cow:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
		if (error)
			return error;
	} else {
		xfs_trim_extent(&cmap, offset_fsb,
				imap.br_startoff - offset_fsb);
	}
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_fsb;
	xfs_fileoff_t		end_fsb;
	int			error = 0;

	if (iomap->type != IOMAP_DELALLOC)
		return 0;

	/*
	 * Behave as if the write failed if drop writes is enabled.  Set the
	 * NEW flag to force delalloc cleanup.
	 */
	if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_DROP_WRITES)) {
		iomap->flags |= IOMAP_F_NEW;
		written = 0;
	}

	/*
	 * start_fsb refers to the first unused block after a short write.  If
	 * nothing was written, round offset down to point at the first block
	 * in the range.
	 */
	if (unlikely(!written))
		start_fsb = XFS_B_TO_FSBT(mp, offset);
	else
		start_fsb = XFS_B_TO_FSB(mp, offset + written);
	end_fsb = XFS_B_TO_FSB(mp, offset + length);
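	/*
	 * Example: a 16k delalloc mapping at offset 0 on 4k blocks where
	 * only 5000 bytes were written gives start_fsb = 2 and end_fsb = 4,
	 * so the untouched blocks 2 and 3 are punched out below while the
	 * partly written block 1 is kept.
	 */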

	/*
	 * Trim delalloc blocks if they were allocated by this write and we
	 * didn't manage to write the whole range.
	 *
	 * We don't need to care about racing delalloc as we hold i_mutex
	 * across the reserve/allocate/unreserve calls.  If there are delalloc
	 * blocks in the range, they are ours.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
					 XFS_FSB_TO_B(mp, end_fsb) - 1);

		error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
						      end_fsb - start_fsb);
		if (error && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert(mp, "%s: unable to clean up ino %lld",
				__func__, ip->i_ino);
			return error;
		}
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};

static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned		lockmode;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (!error && (flags & IOMAP_REPORT))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};

static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		if (error)
			goto out_unlock;
	}

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it, capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	xfs_trim_extent(&imap, offset_fsb, end_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!XFS_IFORK_Q(ip) || !ip->i_afp->if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};