1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2014 Red Hat, Inc.
4 * All Rights Reserved.
5 */
6#include "xfs.h"
7#include "xfs_fs.h"
8#include "xfs_shared.h"
9#include "xfs_format.h"
10#include "xfs_log_format.h"
11#include "xfs_trans_resv.h"
12#include "xfs_bit.h"
13#include "xfs_mount.h"
14#include "xfs_sb.h"
15#include "xfs_defer.h"
16#include "xfs_btree.h"
17#include "xfs_trans.h"
18#include "xfs_alloc.h"
19#include "xfs_rmap.h"
20#include "xfs_rmap_btree.h"
21#include "xfs_trace.h"
22#include "xfs_errortag.h"
23#include "xfs_error.h"
24#include "xfs_inode.h"
25#include "xfs_ag.h"
26#include "xfs_health.h"
27#include "xfs_rmap_item.h"
28
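/* Memory cache for deferred reverse mapping update (intent) items. */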
29struct kmem_cache *xfs_rmap_intent_cache;
30
31/*
32 * Lookup the first record less than or equal to [bno, len, owner, offset]
33 * in the btree given by cur.
34 */
35int
36xfs_rmap_lookup_le(
37 struct xfs_btree_cur *cur,
38 xfs_agblock_t bno,
39 uint64_t owner,
40 uint64_t offset,
41 unsigned int flags,
42 struct xfs_rmap_irec *irec,
43 int *stat)
44{
45 int get_stat = 0;
46 int error;
47
48 cur->bc_rec.r.rm_startblock = bno;
49 cur->bc_rec.r.rm_blockcount = 0;
50 cur->bc_rec.r.rm_owner = owner;
51 cur->bc_rec.r.rm_offset = offset;
52 cur->bc_rec.r.rm_flags = flags;
53
54 error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
55 if (error || !(*stat) || !irec)
56 return error;
57
58 error = xfs_rmap_get_rec(cur, irec, &get_stat);
59 if (error)
60 return error;
61 if (!get_stat) {
62 xfs_btree_mark_sick(cur);
63 return -EFSCORRUPTED;
64 }
65
66 return 0;
67}
68
69/*
70 * Lookup the record exactly matching [bno, len, owner, offset]
71 * in the btree given by cur.
72 */
73int
74xfs_rmap_lookup_eq(
75 struct xfs_btree_cur *cur,
76 xfs_agblock_t bno,
77 xfs_extlen_t len,
78 uint64_t owner,
79 uint64_t offset,
80 unsigned int flags,
81 int *stat)
82{
83 cur->bc_rec.r.rm_startblock = bno;
84 cur->bc_rec.r.rm_blockcount = len;
85 cur->bc_rec.r.rm_owner = owner;
86 cur->bc_rec.r.rm_offset = offset;
87 cur->bc_rec.r.rm_flags = flags;
88 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
89}
90
91/*
92 * Update the record referred to by cur to the value given
93 * by [bno, len, owner, offset].
94 * This either works (return 0) or gets an EFSCORRUPTED error.
95 */
96STATIC int
97xfs_rmap_update(
98 struct xfs_btree_cur *cur,
99 struct xfs_rmap_irec *irec)
100{
101 union xfs_btree_rec rec;
102 int error;
103
104 trace_xfs_rmap_update(cur, irec->rm_startblock, irec->rm_blockcount,
105 irec->rm_owner, irec->rm_offset, irec->rm_flags);
106
107 rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
108 rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
109 rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
110 rec.rmap.rm_offset = cpu_to_be64(
111 xfs_rmap_irec_offset_pack(irec));
112 error = xfs_btree_update(cur, &rec);
113 if (error)
114 trace_xfs_rmap_update_error(cur, error, _RET_IP_);
115 return error;
116}
117
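/*
 * Insert a new record for [agbno, len, owner, offset, flags], failing with
 * -EFSCORRUPTED if an identical record is already present.
 */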
118int
119xfs_rmap_insert(
120 struct xfs_btree_cur *rcur,
121 xfs_agblock_t agbno,
122 xfs_extlen_t len,
123 uint64_t owner,
124 uint64_t offset,
125 unsigned int flags)
126{
127 int i;
128 int error;
129
130 trace_xfs_rmap_insert(rcur, agbno, len, owner, offset, flags);
131
132 error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
133 if (error)
134 goto done;
135 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 0)) {
136 xfs_btree_mark_sick(rcur);
137 error = -EFSCORRUPTED;
138 goto done;
139 }
140
141 rcur->bc_rec.r.rm_startblock = agbno;
142 rcur->bc_rec.r.rm_blockcount = len;
143 rcur->bc_rec.r.rm_owner = owner;
144 rcur->bc_rec.r.rm_offset = offset;
145 rcur->bc_rec.r.rm_flags = flags;
146 error = xfs_btree_insert(rcur, &i);
147 if (error)
148 goto done;
149 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
150 xfs_btree_mark_sick(rcur);
151 error = -EFSCORRUPTED;
152 goto done;
153 }
154done:
155 if (error)
156 trace_xfs_rmap_insert_error(rcur, error, _RET_IP_);
157 return error;
158}
159
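/*
 * Remove the record exactly matching [agbno, len, owner, offset, flags],
 * failing with -EFSCORRUPTED if no such record exists.
 */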
160STATIC int
161xfs_rmap_delete(
162 struct xfs_btree_cur *rcur,
163 xfs_agblock_t agbno,
164 xfs_extlen_t len,
165 uint64_t owner,
166 uint64_t offset,
167 unsigned int flags)
168{
169 int i;
170 int error;
171
172 trace_xfs_rmap_delete(rcur, agbno, len, owner, offset, flags);
173
174 error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
175 if (error)
176 goto done;
177 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
178 xfs_btree_mark_sick(rcur);
179 error = -EFSCORRUPTED;
180 goto done;
181 }
182
183 error = xfs_btree_delete(rcur, &i);
184 if (error)
185 goto done;
186 if (XFS_IS_CORRUPT(rcur->bc_mp, i != 1)) {
187 xfs_btree_mark_sick(rcur);
188 error = -EFSCORRUPTED;
189 goto done;
190 }
191done:
192 if (error)
193 trace_xfs_rmap_delete_error(rcur, error, _RET_IP_);
194 return error;
195}
196
197/* Convert an internal btree record to an rmap record. */
198xfs_failaddr_t
199xfs_rmap_btrec_to_irec(
200 const union xfs_btree_rec *rec,
201 struct xfs_rmap_irec *irec)
202{
203 irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
204 irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
205 irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
206 return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
207 irec);
208}
209
210/* Simple checks for rmap records. */
211xfs_failaddr_t
212xfs_rmap_check_irec(
213 struct xfs_perag *pag,
214 const struct xfs_rmap_irec *irec)
215{
216 struct xfs_mount *mp = pag_mount(pag);
217 bool is_inode;
218 bool is_unwritten;
219 bool is_bmbt;
220 bool is_attr;
221
222 if (irec->rm_blockcount == 0)
223 return __this_address;
224 if (irec->rm_startblock <= XFS_AGFL_BLOCK(mp)) {
225 if (irec->rm_owner != XFS_RMAP_OWN_FS)
226 return __this_address;
227 if (irec->rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
228 return __this_address;
229 } else {
230 /* check for valid extent range, including overflow */
231 if (!xfs_verify_agbext(pag, irec->rm_startblock,
232 irec->rm_blockcount))
233 return __this_address;
234 }
235
236 if (!(xfs_verify_ino(mp, irec->rm_owner) ||
237 (irec->rm_owner <= XFS_RMAP_OWN_FS &&
238 irec->rm_owner >= XFS_RMAP_OWN_MIN)))
239 return __this_address;
240
241 /* Check flags. */
242 is_inode = !XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
243 is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
244 is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
245 is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;
246
247 if (is_bmbt && irec->rm_offset != 0)
248 return __this_address;
249
250 if (!is_inode && irec->rm_offset != 0)
251 return __this_address;
252
253 if (is_unwritten && (is_bmbt || !is_inode || is_attr))
254 return __this_address;
255
256 if (!is_inode && (is_bmbt || is_unwritten || is_attr))
257 return __this_address;
258
259 /* Check for a valid fork offset, if applicable. */
260 if (is_inode && !is_bmbt &&
261 !xfs_verify_fileext(mp, irec->rm_offset, irec->rm_blockcount))
262 return __this_address;
263
264 return NULL;
265}
266
267static inline xfs_failaddr_t
268xfs_rmap_check_btrec(
269 struct xfs_btree_cur *cur,
270 const struct xfs_rmap_irec *irec)
271{
272 return xfs_rmap_check_irec(to_perag(cur->bc_group), irec);
273}
274
275static inline int
276xfs_rmap_complain_bad_rec(
277 struct xfs_btree_cur *cur,
278 xfs_failaddr_t fa,
279 const struct xfs_rmap_irec *irec)
280{
281 struct xfs_mount *mp = cur->bc_mp;
282
283 if (xfs_btree_is_mem_rmap(cur->bc_ops))
284 xfs_warn(mp,
285 "In-Memory Reverse Mapping BTree record corruption detected at %pS!", fa);
286 else
287 xfs_warn(mp,
288 "Reverse Mapping BTree record corruption in AG %d detected at %pS!",
289 cur->bc_group->xg_gno, fa);
290 xfs_warn(mp,
291 "Owner 0x%llx, flags 0x%x, start block 0x%x block count 0x%x",
292 irec->rm_owner, irec->rm_flags, irec->rm_startblock,
293 irec->rm_blockcount);
294 xfs_btree_mark_sick(cur);
295 return -EFSCORRUPTED;
296}
297
298/*
299 * Get the data from the pointed-to record.
300 */
301int
302xfs_rmap_get_rec(
303 struct xfs_btree_cur *cur,
304 struct xfs_rmap_irec *irec,
305 int *stat)
306{
307 union xfs_btree_rec *rec;
308 xfs_failaddr_t fa;
309 int error;
310
311 error = xfs_btree_get_rec(cur, &rec, stat);
312 if (error || !*stat)
313 return error;
314
315 fa = xfs_rmap_btrec_to_irec(rec, irec);
316 if (!fa)
317 fa = xfs_rmap_check_btrec(cur, irec);
318 if (fa)
319 return xfs_rmap_complain_bad_rec(cur, fa, irec);
320
321 return 0;
322}
323
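/* Context for the neighbor queries below: the lookup key and the caller's result buffer. */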
324struct xfs_find_left_neighbor_info {
325 struct xfs_rmap_irec high;
326 struct xfs_rmap_irec *irec;
327};
328
329/* For each rmap given, figure out if it matches the key we want. */
330STATIC int
331xfs_rmap_find_left_neighbor_helper(
332 struct xfs_btree_cur *cur,
333 const struct xfs_rmap_irec *rec,
334 void *priv)
335{
336 struct xfs_find_left_neighbor_info *info = priv;
337
338 trace_xfs_rmap_find_left_neighbor_candidate(cur, rec->rm_startblock,
339 rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
340 rec->rm_flags);
341
342 if (rec->rm_owner != info->high.rm_owner)
343 return 0;
344 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
345 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
346 rec->rm_offset + rec->rm_blockcount - 1 != info->high.rm_offset)
347 return 0;
348
349 *info->irec = *rec;
350 return -ECANCELED;
351}
352
353/*
354 * Find the record to the left of the given extent, being careful only to
355 * return a match with the same owner and adjacent physical and logical
356 * block ranges.
357 */
358STATIC int
359xfs_rmap_find_left_neighbor(
360 struct xfs_btree_cur *cur,
361 xfs_agblock_t bno,
362 uint64_t owner,
363 uint64_t offset,
364 unsigned int flags,
365 struct xfs_rmap_irec *irec,
366 int *stat)
367{
368 struct xfs_find_left_neighbor_info info;
369 int found = 0;
370 int error;
371
372 *stat = 0;
373 if (bno == 0)
374 return 0;
375 info.high.rm_startblock = bno - 1;
376 info.high.rm_owner = owner;
377 if (!XFS_RMAP_NON_INODE_OWNER(owner) &&
378 !(flags & XFS_RMAP_BMBT_BLOCK)) {
379 if (offset == 0)
380 return 0;
381 info.high.rm_offset = offset - 1;
382 } else
383 info.high.rm_offset = 0;
384 info.high.rm_flags = flags;
385 info.high.rm_blockcount = 0;
386 info.irec = irec;
387
388 trace_xfs_rmap_find_left_neighbor_query(cur, bno, 0, owner, offset,
389 flags);
390
391 /*
392 * Historically, we always used the range query to walk every reverse
393 * mapping that could possibly overlap the key that the caller asked
394 * for, and filter out the ones that don't. That is very slow when
395 * there are a lot of records.
396 *
397 * However, there are two scenarios where the classic btree search can
398 * produce correct results -- if the index contains a record that is an
399 * exact match for the lookup key; and if there are no other records
400 * between the record we want and the key we supplied.
401 *
402 * As an optimization, try a non-overlapped lookup first. This makes
403 * extent conversion and remap operations run a bit faster if the
404 * physical extents aren't being shared. If we don't find what we
405 * want, we fall back to the overlapped query.
406 */
407 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
408 &found);
409 if (error)
410 return error;
411 if (found)
412 error = xfs_rmap_find_left_neighbor_helper(cur, irec, &info);
413 if (!error)
414 error = xfs_rmap_query_range(cur, &info.high, &info.high,
415 xfs_rmap_find_left_neighbor_helper, &info);
416 if (error != -ECANCELED)
417 return error;
418
419 *stat = 1;
420 trace_xfs_rmap_find_left_neighbor_result(cur, irec->rm_startblock,
421 irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
422 irec->rm_flags);
423 return 0;
424}
425
426/* For each rmap given, figure out if it matches the key we want. */
427STATIC int
428xfs_rmap_lookup_le_range_helper(
429 struct xfs_btree_cur *cur,
430 const struct xfs_rmap_irec *rec,
431 void *priv)
432{
433 struct xfs_find_left_neighbor_info *info = priv;
434
435 trace_xfs_rmap_lookup_le_range_candidate(cur, rec->rm_startblock,
436 rec->rm_blockcount, rec->rm_owner, rec->rm_offset,
437 rec->rm_flags);
438
439 if (rec->rm_owner != info->high.rm_owner)
440 return 0;
441 if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) &&
442 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK) &&
443 (rec->rm_offset > info->high.rm_offset ||
444 rec->rm_offset + rec->rm_blockcount <= info->high.rm_offset))
445 return 0;
446
447 *info->irec = *rec;
448 return -ECANCELED;
449}
450
451/*
452 * Find the record to the left of the given extent, being careful only to
453 * return a match with the same owner and overlapping physical and logical
454 * block ranges. This is the overlapping-interval version of
455 * xfs_rmap_lookup_le.
456 */
457int
458xfs_rmap_lookup_le_range(
459 struct xfs_btree_cur *cur,
460 xfs_agblock_t bno,
461 uint64_t owner,
462 uint64_t offset,
463 unsigned int flags,
464 struct xfs_rmap_irec *irec,
465 int *stat)
466{
467 struct xfs_find_left_neighbor_info info;
468 int found = 0;
469 int error;
470
471 info.high.rm_startblock = bno;
472 info.high.rm_owner = owner;
473 if (!XFS_RMAP_NON_INODE_OWNER(owner) && !(flags & XFS_RMAP_BMBT_BLOCK))
474 info.high.rm_offset = offset;
475 else
476 info.high.rm_offset = 0;
477 info.high.rm_flags = flags;
478 info.high.rm_blockcount = 0;
479 *stat = 0;
480 info.irec = irec;
481
482 trace_xfs_rmap_lookup_le_range(cur, bno, 0, owner, offset, flags);
483
484 /*
485 * Historically, we always used the range query to walk every reverse
486 * mapping that could possibly overlap the key that the caller asked
487 * for, and filter out the ones that don't. That is very slow when
488 * there are a lot of records.
489 *
490 * However, there are two scenarios where the classic btree search can
491 * produce correct results -- if the index contains a record that is an
492 * exact match for the lookup key; and if there are no other records
493 * between the record we want and the key we supplied.
494 *
495 * As an optimization, try a non-overlapped lookup first. This makes
496 * scrub run much faster on most filesystems because bmbt records are
497 * usually an exact match for rmap records. If we don't find what we
498 * want, we fall back to the overlapped query.
499 */
500 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, irec,
501 &found);
502 if (error)
503 return error;
504 if (found)
505 error = xfs_rmap_lookup_le_range_helper(cur, irec, &info);
506 if (!error)
507 error = xfs_rmap_query_range(cur, &info.high, &info.high,
508 xfs_rmap_lookup_le_range_helper, &info);
509 if (error != -ECANCELED)
510 return error;
511
512 *stat = 1;
513 trace_xfs_rmap_lookup_le_range_result(cur, irec->rm_startblock,
514 irec->rm_blockcount, irec->rm_owner, irec->rm_offset,
515 irec->rm_flags);
516 return 0;
517}
518
519/*
520 * Perform all the relevant owner checks for a removal op. If we're doing an
521 * unknown-owner removal then we have no owner information to check.
522 */
523static int
524xfs_rmap_free_check_owner(
525 struct xfs_btree_cur *cur,
526 uint64_t ltoff,
527 struct xfs_rmap_irec *rec,
528 xfs_filblks_t len,
529 uint64_t owner,
530 uint64_t offset,
531 unsigned int flags)
532{
533 struct xfs_mount *mp = cur->bc_mp;
534 int error = 0;
535
536 if (owner == XFS_RMAP_OWN_UNKNOWN)
537 return 0;
538
539 /* Make sure the unwritten flag matches. */
540 if (XFS_IS_CORRUPT(mp,
541 (flags & XFS_RMAP_UNWRITTEN) !=
542 (rec->rm_flags & XFS_RMAP_UNWRITTEN))) {
543 xfs_btree_mark_sick(cur);
544 error = -EFSCORRUPTED;
545 goto out;
546 }
547
548 /* Make sure the owner matches what we expect to find in the tree. */
549 if (XFS_IS_CORRUPT(mp, owner != rec->rm_owner)) {
550 xfs_btree_mark_sick(cur);
551 error = -EFSCORRUPTED;
552 goto out;
553 }
554
555 /* Check the offset, if necessary. */
556 if (XFS_RMAP_NON_INODE_OWNER(owner))
557 goto out;
558
559 if (flags & XFS_RMAP_BMBT_BLOCK) {
560 if (XFS_IS_CORRUPT(mp,
561 !(rec->rm_flags & XFS_RMAP_BMBT_BLOCK))) {
562 xfs_btree_mark_sick(cur);
563 error = -EFSCORRUPTED;
564 goto out;
565 }
566 } else {
567 if (XFS_IS_CORRUPT(mp, rec->rm_offset > offset)) {
568 xfs_btree_mark_sick(cur);
569 error = -EFSCORRUPTED;
570 goto out;
571 }
572 if (XFS_IS_CORRUPT(mp,
573 offset + len > ltoff + rec->rm_blockcount)) {
574 xfs_btree_mark_sick(cur);
575 error = -EFSCORRUPTED;
576 goto out;
577 }
578 }
579
580out:
581 return error;
582}
583
584/*
585 * Find the extent in the rmap btree and remove it.
586 *
587 * The record we find should always be an exact match for the extent that we're
588 * looking for, since we insert them into the btree without modification.
589 *
590 * Special Case #1: when growing the filesystem, we "free" an extent when
591 * growing the last AG. This extent is new space and so it is not tracked as
592 * used space in the btree. The growfs code will pass in an owner of
593 * XFS_RMAP_OWN_NULL to indicate that it expects there to be no owner of this
594 * extent. We verify this by checking that the extent lookup returns a record
595 * that does not overlap the new space.
596 *
597 * Special Case #2: EFIs do not record the owner of the extent, so when
598 * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
599 * btree to ignore the owner (i.e. wildcard match) so we don't trigger
600 * corruption checks during log recovery.
601 */
602STATIC int
603xfs_rmap_unmap(
604 struct xfs_btree_cur *cur,
605 xfs_agblock_t bno,
606 xfs_extlen_t len,
607 bool unwritten,
608 const struct xfs_owner_info *oinfo)
609{
610 struct xfs_mount *mp = cur->bc_mp;
611 struct xfs_rmap_irec ltrec;
612 uint64_t ltoff;
613 int error = 0;
614 int i;
615 uint64_t owner;
616 uint64_t offset;
617 unsigned int flags;
618 bool ignore_off;
619
620 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
621 ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
622 (flags & XFS_RMAP_BMBT_BLOCK);
623 if (unwritten)
624 flags |= XFS_RMAP_UNWRITTEN;
625 trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
626
627 /*
628 * We should always have a left record because there's a static record
629 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
630 * will not ever be removed from the tree.
631 */
632 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, &ltrec, &i);
633 if (error)
634 goto out_error;
635 if (XFS_IS_CORRUPT(mp, i != 1)) {
636 xfs_btree_mark_sick(cur);
637 error = -EFSCORRUPTED;
638 goto out_error;
639 }
640
641 trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
642 ltrec.rm_blockcount, ltrec.rm_owner, ltrec.rm_offset,
643 ltrec.rm_flags);
644 ltoff = ltrec.rm_offset;
645
646 /*
647 * For growfs, the incoming extent must be beyond the left record we
648 * just found as it is new space and won't be used by anyone. This is
649 * just a corruption check as we don't actually do anything with this
650 * extent. Note that we need to use >= instead of > because it might
651 * be the case that the "left" extent goes all the way to EOFS.
652 */
653 if (owner == XFS_RMAP_OWN_NULL) {
654 if (XFS_IS_CORRUPT(mp,
655 bno <
656 ltrec.rm_startblock + ltrec.rm_blockcount)) {
657 xfs_btree_mark_sick(cur);
658 error = -EFSCORRUPTED;
659 goto out_error;
660 }
661 goto out_done;
662 }
663
664 /*
665 * If we're doing an unknown-owner removal for EFI recovery, we expect
666 * to find the full range in the rmapbt or nothing at all. If we
667 * don't find any rmaps overlapping either end of the range, we're
668 * done. Hopefully this means that the EFI creator already queued
669 * (and finished) a RUI to remove the rmap.
670 */
671 if (owner == XFS_RMAP_OWN_UNKNOWN &&
672 ltrec.rm_startblock + ltrec.rm_blockcount <= bno) {
673 struct xfs_rmap_irec rtrec;
674
675 error = xfs_btree_increment(cur, 0, &i);
676 if (error)
677 goto out_error;
678 if (i == 0)
679 goto out_done;
680 error = xfs_rmap_get_rec(cur, &rtrec, &i);
681 if (error)
682 goto out_error;
683 if (XFS_IS_CORRUPT(mp, i != 1)) {
684 xfs_btree_mark_sick(cur);
685 error = -EFSCORRUPTED;
686 goto out_error;
687 }
688 if (rtrec.rm_startblock >= bno + len)
689 goto out_done;
690 }
691
692 /* Make sure the extent we found covers the entire freeing range. */
693 if (XFS_IS_CORRUPT(mp,
694 ltrec.rm_startblock > bno ||
695 ltrec.rm_startblock + ltrec.rm_blockcount <
696 bno + len)) {
697 xfs_btree_mark_sick(cur);
698 error = -EFSCORRUPTED;
699 goto out_error;
700 }
701
702 /* Check owner information. */
703 error = xfs_rmap_free_check_owner(cur, ltoff, &ltrec, len, owner,
704 offset, flags);
705 if (error)
706 goto out_error;
707
708 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
709 /* exact match, simply remove the record from rmap tree */
710 trace_xfs_rmap_delete(cur, ltrec.rm_startblock,
711 ltrec.rm_blockcount, ltrec.rm_owner,
712 ltrec.rm_offset, ltrec.rm_flags);
713 error = xfs_btree_delete(cur, &i);
714 if (error)
715 goto out_error;
716 if (XFS_IS_CORRUPT(mp, i != 1)) {
717 xfs_btree_mark_sick(cur);
718 error = -EFSCORRUPTED;
719 goto out_error;
720 }
721 } else if (ltrec.rm_startblock == bno) {
722 /*
723 * overlap left hand side of extent: move the start, trim the
724 * length and update the current record.
725 *
726 * ltbno ltlen
727 * Orig: |oooooooooooooooooooo|
728 * Freeing: |fffffffff|
729 * Result: |rrrrrrrrrr|
730 * bno len
731 */
732 ltrec.rm_startblock += len;
733 ltrec.rm_blockcount -= len;
734 if (!ignore_off)
735 ltrec.rm_offset += len;
736 error = xfs_rmap_update(cur, &ltrec);
737 if (error)
738 goto out_error;
739 } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
740 /*
741 * overlap right hand side of extent: trim the length and update
742 * the current record.
743 *
744 * ltbno ltlen
745 * Orig: |oooooooooooooooooooo|
746 * Freeing: |fffffffff|
747 * Result: |rrrrrrrrrr|
748 * bno len
749 */
750 ltrec.rm_blockcount -= len;
751 error = xfs_rmap_update(cur, &ltrec);
752 if (error)
753 goto out_error;
754 } else {
755
756 /*
757 * overlap middle of extent: trim the length of the existing
758 * record to the length of the new left-extent size, increment
759 * the insertion position so we can insert a new record
760 * containing the remaining right-extent space.
761 *
762 * ltbno ltlen
763 * Orig: |oooooooooooooooooooo|
764 * Freeing: |fffffffff|
765 * Result: |rrrrr| |rrrr|
766 * bno len
767 */
768 xfs_extlen_t orig_len = ltrec.rm_blockcount;
769
770 ltrec.rm_blockcount = bno - ltrec.rm_startblock;
771 error = xfs_rmap_update(cur, &ltrec);
772 if (error)
773 goto out_error;
774
775 error = xfs_btree_increment(cur, 0, &i);
776 if (error)
777 goto out_error;
778
779 cur->bc_rec.r.rm_startblock = bno + len;
780 cur->bc_rec.r.rm_blockcount = orig_len - len -
781 ltrec.rm_blockcount;
782 cur->bc_rec.r.rm_owner = ltrec.rm_owner;
783 if (ignore_off)
784 cur->bc_rec.r.rm_offset = 0;
785 else
786 cur->bc_rec.r.rm_offset = offset + len;
787 cur->bc_rec.r.rm_flags = flags;
788 trace_xfs_rmap_insert(cur, cur->bc_rec.r.rm_startblock,
789 cur->bc_rec.r.rm_blockcount,
790 cur->bc_rec.r.rm_owner,
791 cur->bc_rec.r.rm_offset,
792 cur->bc_rec.r.rm_flags);
793 error = xfs_btree_insert(cur, &i);
794 if (error)
795 goto out_error;
796 }
797
798out_done:
799 trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
800out_error:
801 if (error)
802 trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
803 return error;
804}
805
806#ifdef CONFIG_XFS_LIVE_HOOKS
807/*
808 * Use a static key here to reduce the overhead of rmapbt live updates. If
809 * the compiler supports jump labels, the static branch will be replaced by a
810 * nop sled when there are no hook users. Online fsck is currently the only
811 * caller, so this is a reasonable tradeoff.
812 *
813 * Note: Patching the kernel code requires taking the cpu hotplug lock. Other
814 * parts of the kernel allocate memory with that lock held, which means that
815 * XFS callers cannot hold any locks that might be used by memory reclaim or
816 * writeback when calling the static_branch_{inc,dec} functions.
817 */
818DEFINE_STATIC_XFS_HOOK_SWITCH(xfs_rmap_hooks_switch);
819
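/* Switch off rmap update hook calls. */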
820void
821xfs_rmap_hook_disable(void)
822{
823 xfs_hooks_switch_off(&xfs_rmap_hooks_switch);
824}
825
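/* Switch rmap update hook calls back on. */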
826void
827xfs_rmap_hook_enable(void)
828{
829 xfs_hooks_switch_on(&xfs_rmap_hooks_switch);
830}
831
832/* Call downstream hooks for a reverse mapping update. */
833static inline void
834xfs_rmap_update_hook(
835 struct xfs_trans *tp,
836 struct xfs_group *xg,
837 enum xfs_rmap_intent_type op,
838 xfs_agblock_t startblock,
839 xfs_extlen_t blockcount,
840 bool unwritten,
841 const struct xfs_owner_info *oinfo)
842{
843 if (xfs_hooks_switched_on(&xfs_rmap_hooks_switch)) {
844 struct xfs_rmap_update_params p = {
845 .startblock = startblock,
846 .blockcount = blockcount,
847 .unwritten = unwritten,
848 .oinfo = *oinfo, /* struct copy */
849 };
850
851 if (xg)
852 xfs_hooks_call(&xg->xg_rmap_update_hooks, op, &p);
853 }
854}
855
856/* Call the specified function during a reverse mapping update. */
857int
858xfs_rmap_hook_add(
859 struct xfs_group *xg,
860 struct xfs_rmap_hook *hook)
861{
862 return xfs_hooks_add(&xg->xg_rmap_update_hooks, &hook->rmap_hook);
863}
864
865/* Stop calling the specified function during a reverse mapping update. */
866void
867xfs_rmap_hook_del(
868 struct xfs_group *xg,
869 struct xfs_rmap_hook *hook)
870{
871 xfs_hooks_del(&xg->xg_rmap_update_hooks, &hook->rmap_hook);
872}
873
874/* Configure rmap update hook functions. */
875void
876xfs_rmap_hook_setup(
877 struct xfs_rmap_hook *hook,
878 notifier_fn_t mod_fn)
879{
880 xfs_hook_setup(&hook->rmap_hook, mod_fn);
881}
882#else
883# define xfs_rmap_update_hook(t, p, o, s, b, u, oi) do { } while (0)
884#endif /* CONFIG_XFS_LIVE_HOOKS */
885
886/*
887 * Remove a reference to an extent in the rmap btree.
888 */
889int
890xfs_rmap_free(
891 struct xfs_trans *tp,
892 struct xfs_buf *agbp,
893 struct xfs_perag *pag,
894 xfs_agblock_t bno,
895 xfs_extlen_t len,
896 const struct xfs_owner_info *oinfo)
897{
898 struct xfs_mount *mp = tp->t_mountp;
899 struct xfs_btree_cur *cur;
900 int error;
901
902 if (!xfs_has_rmapbt(mp))
903 return 0;
904
905 cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
906 xfs_rmap_update_hook(tp, pag_group(pag), XFS_RMAP_UNMAP, bno, len,
907 false, oinfo);
908 error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
909
910 xfs_btree_del_cursor(cur, error);
911 return error;
912}
913
914/*
915 * A mergeable rmap must have the same owner and the same values for
916 * the unwritten, attr_fork, and bmbt flags. The startblock and
917 * offset are checked separately.
918 */
919static bool
920xfs_rmap_is_mergeable(
921 struct xfs_rmap_irec *irec,
922 uint64_t owner,
923 unsigned int flags)
924{
925 if (irec->rm_owner == XFS_RMAP_OWN_NULL)
926 return false;
927 if (irec->rm_owner != owner)
928 return false;
929 if ((flags & XFS_RMAP_UNWRITTEN) ^
930 (irec->rm_flags & XFS_RMAP_UNWRITTEN))
931 return false;
932 if ((flags & XFS_RMAP_ATTR_FORK) ^
933 (irec->rm_flags & XFS_RMAP_ATTR_FORK))
934 return false;
935 if ((flags & XFS_RMAP_BMBT_BLOCK) ^
936 (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
937 return false;
938 return true;
939}
940
941/*
942 * When we allocate a new block, the first thing we do is add a reference to
943 * the extent in the rmap btree. This takes the form of a [agbno, length,
944 * owner, offset] record. Flags are encoded in the high bits of the offset
945 * field.
946 */
947STATIC int
948xfs_rmap_map(
949 struct xfs_btree_cur *cur,
950 xfs_agblock_t bno,
951 xfs_extlen_t len,
952 bool unwritten,
953 const struct xfs_owner_info *oinfo)
954{
955 struct xfs_mount *mp = cur->bc_mp;
956 struct xfs_rmap_irec ltrec;
957 struct xfs_rmap_irec gtrec;
958 int have_gt;
959 int have_lt;
960 int error = 0;
961 int i;
962 uint64_t owner;
963 uint64_t offset;
964 unsigned int flags = 0;
965 bool ignore_off;
966
967 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
968 ASSERT(owner != 0);
969 ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
970 (flags & XFS_RMAP_BMBT_BLOCK);
971 if (unwritten)
972 flags |= XFS_RMAP_UNWRITTEN;
973 trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
974 ASSERT(!xfs_rmap_should_skip_owner_update(oinfo));
975
976 /*
977 * For the initial lookup, look for an exact match or the left-adjacent
978 * record for our insertion point. This will also give us the record for
979 * start block contiguity tests.
980 */
981 error = xfs_rmap_lookup_le(cur, bno, owner, offset, flags, &ltrec,
982 &have_lt);
983 if (error)
984 goto out_error;
985 if (have_lt) {
986 trace_xfs_rmap_lookup_le_range_result(cur, ltrec.rm_startblock,
987 ltrec.rm_blockcount, ltrec.rm_owner,
988 ltrec.rm_offset, ltrec.rm_flags);
989
990 if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
991 have_lt = 0;
992 }
993
994 if (XFS_IS_CORRUPT(mp,
995 have_lt != 0 &&
996 ltrec.rm_startblock + ltrec.rm_blockcount > bno)) {
997 xfs_btree_mark_sick(cur);
998 error = -EFSCORRUPTED;
999 goto out_error;
1000 }
1001
1002 /*
1003 * Increment the cursor to see if we have a right-adjacent record to our
1004 * insertion point. This will give us the record for end block
1005 * contiguity tests.
1006 */
1007 error = xfs_btree_increment(cur, 0, &have_gt);
1008 if (error)
1009 goto out_error;
1010 if (have_gt) {
1011 error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
1012 if (error)
1013 goto out_error;
1014 if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
1015 xfs_btree_mark_sick(cur);
1016 error = -EFSCORRUPTED;
1017 goto out_error;
1018 }
1019 if (XFS_IS_CORRUPT(mp, bno + len > gtrec.rm_startblock)) {
1020 xfs_btree_mark_sick(cur);
1021 error = -EFSCORRUPTED;
1022 goto out_error;
1023 }
1024 trace_xfs_rmap_find_right_neighbor_result(cur,
1025 gtrec.rm_startblock, gtrec.rm_blockcount,
1026 gtrec.rm_owner, gtrec.rm_offset,
1027 gtrec.rm_flags);
1028 if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
1029 have_gt = 0;
1030 }
1031
1032 /*
1033 * Note: cursor currently points one record to the right of ltrec, even
1034 * if there is no record in the tree to the right.
1035 */
1036 if (have_lt &&
1037 ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
1038 (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
1039 /*
1040 * left edge contiguous, merge into left record.
1041 *
1042 * ltbno ltlen
1043 * orig: |ooooooooo|
1044 * adding: |aaaaaaaaa|
1045 * result: |rrrrrrrrrrrrrrrrrrr|
1046 * bno len
1047 */
1048 ltrec.rm_blockcount += len;
1049 if (have_gt &&
1050 bno + len == gtrec.rm_startblock &&
1051 (ignore_off || offset + len == gtrec.rm_offset) &&
1052 (unsigned long)ltrec.rm_blockcount + len +
1053 gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
1054 /*
1055 * right edge also contiguous, delete right record
1056 * and merge into left record.
1057 *
1058 * ltbno ltlen gtbno gtlen
1059 * orig: |ooooooooo| |ooooooooo|
1060 * adding: |aaaaaaaaa|
1061 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
1062 */
1063 ltrec.rm_blockcount += gtrec.rm_blockcount;
1064 trace_xfs_rmap_delete(cur, gtrec.rm_startblock,
1065 gtrec.rm_blockcount, gtrec.rm_owner,
1066 gtrec.rm_offset, gtrec.rm_flags);
1067 error = xfs_btree_delete(cur, &i);
1068 if (error)
1069 goto out_error;
1070 if (XFS_IS_CORRUPT(mp, i != 1)) {
1071 xfs_btree_mark_sick(cur);
1072 error = -EFSCORRUPTED;
1073 goto out_error;
1074 }
1075 }
1076
1077 /* point the cursor back to the left record and update */
1078 error = xfs_btree_decrement(cur, 0, &have_gt);
1079 if (error)
1080 goto out_error;
1081 error = xfs_rmap_update(cur, &ltrec);
1082 if (error)
1083 goto out_error;
1084 } else if (have_gt &&
1085 bno + len == gtrec.rm_startblock &&
1086 (ignore_off || offset + len == gtrec.rm_offset)) {
1087 /*
1088 * right edge contiguous, merge into right record.
1089 *
1090 * gtbno gtlen
1091 * Orig: |ooooooooo|
1092 * adding: |aaaaaaaaa|
1093 * Result: |rrrrrrrrrrrrrrrrrrr|
1094 * bno len
1095 */
1096 gtrec.rm_startblock = bno;
1097 gtrec.rm_blockcount += len;
1098 if (!ignore_off)
1099 gtrec.rm_offset = offset;
1100 error = xfs_rmap_update(cur, &gtrec);
1101 if (error)
1102 goto out_error;
1103 } else {
1104 /*
1105 * no contiguous edge with identical owner, insert
1106 * new record at current cursor position.
1107 */
1108 cur->bc_rec.r.rm_startblock = bno;
1109 cur->bc_rec.r.rm_blockcount = len;
1110 cur->bc_rec.r.rm_owner = owner;
1111 cur->bc_rec.r.rm_offset = offset;
1112 cur->bc_rec.r.rm_flags = flags;
1113 trace_xfs_rmap_insert(cur, bno, len, owner, offset, flags);
1114 error = xfs_btree_insert(cur, &i);
1115 if (error)
1116 goto out_error;
1117 if (XFS_IS_CORRUPT(mp, i != 1)) {
1118 xfs_btree_mark_sick(cur);
1119 error = -EFSCORRUPTED;
1120 goto out_error;
1121 }
1122 }
1123
1124 trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
1125out_error:
1126 if (error)
1127 trace_xfs_rmap_map_error(cur, error, _RET_IP_);
1128 return error;
1129}
1130
1131/*
1132 * Add a reference to an extent in the rmap btree.
1133 */
1134int
1135xfs_rmap_alloc(
1136 struct xfs_trans *tp,
1137 struct xfs_buf *agbp,
1138 struct xfs_perag *pag,
1139 xfs_agblock_t bno,
1140 xfs_extlen_t len,
1141 const struct xfs_owner_info *oinfo)
1142{
1143 struct xfs_mount *mp = tp->t_mountp;
1144 struct xfs_btree_cur *cur;
1145 int error;
1146
1147 if (!xfs_has_rmapbt(mp))
1148 return 0;
1149
1150 cur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
1151 xfs_rmap_update_hook(tp, pag_group(pag), XFS_RMAP_MAP, bno, len, false,
1152 oinfo);
1153 error = xfs_rmap_map(cur, bno, len, false, oinfo);
1154
1155 xfs_btree_del_cursor(cur, error);
1156 return error;
1157}
1158
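/* Neighbor-state bits and record-array shorthands used by the convert functions below. */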
1159#define RMAP_LEFT_CONTIG (1 << 0)
1160#define RMAP_RIGHT_CONTIG (1 << 1)
1161#define RMAP_LEFT_FILLING (1 << 2)
1162#define RMAP_RIGHT_FILLING (1 << 3)
1163#define RMAP_LEFT_VALID (1 << 6)
1164#define RMAP_RIGHT_VALID (1 << 7)
1165
1166#define LEFT r[0]
1167#define RIGHT r[1]
1168#define PREV r[2]
1169#define NEW r[3]
1170
1171/*
1172 * Convert an unwritten extent to a real extent or vice versa.
1173 * Does not handle overlapping extents.
1174 */
1175STATIC int
1176xfs_rmap_convert(
1177 struct xfs_btree_cur *cur,
1178 xfs_agblock_t bno,
1179 xfs_extlen_t len,
1180 bool unwritten,
1181 const struct xfs_owner_info *oinfo)
1182{
1183 struct xfs_mount *mp = cur->bc_mp;
1184 struct xfs_rmap_irec r[4]; /* neighbor extent entries */
1185 /* left is 0, right is 1, */
1186 /* prev is 2, new is 3 */
1187 uint64_t owner;
1188 uint64_t offset;
1189 uint64_t new_endoff;
1190 unsigned int oldext;
1191 unsigned int newext;
1192 unsigned int flags = 0;
1193 int i;
1194 int state = 0;
1195 int error;
1196
1197 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
1198 ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
1199 (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
1200 oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
1201 new_endoff = offset + len;
1202 trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
1203
1204 /*
1205 * For the initial lookup, look for an exact match or the left-adjacent
1206 * record for our insertion point. This will also give us the record for
1207 * start block contiguity tests.
1208 */
1209 error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, &PREV, &i);
1210 if (error)
1211 goto done;
1212 if (XFS_IS_CORRUPT(mp, i != 1)) {
1213 xfs_btree_mark_sick(cur);
1214 error = -EFSCORRUPTED;
1215 goto done;
1216 }
1217
1218 trace_xfs_rmap_lookup_le_range_result(cur, PREV.rm_startblock,
1219 PREV.rm_blockcount, PREV.rm_owner, PREV.rm_offset,
1220 PREV.rm_flags);
1221
1222 ASSERT(PREV.rm_offset <= offset);
1223 ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
1224 ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
1225 newext = ~oldext & XFS_RMAP_UNWRITTEN;
1226
1227 /*
1228 * Set flags determining what part of the previous oldext allocation
1229 * extent is being replaced by a newext allocation.
1230 */
1231 if (PREV.rm_offset == offset)
1232 state |= RMAP_LEFT_FILLING;
1233 if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
1234 state |= RMAP_RIGHT_FILLING;
1235
1236 /*
1237 * Decrement the cursor to see if we have a left-adjacent record to our
1238 * insertion point. This will give us the record for end block
1239 * contiguity tests.
1240 */
1241 error = xfs_btree_decrement(cur, 0, &i);
1242 if (error)
1243 goto done;
1244 if (i) {
1245 state |= RMAP_LEFT_VALID;
1246 error = xfs_rmap_get_rec(cur, &LEFT, &i);
1247 if (error)
1248 goto done;
1249 if (XFS_IS_CORRUPT(mp, i != 1)) {
1250 xfs_btree_mark_sick(cur);
1251 error = -EFSCORRUPTED;
1252 goto done;
1253 }
1254 if (XFS_IS_CORRUPT(mp,
1255 LEFT.rm_startblock + LEFT.rm_blockcount >
1256 bno)) {
1257 xfs_btree_mark_sick(cur);
1258 error = -EFSCORRUPTED;
1259 goto done;
1260 }
1261 trace_xfs_rmap_find_left_neighbor_result(cur,
1262 LEFT.rm_startblock, LEFT.rm_blockcount,
1263 LEFT.rm_owner, LEFT.rm_offset, LEFT.rm_flags);
1264 if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
1265 LEFT.rm_offset + LEFT.rm_blockcount == offset &&
1266 xfs_rmap_is_mergeable(&LEFT, owner, newext))
1267 state |= RMAP_LEFT_CONTIG;
1268 }
1269
1270 /*
1271 * Increment the cursor to see if we have a right-adjacent record to our
1272 * insertion point. This will give us the record for end block
1273 * contiguity tests.
1274 */
1275 error = xfs_btree_increment(cur, 0, &i);
1276 if (error)
1277 goto done;
1278 if (XFS_IS_CORRUPT(mp, i != 1)) {
1279 xfs_btree_mark_sick(cur);
1280 error = -EFSCORRUPTED;
1281 goto done;
1282 }
1283 error = xfs_btree_increment(cur, 0, &i);
1284 if (error)
1285 goto done;
1286 if (i) {
1287 state |= RMAP_RIGHT_VALID;
1288 error = xfs_rmap_get_rec(cur, &RIGHT, &i);
1289 if (error)
1290 goto done;
1291 if (XFS_IS_CORRUPT(mp, i != 1)) {
1292 xfs_btree_mark_sick(cur);
1293 error = -EFSCORRUPTED;
1294 goto done;
1295 }
1296 if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
1297 xfs_btree_mark_sick(cur);
1298 error = -EFSCORRUPTED;
1299 goto done;
1300 }
1301 trace_xfs_rmap_find_right_neighbor_result(cur,
1302 RIGHT.rm_startblock, RIGHT.rm_blockcount,
1303 RIGHT.rm_owner, RIGHT.rm_offset,
1304 RIGHT.rm_flags);
1305 if (bno + len == RIGHT.rm_startblock &&
1306 offset + len == RIGHT.rm_offset &&
1307 xfs_rmap_is_mergeable(&RIGHT, owner, newext))
1308 state |= RMAP_RIGHT_CONTIG;
1309 }
1310
1311 /* check that left + prev + right is not too long */
1312 if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1313 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
1314 (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1315 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
1316 (unsigned long)LEFT.rm_blockcount + len +
1317 RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
1318 state &= ~RMAP_RIGHT_CONTIG;
1319
1320 trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
1321
1322 /* reset the cursor back to PREV */
1323 error = xfs_rmap_lookup_le(cur, bno, owner, offset, oldext, NULL, &i);
1324 if (error)
1325 goto done;
1326 if (XFS_IS_CORRUPT(mp, i != 1)) {
1327 xfs_btree_mark_sick(cur);
1328 error = -EFSCORRUPTED;
1329 goto done;
1330 }
1331
1332 /*
1333 * Switch out based on the FILLING and CONTIG state bits.
1334 */
1335 switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1336 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
1337 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1338 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1339 /*
1340 * Setting all of a previous oldext extent to newext.
1341 * The left and right neighbors are both contiguous with new.
1342 */
1343 error = xfs_btree_increment(cur, 0, &i);
1344 if (error)
1345 goto done;
1346 if (XFS_IS_CORRUPT(mp, i != 1)) {
1347 xfs_btree_mark_sick(cur);
1348 error = -EFSCORRUPTED;
1349 goto done;
1350 }
1351 trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
1352 RIGHT.rm_blockcount, RIGHT.rm_owner,
1353 RIGHT.rm_offset, RIGHT.rm_flags);
1354 error = xfs_btree_delete(cur, &i);
1355 if (error)
1356 goto done;
1357 if (XFS_IS_CORRUPT(mp, i != 1)) {
1358 xfs_btree_mark_sick(cur);
1359 error = -EFSCORRUPTED;
1360 goto done;
1361 }
1362 error = xfs_btree_decrement(cur, 0, &i);
1363 if (error)
1364 goto done;
1365 if (XFS_IS_CORRUPT(mp, i != 1)) {
1366 xfs_btree_mark_sick(cur);
1367 error = -EFSCORRUPTED;
1368 goto done;
1369 }
1370 trace_xfs_rmap_delete(cur, PREV.rm_startblock,
1371 PREV.rm_blockcount, PREV.rm_owner,
1372 PREV.rm_offset, PREV.rm_flags);
1373 error = xfs_btree_delete(cur, &i);
1374 if (error)
1375 goto done;
1376 if (XFS_IS_CORRUPT(mp, i != 1)) {
1377 xfs_btree_mark_sick(cur);
1378 error = -EFSCORRUPTED;
1379 goto done;
1380 }
1381 error = xfs_btree_decrement(cur, 0, &i);
1382 if (error)
1383 goto done;
1384 if (XFS_IS_CORRUPT(mp, i != 1)) {
1385 xfs_btree_mark_sick(cur);
1386 error = -EFSCORRUPTED;
1387 goto done;
1388 }
1389 NEW = LEFT;
1390 NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
1391 error = xfs_rmap_update(cur, &NEW);
1392 if (error)
1393 goto done;
1394 break;
1395
1396 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1397 /*
1398 * Setting all of a previous oldext extent to newext.
1399 * The left neighbor is contiguous, the right is not.
1400 */
1401 trace_xfs_rmap_delete(cur, PREV.rm_startblock,
1402 PREV.rm_blockcount, PREV.rm_owner,
1403 PREV.rm_offset, PREV.rm_flags);
1404 error = xfs_btree_delete(cur, &i);
1405 if (error)
1406 goto done;
1407 if (XFS_IS_CORRUPT(mp, i != 1)) {
1408 xfs_btree_mark_sick(cur);
1409 error = -EFSCORRUPTED;
1410 goto done;
1411 }
1412 error = xfs_btree_decrement(cur, 0, &i);
1413 if (error)
1414 goto done;
1415 if (XFS_IS_CORRUPT(mp, i != 1)) {
1416 xfs_btree_mark_sick(cur);
1417 error = -EFSCORRUPTED;
1418 goto done;
1419 }
1420 NEW = LEFT;
1421 NEW.rm_blockcount += PREV.rm_blockcount;
1422 error = xfs_rmap_update(cur, &NEW);
1423 if (error)
1424 goto done;
1425 break;
1426
1427 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1428 /*
1429 * Setting all of a previous oldext extent to newext.
1430 * The right neighbor is contiguous, the left is not.
1431 */
1432 error = xfs_btree_increment(cur, 0, &i);
1433 if (error)
1434 goto done;
1435 if (XFS_IS_CORRUPT(mp, i != 1)) {
1436 xfs_btree_mark_sick(cur);
1437 error = -EFSCORRUPTED;
1438 goto done;
1439 }
1440 trace_xfs_rmap_delete(cur, RIGHT.rm_startblock,
1441 RIGHT.rm_blockcount, RIGHT.rm_owner,
1442 RIGHT.rm_offset, RIGHT.rm_flags);
1443 error = xfs_btree_delete(cur, &i);
1444 if (error)
1445 goto done;
1446 if (XFS_IS_CORRUPT(mp, i != 1)) {
1447 xfs_btree_mark_sick(cur);
1448 error = -EFSCORRUPTED;
1449 goto done;
1450 }
1451 error = xfs_btree_decrement(cur, 0, &i);
1452 if (error)
1453 goto done;
1454 if (XFS_IS_CORRUPT(mp, i != 1)) {
1455 xfs_btree_mark_sick(cur);
1456 error = -EFSCORRUPTED;
1457 goto done;
1458 }
1459 NEW = PREV;
1460 NEW.rm_blockcount = len + RIGHT.rm_blockcount;
1461 NEW.rm_flags = newext;
1462 error = xfs_rmap_update(cur, &NEW);
1463 if (error)
1464 goto done;
1465 break;
1466
1467 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
1468 /*
1469 * Setting all of a previous oldext extent to newext.
1470 * Neither the left nor right neighbors are contiguous with
1471 * the new one.
1472 */
1473 NEW = PREV;
1474 NEW.rm_flags = newext;
1475 error = xfs_rmap_update(cur, &NEW);
1476 if (error)
1477 goto done;
1478 break;
1479
1480 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
1481 /*
1482 * Setting the first part of a previous oldext extent to newext.
1483 * The left neighbor is contiguous.
1484 */
1485 NEW = PREV;
1486 NEW.rm_offset += len;
1487 NEW.rm_startblock += len;
1488 NEW.rm_blockcount -= len;
1489 error = xfs_rmap_update(cur, &NEW);
1490 if (error)
1491 goto done;
1492 error = xfs_btree_decrement(cur, 0, &i);
1493 if (error)
1494 goto done;
1495 NEW = LEFT;
1496 NEW.rm_blockcount += len;
1497 error = xfs_rmap_update(cur, &NEW);
1498 if (error)
1499 goto done;
1500 break;
1501
1502 case RMAP_LEFT_FILLING:
1503 /*
1504 * Setting the first part of a previous oldext extent to newext.
1505 * The left neighbor is not contiguous.
1506 */
1507 NEW = PREV;
1508 NEW.rm_startblock += len;
1509 NEW.rm_offset += len;
1510 NEW.rm_blockcount -= len;
1511 error = xfs_rmap_update(cur, &NEW);
1512 if (error)
1513 goto done;
1514 NEW.rm_startblock = bno;
1515 NEW.rm_owner = owner;
1516 NEW.rm_offset = offset;
1517 NEW.rm_blockcount = len;
1518 NEW.rm_flags = newext;
1519 cur->bc_rec.r = NEW;
1520 trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1521 error = xfs_btree_insert(cur, &i);
1522 if (error)
1523 goto done;
1524 if (XFS_IS_CORRUPT(mp, i != 1)) {
1525 xfs_btree_mark_sick(cur);
1526 error = -EFSCORRUPTED;
1527 goto done;
1528 }
1529 break;
1530
1531 case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1532 /*
1533 * Setting the last part of a previous oldext extent to newext.
1534 * The right neighbor is contiguous with the new allocation.
1535 */
1536 NEW = PREV;
1537 NEW.rm_blockcount -= len;
1538 error = xfs_rmap_update(cur, &NEW);
1539 if (error)
1540 goto done;
1541 error = xfs_btree_increment(cur, 0, &i);
1542 if (error)
1543 goto done;
1544 NEW = RIGHT;
1545 NEW.rm_offset = offset;
1546 NEW.rm_startblock = bno;
1547 NEW.rm_blockcount += len;
1548 error = xfs_rmap_update(cur, &NEW);
1549 if (error)
1550 goto done;
1551 break;
1552
1553 case RMAP_RIGHT_FILLING:
1554 /*
1555 * Setting the last part of a previous oldext extent to newext.
1556 * The right neighbor is not contiguous.
1557 */
1558 NEW = PREV;
1559 NEW.rm_blockcount -= len;
1560 error = xfs_rmap_update(cur, &NEW);
1561 if (error)
1562 goto done;
1563 error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
1564 oldext, &i);
1565 if (error)
1566 goto done;
1567 if (XFS_IS_CORRUPT(mp, i != 0)) {
1568 xfs_btree_mark_sick(cur);
1569 error = -EFSCORRUPTED;
1570 goto done;
1571 }
1572 NEW.rm_startblock = bno;
1573 NEW.rm_owner = owner;
1574 NEW.rm_offset = offset;
1575 NEW.rm_blockcount = len;
1576 NEW.rm_flags = newext;
1577 cur->bc_rec.r = NEW;
1578 trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1579 error = xfs_btree_insert(cur, &i);
1580 if (error)
1581 goto done;
1582 if (XFS_IS_CORRUPT(mp, i != 1)) {
1583 xfs_btree_mark_sick(cur);
1584 error = -EFSCORRUPTED;
1585 goto done;
1586 }
1587 break;
1588
1589 case 0:
1590 /*
1591 * Setting the middle part of a previous oldext extent to
1592 * newext. Contiguity is impossible here.
1593 * One extent becomes three extents.
1594 */
1595 /* new right extent - oldext */
1596 NEW.rm_startblock = bno + len;
1597 NEW.rm_owner = owner;
1598 NEW.rm_offset = new_endoff;
1599 NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
1600 new_endoff;
1601 NEW.rm_flags = PREV.rm_flags;
1602 error = xfs_rmap_update(cur, &NEW);
1603 if (error)
1604 goto done;
1605 /* new left extent - oldext */
1606 NEW = PREV;
1607 NEW.rm_blockcount = offset - PREV.rm_offset;
1608 cur->bc_rec.r = NEW;
1609 trace_xfs_rmap_insert(cur, NEW.rm_startblock,
1610 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
1611 NEW.rm_flags);
1612 error = xfs_btree_insert(cur, &i);
1613 if (error)
1614 goto done;
1615 if (XFS_IS_CORRUPT(mp, i != 1)) {
1616 xfs_btree_mark_sick(cur);
1617 error = -EFSCORRUPTED;
1618 goto done;
1619 }
1620 /*
1621 * Reset the cursor to the position of the new extent
1622 * we are about to insert as we can't trust it after
1623 * the previous insert.
1624 */
1625 error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
1626 oldext, &i);
1627 if (error)
1628 goto done;
1629 if (XFS_IS_CORRUPT(mp, i != 0)) {
1630 xfs_btree_mark_sick(cur);
1631 error = -EFSCORRUPTED;
1632 goto done;
1633 }
1634 /* new middle extent - newext */
1635 cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
1636 cur->bc_rec.r.rm_flags |= newext;
1637 trace_xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1638 error = xfs_btree_insert(cur, &i);
1639 if (error)
1640 goto done;
1641 if (XFS_IS_CORRUPT(mp, i != 1)) {
1642 xfs_btree_mark_sick(cur);
1643 error = -EFSCORRUPTED;
1644 goto done;
1645 }
1646 break;
1647
1648 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1649 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1650 case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
1651 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1652 case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
1653 case RMAP_LEFT_CONTIG:
1654 case RMAP_RIGHT_CONTIG:
1655 /*
1656 * These cases are all impossible.
1657 */
1658 ASSERT(0);
1659 }
1660
1661 trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
1662done:
1663 if (error)
1664 trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
1665 return error;
1666}
1667
1668/*
1669 * Convert an unwritten extent to a real extent or vice versa. If there is no
1670 * possibility of overlapping extents, delegate to the simpler convert
1671 * function.
1672 */
1673STATIC int
1674xfs_rmap_convert_shared(
1675 struct xfs_btree_cur *cur,
1676 xfs_agblock_t bno,
1677 xfs_extlen_t len,
1678 bool unwritten,
1679 const struct xfs_owner_info *oinfo)
1680{
1681 struct xfs_mount *mp = cur->bc_mp;
1682 struct xfs_rmap_irec r[4]; /* neighbor extent entries */
1683 /* left is 0, right is 1, */
1684 /* prev is 2, new is 3 */
1685 uint64_t owner;
1686 uint64_t offset;
1687 uint64_t new_endoff;
1688 unsigned int oldext;
1689 unsigned int newext;
1690 unsigned int flags = 0;
1691 int i;
1692 int state = 0;
1693 int error;
1694
1695 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
1696 ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
1697 (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
1698 oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
1699 new_endoff = offset + len;
1700 trace_xfs_rmap_convert(cur, bno, len, unwritten, oinfo);
1701
1702 /*
1703 * For the initial lookup, look for an exact match or the left-adjacent
1704 * record for our insertion point. This will also give us the record for
1705 * start block contiguity tests.
1706 */
1707 error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, oldext,
1708 &PREV, &i);
1709 if (error)
1710 goto done;
1711 if (XFS_IS_CORRUPT(mp, i != 1)) {
1712 xfs_btree_mark_sick(cur);
1713 error = -EFSCORRUPTED;
1714 goto done;
1715 }
1716
1717 ASSERT(PREV.rm_offset <= offset);
1718 ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
1719 ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
1720 newext = ~oldext & XFS_RMAP_UNWRITTEN;
1721
1722 /*
1723 * Set flags determining what part of the previous oldext allocation
1724 * extent is being replaced by a newext allocation.
1725 */
1726 if (PREV.rm_offset == offset)
1727 state |= RMAP_LEFT_FILLING;
1728 if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
1729 state |= RMAP_RIGHT_FILLING;
1730
1731 /* Is there a left record that abuts our range? */
1732 error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, newext,
1733 &LEFT, &i);
1734 if (error)
1735 goto done;
1736 if (i) {
1737 state |= RMAP_LEFT_VALID;
1738 if (XFS_IS_CORRUPT(mp,
1739 LEFT.rm_startblock + LEFT.rm_blockcount >
1740 bno)) {
1741 xfs_btree_mark_sick(cur);
1742 error = -EFSCORRUPTED;
1743 goto done;
1744 }
1745 if (xfs_rmap_is_mergeable(&LEFT, owner, newext))
1746 state |= RMAP_LEFT_CONTIG;
1747 }
1748
1749 /* Is there a right record that abuts our range? */
1750 error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
1751 newext, &i);
1752 if (error)
1753 goto done;
1754 if (i) {
1755 state |= RMAP_RIGHT_VALID;
1756 error = xfs_rmap_get_rec(cur, &RIGHT, &i);
1757 if (error)
1758 goto done;
1759 if (XFS_IS_CORRUPT(mp, i != 1)) {
1760 xfs_btree_mark_sick(cur);
1761 error = -EFSCORRUPTED;
1762 goto done;
1763 }
1764 if (XFS_IS_CORRUPT(mp, bno + len > RIGHT.rm_startblock)) {
1765 xfs_btree_mark_sick(cur);
1766 error = -EFSCORRUPTED;
1767 goto done;
1768 }
1769 trace_xfs_rmap_find_right_neighbor_result(cur,
1770 RIGHT.rm_startblock, RIGHT.rm_blockcount,
1771 RIGHT.rm_owner, RIGHT.rm_offset,
1772 RIGHT.rm_flags);
1773 if (xfs_rmap_is_mergeable(&RIGHT, owner, newext))
1774 state |= RMAP_RIGHT_CONTIG;
1775 }
1776
1777 /* check that left + prev + right is not too long */
1778 if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1779 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
1780 (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1781 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
1782 (unsigned long)LEFT.rm_blockcount + len +
1783 RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
1784 state &= ~RMAP_RIGHT_CONTIG;
1785
1786 trace_xfs_rmap_convert_state(cur, state, _RET_IP_);
1787 /*
1788 * Switch out based on the FILLING and CONTIG state bits.
1789 */
1790 switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1791 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
1792 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
1793 RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1794 /*
1795 * Setting all of a previous oldext extent to newext.
1796 * The left and right neighbors are both contiguous with new.
1797 */
1798 error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
1799 RIGHT.rm_blockcount, RIGHT.rm_owner,
1800 RIGHT.rm_offset, RIGHT.rm_flags);
1801 if (error)
1802 goto done;
1803 error = xfs_rmap_delete(cur, PREV.rm_startblock,
1804 PREV.rm_blockcount, PREV.rm_owner,
1805 PREV.rm_offset, PREV.rm_flags);
1806 if (error)
1807 goto done;
1808 NEW = LEFT;
1809 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1810 NEW.rm_blockcount, NEW.rm_owner,
1811 NEW.rm_offset, NEW.rm_flags, &i);
1812 if (error)
1813 goto done;
1814 if (XFS_IS_CORRUPT(mp, i != 1)) {
1815 xfs_btree_mark_sick(cur);
1816 error = -EFSCORRUPTED;
1817 goto done;
1818 }
1819 NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
1820 error = xfs_rmap_update(cur, &NEW);
1821 if (error)
1822 goto done;
1823 break;
1824
1825 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
1826 /*
1827 * Setting all of a previous oldext extent to newext.
1828 * The left neighbor is contiguous, the right is not.
1829 */
1830 error = xfs_rmap_delete(cur, PREV.rm_startblock,
1831 PREV.rm_blockcount, PREV.rm_owner,
1832 PREV.rm_offset, PREV.rm_flags);
1833 if (error)
1834 goto done;
1835 NEW = LEFT;
1836 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1837 NEW.rm_blockcount, NEW.rm_owner,
1838 NEW.rm_offset, NEW.rm_flags, &i);
1839 if (error)
1840 goto done;
1841 if (XFS_IS_CORRUPT(mp, i != 1)) {
1842 xfs_btree_mark_sick(cur);
1843 error = -EFSCORRUPTED;
1844 goto done;
1845 }
1846 NEW.rm_blockcount += PREV.rm_blockcount;
1847 error = xfs_rmap_update(cur, &NEW);
1848 if (error)
1849 goto done;
1850 break;
1851
1852 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1853 /*
1854 * Setting all of a previous oldext extent to newext.
1855 * The right neighbor is contiguous, the left is not.
1856 */
1857 error = xfs_rmap_delete(cur, RIGHT.rm_startblock,
1858 RIGHT.rm_blockcount, RIGHT.rm_owner,
1859 RIGHT.rm_offset, RIGHT.rm_flags);
1860 if (error)
1861 goto done;
1862 NEW = PREV;
1863 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1864 NEW.rm_blockcount, NEW.rm_owner,
1865 NEW.rm_offset, NEW.rm_flags, &i);
1866 if (error)
1867 goto done;
1868 if (XFS_IS_CORRUPT(mp, i != 1)) {
1869 xfs_btree_mark_sick(cur);
1870 error = -EFSCORRUPTED;
1871 goto done;
1872 }
1873 NEW.rm_blockcount += RIGHT.rm_blockcount;
1874 NEW.rm_flags = RIGHT.rm_flags;
1875 error = xfs_rmap_update(cur, &NEW);
1876 if (error)
1877 goto done;
1878 break;
1879
1880 case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
1881 /*
1882 * Setting all of a previous oldext extent to newext.
1883 * Neither the left nor right neighbors are contiguous with
1884 * the new one.
1885 */
1886 NEW = PREV;
1887 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1888 NEW.rm_blockcount, NEW.rm_owner,
1889 NEW.rm_offset, NEW.rm_flags, &i);
1890 if (error)
1891 goto done;
1892 if (XFS_IS_CORRUPT(mp, i != 1)) {
1893 xfs_btree_mark_sick(cur);
1894 error = -EFSCORRUPTED;
1895 goto done;
1896 }
1897 NEW.rm_flags = newext;
1898 error = xfs_rmap_update(cur, &NEW);
1899 if (error)
1900 goto done;
1901 break;
1902
1903 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
1904 /*
1905 * Setting the first part of a previous oldext extent to newext.
1906 * The left neighbor is contiguous.
1907 */
1908 NEW = PREV;
1909 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1910 NEW.rm_blockcount, NEW.rm_owner,
1911 NEW.rm_offset, NEW.rm_flags);
1912 if (error)
1913 goto done;
1914 NEW.rm_offset += len;
1915 NEW.rm_startblock += len;
1916 NEW.rm_blockcount -= len;
1917 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1918 NEW.rm_blockcount, NEW.rm_owner,
1919 NEW.rm_offset, NEW.rm_flags);
1920 if (error)
1921 goto done;
1922 NEW = LEFT;
1923 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1924 NEW.rm_blockcount, NEW.rm_owner,
1925 NEW.rm_offset, NEW.rm_flags, &i);
1926 if (error)
1927 goto done;
1928 if (XFS_IS_CORRUPT(mp, i != 1)) {
1929 xfs_btree_mark_sick(cur);
1930 error = -EFSCORRUPTED;
1931 goto done;
1932 }
1933 NEW.rm_blockcount += len;
1934 error = xfs_rmap_update(cur, &NEW);
1935 if (error)
1936 goto done;
1937 break;
1938
1939 case RMAP_LEFT_FILLING:
1940 /*
1941 * Setting the first part of a previous oldext extent to newext.
1942 * The left neighbor is not contiguous.
1943 */
1944 NEW = PREV;
1945 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1946 NEW.rm_blockcount, NEW.rm_owner,
1947 NEW.rm_offset, NEW.rm_flags);
1948 if (error)
1949 goto done;
1950 NEW.rm_offset += len;
1951 NEW.rm_startblock += len;
1952 NEW.rm_blockcount -= len;
1953 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1954 NEW.rm_blockcount, NEW.rm_owner,
1955 NEW.rm_offset, NEW.rm_flags);
1956 if (error)
1957 goto done;
1958 error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
1959 if (error)
1960 goto done;
1961 break;
1962
1963 case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
1964 /*
1965 * Setting the last part of a previous oldext extent to newext.
1966 * The right neighbor is contiguous with the new allocation.
1967 */
1968 NEW = PREV;
1969 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
1970 NEW.rm_blockcount, NEW.rm_owner,
1971 NEW.rm_offset, NEW.rm_flags, &i);
1972 if (error)
1973 goto done;
1974 if (XFS_IS_CORRUPT(mp, i != 1)) {
1975 xfs_btree_mark_sick(cur);
1976 error = -EFSCORRUPTED;
1977 goto done;
1978 }
1979 NEW.rm_blockcount = offset - NEW.rm_offset;
1980 error = xfs_rmap_update(cur, &NEW);
1981 if (error)
1982 goto done;
1983 NEW = RIGHT;
1984 error = xfs_rmap_delete(cur, NEW.rm_startblock,
1985 NEW.rm_blockcount, NEW.rm_owner,
1986 NEW.rm_offset, NEW.rm_flags);
1987 if (error)
1988 goto done;
1989 NEW.rm_offset = offset;
1990 NEW.rm_startblock = bno;
1991 NEW.rm_blockcount += len;
1992 error = xfs_rmap_insert(cur, NEW.rm_startblock,
1993 NEW.rm_blockcount, NEW.rm_owner,
1994 NEW.rm_offset, NEW.rm_flags);
1995 if (error)
1996 goto done;
1997 break;
1998
1999 case RMAP_RIGHT_FILLING:
2000 /*
2001 * Setting the last part of a previous oldext extent to newext.
2002 * The right neighbor is not contiguous.
2003 */
2004 NEW = PREV;
2005 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2006 NEW.rm_blockcount, NEW.rm_owner,
2007 NEW.rm_offset, NEW.rm_flags, &i);
2008 if (error)
2009 goto done;
2010 if (XFS_IS_CORRUPT(mp, i != 1)) {
2011 xfs_btree_mark_sick(cur);
2012 error = -EFSCORRUPTED;
2013 goto done;
2014 }
2015 NEW.rm_blockcount -= len;
2016 error = xfs_rmap_update(cur, &NEW);
2017 if (error)
2018 goto done;
2019 error = xfs_rmap_insert(cur, bno, len, owner, offset, newext);
2020 if (error)
2021 goto done;
2022 break;
2023
2024 case 0:
2025 /*
2026 * Setting the middle part of a previous oldext extent to
2027 * newext. Contiguity is impossible here.
2028 * One extent becomes three extents.
2029 */
2030 /* new right extent - oldext */
2031 NEW.rm_startblock = bno + len;
2032 NEW.rm_owner = owner;
2033 NEW.rm_offset = new_endoff;
2034 NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
2035 new_endoff;
2036 NEW.rm_flags = PREV.rm_flags;
2037 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2038 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
2039 NEW.rm_flags);
2040 if (error)
2041 goto done;
2042 /* new left extent - oldext */
2043 NEW = PREV;
2044 error = xfs_rmap_lookup_eq(cur, NEW.rm_startblock,
2045 NEW.rm_blockcount, NEW.rm_owner,
2046 NEW.rm_offset, NEW.rm_flags, &i);
2047 if (error)
2048 goto done;
2049 if (XFS_IS_CORRUPT(mp, i != 1)) {
2050 xfs_btree_mark_sick(cur);
2051 error = -EFSCORRUPTED;
2052 goto done;
2053 }
2054 NEW.rm_blockcount = offset - NEW.rm_offset;
2055 error = xfs_rmap_update(cur, &NEW);
2056 if (error)
2057 goto done;
2058 /* new middle extent - newext */
2059 NEW.rm_startblock = bno;
2060 NEW.rm_blockcount = len;
2061 NEW.rm_owner = owner;
2062 NEW.rm_offset = offset;
2063 NEW.rm_flags = newext;
2064 error = xfs_rmap_insert(cur, NEW.rm_startblock,
2065 NEW.rm_blockcount, NEW.rm_owner, NEW.rm_offset,
2066 NEW.rm_flags);
2067 if (error)
2068 goto done;
2069 break;
2070
2071 case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2072 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2073 case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
2074 case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
2075 case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
2076 case RMAP_LEFT_CONTIG:
2077 case RMAP_RIGHT_CONTIG:
2078 /*
2079 * These cases are all impossible.
2080 */
2081 ASSERT(0);
2082 }
2083
2084 trace_xfs_rmap_convert_done(cur, bno, len, unwritten, oinfo);
2085done:
2086 if (error)
2087 trace_xfs_rmap_convert_error(cur, error, _RET_IP_);
2088 return error;
2089}
2090
2091#undef NEW
2092#undef LEFT
2093#undef RIGHT
2094#undef PREV
2095
2096/*
2097 * Find an extent in the rmap btree and unmap it. For rmap extent types that
2098 * can overlap (data fork rmaps on reflink filesystems) we must remember
2099 * that the prev/next records in the btree might belong to another owner.
2100 * Therefore we must use delete+insert to alter any of the key fields.
2101 *
2102 * For every other situation there can only be one owner for a given extent,
2103 * so we can call the regular _free function.
2104 */
2105STATIC int
2106xfs_rmap_unmap_shared(
2107 struct xfs_btree_cur *cur,
2108 xfs_agblock_t bno,
2109 xfs_extlen_t len,
2110 bool unwritten,
2111 const struct xfs_owner_info *oinfo)
2112{
2113 struct xfs_mount *mp = cur->bc_mp;
2114 struct xfs_rmap_irec ltrec;
2115 uint64_t ltoff;
2116 int error = 0;
2117 int i;
2118 uint64_t owner;
2119 uint64_t offset;
2120 unsigned int flags;
2121
2122 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
2123 if (unwritten)
2124 flags |= XFS_RMAP_UNWRITTEN;
2125 trace_xfs_rmap_unmap(cur, bno, len, unwritten, oinfo);
2126
2127 /*
2128 * We should always have a left record because there's a static record
2129 * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
2130 * will not ever be removed from the tree.
2131 */
2132 error = xfs_rmap_lookup_le_range(cur, bno, owner, offset, flags,
2133 <rec, &i);
2134 if (error)
2135 goto out_error;
2136 if (XFS_IS_CORRUPT(mp, i != 1)) {
2137 xfs_btree_mark_sick(cur);
2138 error = -EFSCORRUPTED;
2139 goto out_error;
2140 }
2141 ltoff = ltrec.rm_offset;
2142
2143 /* Make sure the extent we found covers the entire freeing range. */
2144 if (XFS_IS_CORRUPT(mp,
2145 ltrec.rm_startblock > bno ||
2146 ltrec.rm_startblock + ltrec.rm_blockcount <
2147 bno + len)) {
2148 xfs_btree_mark_sick(cur);
2149 error = -EFSCORRUPTED;
2150 goto out_error;
2151 }
2152
2153 /* Make sure the owner matches what we expect to find in the tree. */
2154 if (XFS_IS_CORRUPT(mp, owner != ltrec.rm_owner)) {
2155 xfs_btree_mark_sick(cur);
2156 error = -EFSCORRUPTED;
2157 goto out_error;
2158 }
2159
2160 /* Make sure the unwritten flag matches. */
2161 if (XFS_IS_CORRUPT(mp,
2162 (flags & XFS_RMAP_UNWRITTEN) !=
2163 (ltrec.rm_flags & XFS_RMAP_UNWRITTEN))) {
2164 xfs_btree_mark_sick(cur);
2165 error = -EFSCORRUPTED;
2166 goto out_error;
2167 }
2168
2169 /* Check the offset. */
2170 if (XFS_IS_CORRUPT(mp, ltrec.rm_offset > offset)) {
2171 xfs_btree_mark_sick(cur);
2172 error = -EFSCORRUPTED;
2173 goto out_error;
2174 }
2175 if (XFS_IS_CORRUPT(mp, offset > ltoff + ltrec.rm_blockcount)) {
2176 xfs_btree_mark_sick(cur);
2177 error = -EFSCORRUPTED;
2178 goto out_error;
2179 }
2180
2181 if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
2182 /* Exact match, simply remove the record from rmap tree. */
2183 error = xfs_rmap_delete(cur, ltrec.rm_startblock,
2184 ltrec.rm_blockcount, ltrec.rm_owner,
2185 ltrec.rm_offset, ltrec.rm_flags);
2186 if (error)
2187 goto out_error;
2188 } else if (ltrec.rm_startblock == bno) {
2189 /*
2190 * Overlap left hand side of extent: move the start, trim the
2191 * length and update the current record.
2192 *
2193 *           ltbno              ltlen
2194 * Orig:     |oooooooooooooooooooo|
2195 * Freeing:  |fffffffff|
2196 * Result:             |rrrrrrrrrr|
2197 *           bno       len
2198 */
2199
2200 /* Delete prev rmap. */
2201 error = xfs_rmap_delete(cur, ltrec.rm_startblock,
2202 ltrec.rm_blockcount, ltrec.rm_owner,
2203 ltrec.rm_offset, ltrec.rm_flags);
2204 if (error)
2205 goto out_error;
2206
2207 /* Add an rmap at the new offset. */
2208 ltrec.rm_startblock += len;
2209 ltrec.rm_blockcount -= len;
2210 ltrec.rm_offset += len;
2211 error = xfs_rmap_insert(cur, ltrec.rm_startblock,
2212 ltrec.rm_blockcount, ltrec.rm_owner,
2213 ltrec.rm_offset, ltrec.rm_flags);
2214 if (error)
2215 goto out_error;
2216 } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
2217 /*
2218 * Overlap right hand side of extent: trim the length and
2219 * update the current record.
2220 *
2221 *           ltbno              ltlen
2222 * Orig:     |oooooooooooooooooooo|
2223 * Freeing:             |fffffffff|
2224 * Result:    |rrrrrrrrrr|
2225 *                      bno      len
2226 */
2227 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2228 ltrec.rm_blockcount, ltrec.rm_owner,
2229 ltrec.rm_offset, ltrec.rm_flags, &i);
2230 if (error)
2231 goto out_error;
2232 if (XFS_IS_CORRUPT(mp, i != 1)) {
2233 xfs_btree_mark_sick(cur);
2234 error = -EFSCORRUPTED;
2235 goto out_error;
2236 }
2237 ltrec.rm_blockcount -= len;
2238 error = xfs_rmap_update(cur, <rec);
2239 if (error)
2240 goto out_error;
2241 } else {
2242 /*
2243 * Overlap middle of extent: trim the length of the existing
2244 * record so that it covers only the left-extent space, then
2245 * insert a new record containing the remaining right-extent
2246 * space.
2247 *
2248 *           ltbno              ltlen
2249 * Orig:     |oooooooooooooooooooo|
2250 * Freeing:        |fffffffff|
2251 * Result:    |rrrrr|         |rrrr|
2252 *                 bno       len
2253 */
2254 xfs_extlen_t orig_len = ltrec.rm_blockcount;
2255
2256 /* Shrink the left side of the rmap */
2257 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2258 ltrec.rm_blockcount, ltrec.rm_owner,
2259 ltrec.rm_offset, ltrec.rm_flags, &i);
2260 if (error)
2261 goto out_error;
2262 if (XFS_IS_CORRUPT(mp, i != 1)) {
2263 xfs_btree_mark_sick(cur);
2264 error = -EFSCORRUPTED;
2265 goto out_error;
2266 }
2267 ltrec.rm_blockcount = bno - ltrec.rm_startblock;
2268 error = xfs_rmap_update(cur, <rec);
2269 if (error)
2270 goto out_error;
2271
2272 /* Add an rmap at the new offset */
2273 error = xfs_rmap_insert(cur, bno + len,
2274 orig_len - len - ltrec.rm_blockcount,
2275 ltrec.rm_owner, offset + len,
2276 ltrec.rm_flags);
2277 if (error)
2278 goto out_error;
2279 }
2280
2281 trace_xfs_rmap_unmap_done(cur, bno, len, unwritten, oinfo);
2282out_error:
2283 if (error)
2284 trace_xfs_rmap_unmap_error(cur, error, _RET_IP_);
2285 return error;
2286}
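
/*
 * Illustrative sketch only (not called by anything in this file): the
 * comment above xfs_rmap_unmap_shared() notes that the key fields of a
 * shared-extent rmap may only be changed with a delete+insert pair, never
 * with xfs_rmap_update().  This hypothetical helper shows that pattern for
 * shifting a record's startblock and offset by "delta" blocks; the function
 * name and the delta parameter are assumptions made for the example.
 */
STATIC int __maybe_unused
xfs_rmap_example_move_shared_rec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	xfs_extlen_t			delta)
{
	int				error;

	/* Remove the old record because its key fields are about to change. */
	error = xfs_rmap_delete(cur, rec->rm_startblock, rec->rm_blockcount,
			rec->rm_owner, rec->rm_offset, rec->rm_flags);
	if (error)
		return error;

	/* Re-insert the record with the adjusted key. */
	return xfs_rmap_insert(cur, rec->rm_startblock + delta,
			rec->rm_blockcount, rec->rm_owner,
			rec->rm_offset + delta, rec->rm_flags);
}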
2287
2288/*
2289 * Find an extent in the rmap btree and map it. For rmap extent types that
2290 * can overlap (data fork rmaps on reflink filesystems) we must remember
2291 * that the prev/next records in the btree might belong to another owner.
2292 * Therefore we must use delete+insert to alter any of the key fields.
2293 *
2294 * For every other situation there can only be one owner for a given extent,
2295 * so we can call the regular _alloc function.
2296 */
2297STATIC int
2298xfs_rmap_map_shared(
2299 struct xfs_btree_cur *cur,
2300 xfs_agblock_t bno,
2301 xfs_extlen_t len,
2302 bool unwritten,
2303 const struct xfs_owner_info *oinfo)
2304{
2305 struct xfs_mount *mp = cur->bc_mp;
2306 struct xfs_rmap_irec ltrec;
2307 struct xfs_rmap_irec gtrec;
2308 int have_gt;
2309 int have_lt;
2310 int error = 0;
2311 int i;
2312 uint64_t owner;
2313 uint64_t offset;
2314 unsigned int flags = 0;
2315
2316 xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
2317 if (unwritten)
2318 flags |= XFS_RMAP_UNWRITTEN;
2319 trace_xfs_rmap_map(cur, bno, len, unwritten, oinfo);
2320
2321 /* Is there a left record that abuts our range? */
2322 error = xfs_rmap_find_left_neighbor(cur, bno, owner, offset, flags,
2323 <rec, &have_lt);
2324 if (error)
2325 goto out_error;
2326 if (have_lt &&
2327 !xfs_rmap_is_mergeable(<rec, owner, flags))
2328 have_lt = 0;
2329
2330 /* Is there a right record that abuts our range? */
2331 error = xfs_rmap_lookup_eq(cur, bno + len, len, owner, offset + len,
2332 flags, &have_gt);
2333 if (error)
2334 goto out_error;
2335 if (have_gt) {
2336 error = xfs_rmap_get_rec(cur, >rec, &have_gt);
2337 if (error)
2338 goto out_error;
2339 if (XFS_IS_CORRUPT(mp, have_gt != 1)) {
2340 xfs_btree_mark_sick(cur);
2341 error = -EFSCORRUPTED;
2342 goto out_error;
2343 }
2344 trace_xfs_rmap_find_right_neighbor_result(cur,
2345 gtrec.rm_startblock, gtrec.rm_blockcount,
2346 gtrec.rm_owner, gtrec.rm_offset,
2347 gtrec.rm_flags);
2348
2349 if (!xfs_rmap_is_mergeable(>rec, owner, flags))
2350 have_gt = 0;
2351 }
2352
2353 if (have_lt &&
2354 ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
2355 ltrec.rm_offset + ltrec.rm_blockcount == offset) {
2356 /*
2357 * Left edge contiguous, merge into left record.
2358 *
2359 *         ltbno     ltlen
2360 * orig:   |ooooooooo|
2361 * adding:           |aaaaaaaaa|
2362 * result: |rrrrrrrrrrrrrrrrrrr|
2363 *                   bno       len
2364 */
2365 ltrec.rm_blockcount += len;
2366 if (have_gt &&
2367 bno + len == gtrec.rm_startblock &&
2368 offset + len == gtrec.rm_offset) {
2369 /*
2370 * Right edge also contiguous, delete right record
2371 * and merge into left record.
2372 *
2373 *         ltbno     ltlen    gtbno     gtlen
2374 * orig:   |ooooooooo|         |ooooooooo|
2375 * adding:           |aaaaaaaaa|
2376 * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
2377 */
2378 ltrec.rm_blockcount += gtrec.rm_blockcount;
2379 error = xfs_rmap_delete(cur, gtrec.rm_startblock,
2380 gtrec.rm_blockcount, gtrec.rm_owner,
2381 gtrec.rm_offset, gtrec.rm_flags);
2382 if (error)
2383 goto out_error;
2384 }
2385
2386 /* Point the cursor back to the left record and update. */
2387 error = xfs_rmap_lookup_eq(cur, ltrec.rm_startblock,
2388 ltrec.rm_blockcount, ltrec.rm_owner,
2389 ltrec.rm_offset, ltrec.rm_flags, &i);
2390 if (error)
2391 goto out_error;
2392 if (XFS_IS_CORRUPT(mp, i != 1)) {
2393 xfs_btree_mark_sick(cur);
2394 error = -EFSCORRUPTED;
2395 goto out_error;
2396 }
2397
2398 error = xfs_rmap_update(cur, <rec);
2399 if (error)
2400 goto out_error;
2401 } else if (have_gt &&
2402 bno + len == gtrec.rm_startblock &&
2403 offset + len == gtrec.rm_offset) {
2404 /*
2405 * Right edge contiguous, merge into right record.
2406 *
2407 *                   gtbno     gtlen
2408 * Orig:             |ooooooooo|
2409 * adding: |aaaaaaaaa|
2410 * Result: |rrrrrrrrrrrrrrrrrrr|
2411 *         bno       len
2412 */
2413 /* Delete the old record. */
2414 error = xfs_rmap_delete(cur, gtrec.rm_startblock,
2415 gtrec.rm_blockcount, gtrec.rm_owner,
2416 gtrec.rm_offset, gtrec.rm_flags);
2417 if (error)
2418 goto out_error;
2419
2420 /* Move the start and re-add it. */
2421 gtrec.rm_startblock = bno;
2422 gtrec.rm_blockcount += len;
2423 gtrec.rm_offset = offset;
2424 error = xfs_rmap_insert(cur, gtrec.rm_startblock,
2425 gtrec.rm_blockcount, gtrec.rm_owner,
2426 gtrec.rm_offset, gtrec.rm_flags);
2427 if (error)
2428 goto out_error;
2429 } else {
2430 /*
2431 * No contiguous edge with identical owner; insert a
2432 * new record at the current cursor position.
2433 */
2434 error = xfs_rmap_insert(cur, bno, len, owner, offset, flags);
2435 if (error)
2436 goto out_error;
2437 }
2438
2439 trace_xfs_rmap_map_done(cur, bno, len, unwritten, oinfo);
2440out_error:
2441 if (error)
2442 trace_xfs_rmap_map_error(cur, error, _RET_IP_);
2443 return error;
2444}
2445
2446/* Insert a raw rmap into the rmapbt. */
2447int
2448xfs_rmap_map_raw(
2449 struct xfs_btree_cur *cur,
2450 struct xfs_rmap_irec *rmap)
2451{
2452 struct xfs_owner_info oinfo;
2453
2454 xfs_owner_info_pack(&oinfo, rmap->rm_owner, rmap->rm_offset,
2455 rmap->rm_flags);
2456
2457 if ((rmap->rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK |
2458 XFS_RMAP_UNWRITTEN)) ||
2459 XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
2460 return xfs_rmap_map(cur, rmap->rm_startblock,
2461 rmap->rm_blockcount,
2462 rmap->rm_flags & XFS_RMAP_UNWRITTEN,
2463 &oinfo);
2464
2465 return xfs_rmap_map_shared(cur, rmap->rm_startblock,
2466 rmap->rm_blockcount,
2467 rmap->rm_flags & XFS_RMAP_UNWRITTEN,
2468 &oinfo);
2469}
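
/*
 * Illustrative sketch only: a hypothetical caller of xfs_rmap_map_raw()
 * showing how an already-assembled incore rmap record is handed back to the
 * btree.  xfs_rmap_map_raw() itself picks the shared or non-shared insertion
 * path from the record's owner and flags; the wrapper name and its arguments
 * are assumptions made for the example.
 */
STATIC int __maybe_unused
xfs_rmap_example_reinsert_rec(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	uint64_t		owner)
{
	struct xfs_rmap_irec	rmap = {
		.rm_startblock	= bno,
		.rm_blockcount	= len,
		.rm_owner	= owner,
	};

	/* rm_offset and rm_flags are left zero for a plain written mapping. */
	return xfs_rmap_map_raw(cur, &rmap);
}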
2470
2471struct xfs_rmap_query_range_info {
2472 xfs_rmap_query_range_fn fn;
2473 void *priv;
2474};
2475
2476/* Format btree record and pass to our callback. */
2477STATIC int
2478xfs_rmap_query_range_helper(
2479 struct xfs_btree_cur *cur,
2480 const union xfs_btree_rec *rec,
2481 void *priv)
2482{
2483 struct xfs_rmap_query_range_info *query = priv;
2484 struct xfs_rmap_irec irec;
2485 xfs_failaddr_t fa;
2486
2487 fa = xfs_rmap_btrec_to_irec(rec, &irec);
2488 if (!fa)
2489 fa = xfs_rmap_check_btrec(cur, &irec);
2490 if (fa)
2491 return xfs_rmap_complain_bad_rec(cur, fa, &irec);
2492
2493 return query->fn(cur, &irec, query->priv);
2494}
2495
2496/* Find all rmaps between two keys. */
2497int
2498xfs_rmap_query_range(
2499 struct xfs_btree_cur *cur,
2500 const struct xfs_rmap_irec *low_rec,
2501 const struct xfs_rmap_irec *high_rec,
2502 xfs_rmap_query_range_fn fn,
2503 void *priv)
2504{
2505 union xfs_btree_irec low_brec = { .r = *low_rec };
2506 union xfs_btree_irec high_brec = { .r = *high_rec };
2507 struct xfs_rmap_query_range_info query = { .priv = priv, .fn = fn };
2508
2509 return xfs_btree_query_range(cur, &low_brec, &high_brec,
2510 xfs_rmap_query_range_helper, &query);
2511}
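
/*
 * Illustrative sketch only: a hypothetical user of the callback-driven
 * xfs_rmap_query_range() interface that adds up how many mapped blocks in
 * [bno, bno + len) belong to a given owner.  The helper and context names
 * are assumptions made for the example; the low/high key setup mirrors
 * xfs_rmap_ownercount_init() later in this file.
 */
struct xfs_rmap_example_count {
	uint64_t			owner;
	uint64_t			blocks;
};

STATIC int
xfs_rmap_example_count_helper(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xfs_rmap_example_count	*ec = priv;

	if (rec->rm_owner == ec->owner)
		ec->blocks += rec->rm_blockcount;
	return 0;
}

STATIC int __maybe_unused
xfs_rmap_example_count_owner_blocks(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	uint64_t			owner,
	uint64_t			*blocks)
{
	struct xfs_rmap_example_count	ec = { .owner = owner };
	struct xfs_rmap_irec		low = { .rm_startblock = bno };
	struct xfs_rmap_irec		high;
	int				error;

	/* Visit every record whose startblock falls inside the range. */
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	error = xfs_rmap_query_range(cur, &low, &high,
			xfs_rmap_example_count_helper, &ec);
	if (error)
		return error;

	*blocks = ec.blocks;
	return 0;
}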
2512
2513/* Find all rmaps. */
2514int
2515xfs_rmap_query_all(
2516 struct xfs_btree_cur *cur,
2517 xfs_rmap_query_range_fn fn,
2518 void *priv)
2519{
2520 struct xfs_rmap_query_range_info query;
2521
2522 query.priv = priv;
2523 query.fn = fn;
2524 return xfs_btree_query_all(cur, xfs_rmap_query_range_helper, &query);
2525}
2526
2527/* Commit an rmap operation into the ondisk tree. */
2528int
2529__xfs_rmap_finish_intent(
2530 struct xfs_btree_cur *rcur,
2531 enum xfs_rmap_intent_type op,
2532 xfs_agblock_t bno,
2533 xfs_extlen_t len,
2534 const struct xfs_owner_info *oinfo,
2535 bool unwritten)
2536{
2537 switch (op) {
2538 case XFS_RMAP_ALLOC:
2539 case XFS_RMAP_MAP:
2540 return xfs_rmap_map(rcur, bno, len, unwritten, oinfo);
2541 case XFS_RMAP_MAP_SHARED:
2542 return xfs_rmap_map_shared(rcur, bno, len, unwritten, oinfo);
2543 case XFS_RMAP_FREE:
2544 case XFS_RMAP_UNMAP:
2545 return xfs_rmap_unmap(rcur, bno, len, unwritten, oinfo);
2546 case XFS_RMAP_UNMAP_SHARED:
2547 return xfs_rmap_unmap_shared(rcur, bno, len, unwritten, oinfo);
2548 case XFS_RMAP_CONVERT:
2549 return xfs_rmap_convert(rcur, bno, len, !unwritten, oinfo);
2550 case XFS_RMAP_CONVERT_SHARED:
2551 return xfs_rmap_convert_shared(rcur, bno, len, !unwritten,
2552 oinfo);
2553 default:
2554 ASSERT(0);
2555 return -EFSCORRUPTED;
2556 }
2557}
2558
2559/*
2560 * Process one of the deferred rmap operations. We pass back the
2561 * btree cursor to maintain our lock on the rmapbt between calls.
2562 * This saves time and eliminates a buffer deadlock between the
2563 * superblock and the AGF because we'll always grab them in the same
2564 * order.
2565 */
2566int
2567xfs_rmap_finish_one(
2568 struct xfs_trans *tp,
2569 struct xfs_rmap_intent *ri,
2570 struct xfs_btree_cur **pcur)
2571{
2572 struct xfs_owner_info oinfo;
2573 struct xfs_mount *mp = tp->t_mountp;
2574 struct xfs_btree_cur *rcur = *pcur;
2575 struct xfs_buf *agbp = NULL;
2576 xfs_agblock_t bno;
2577 bool unwritten;
2578 int error = 0;
2579
2580 trace_xfs_rmap_deferred(mp, ri);
2581
2582 if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_RMAP_FINISH_ONE))
2583 return -EIO;
2584
2585 /*
2586 * If we don't have a cursor, or the cursor is for a different group
2587 * than this intent, get one now.
2588 */
2589 if (rcur != NULL && rcur->bc_group != ri->ri_group) {
2590 xfs_btree_del_cursor(rcur, 0);
2591 rcur = NULL;
2592 *pcur = NULL;
2593 }
2594 if (rcur == NULL) {
2595 struct xfs_perag *pag = to_perag(ri->ri_group);
2596
2597 /*
2598 * Refresh the freelist before we start changing the
2599 * rmapbt, because a shape change could cause us to
2600 * allocate blocks.
2601 */
2602 error = xfs_free_extent_fix_freelist(tp, pag, &agbp);
2603 if (error) {
2604 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
2605 return error;
2606 }
2607 if (XFS_IS_CORRUPT(tp->t_mountp, !agbp)) {
2608 xfs_ag_mark_sick(pag, XFS_SICK_AG_AGFL);
2609 return -EFSCORRUPTED;
2610 }
2611
2612 *pcur = rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, pag);
2613 }
2614
2615 xfs_rmap_ino_owner(&oinfo, ri->ri_owner, ri->ri_whichfork,
2616 ri->ri_bmap.br_startoff);
2617 unwritten = ri->ri_bmap.br_state == XFS_EXT_UNWRITTEN;
2618 bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, ri->ri_bmap.br_startblock);
2619
2620 error = __xfs_rmap_finish_intent(rcur, ri->ri_type, bno,
2621 ri->ri_bmap.br_blockcount, &oinfo, unwritten);
2622 if (error)
2623 return error;
2624
2625 xfs_rmap_update_hook(tp, ri->ri_group, ri->ri_type, bno,
2626 ri->ri_bmap.br_blockcount, unwritten, &oinfo);
2627 return 0;
2628}
2629
2630/*
2631 * Don't defer an rmap if we aren't an rmap filesystem.
2632 */
2633static bool
2634xfs_rmap_update_is_needed(
2635 struct xfs_mount *mp,
2636 int whichfork)
2637{
2638 return xfs_has_rmapbt(mp) && whichfork != XFS_COW_FORK;
2639}
2640
2641/*
2642 * Record an rmap intent; the list is kept sorted first by AG and then by
2643 * increasing age.
2644 */
2645static void
2646__xfs_rmap_add(
2647 struct xfs_trans *tp,
2648 enum xfs_rmap_intent_type type,
2649 uint64_t owner,
2650 int whichfork,
2651 struct xfs_bmbt_irec *bmap)
2652{
2653 struct xfs_rmap_intent *ri;
2654
2655 ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_KERNEL | __GFP_NOFAIL);
2656 INIT_LIST_HEAD(&ri->ri_list);
2657 ri->ri_type = type;
2658 ri->ri_owner = owner;
2659 ri->ri_whichfork = whichfork;
2660 ri->ri_bmap = *bmap;
2661
2662 xfs_rmap_defer_add(tp, ri);
2663}
2664
2665/* Map an extent into a file. */
2666void
2667xfs_rmap_map_extent(
2668 struct xfs_trans *tp,
2669 struct xfs_inode *ip,
2670 int whichfork,
2671 struct xfs_bmbt_irec *PREV)
2672{
2673 enum xfs_rmap_intent_type type = XFS_RMAP_MAP;
2674
2675 if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
2676 return;
2677
2678 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2679 type = XFS_RMAP_MAP_SHARED;
2680
2681 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2682}
2683
2684/* Unmap an extent out of a file. */
2685void
2686xfs_rmap_unmap_extent(
2687 struct xfs_trans *tp,
2688 struct xfs_inode *ip,
2689 int whichfork,
2690 struct xfs_bmbt_irec *PREV)
2691{
2692 enum xfs_rmap_intent_type type = XFS_RMAP_UNMAP;
2693
2694 if (!xfs_rmap_update_is_needed(tp->t_mountp, whichfork))
2695 return;
2696
2697 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2698 type = XFS_RMAP_UNMAP_SHARED;
2699
2700 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2701}
2702
2703/*
2704 * Convert a data fork extent from unwritten to real or vice versa.
2705 *
2706 * Note that tp can be NULL here as no transaction is used for COW fork
2707 * unwritten conversion.
2708 */
2709void
2710xfs_rmap_convert_extent(
2711 struct xfs_mount *mp,
2712 struct xfs_trans *tp,
2713 struct xfs_inode *ip,
2714 int whichfork,
2715 struct xfs_bmbt_irec *PREV)
2716{
2717 enum xfs_rmap_intent_type type = XFS_RMAP_CONVERT;
2718
2719 if (!xfs_rmap_update_is_needed(mp, whichfork))
2720 return;
2721
2722 if (whichfork != XFS_ATTR_FORK && xfs_is_reflink_inode(ip))
2723 type = XFS_RMAP_CONVERT_SHARED;
2724
2725 __xfs_rmap_add(tp, type, ip->i_ino, whichfork, PREV);
2726}
2727
2728/* Schedule the creation of an rmap for non-file data. */
2729void
2730xfs_rmap_alloc_extent(
2731 struct xfs_trans *tp,
2732 xfs_agnumber_t agno,
2733 xfs_agblock_t bno,
2734 xfs_extlen_t len,
2735 uint64_t owner)
2736{
2737 struct xfs_bmbt_irec bmap;
2738
2739 if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
2740 return;
2741
2742 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2743 bmap.br_blockcount = len;
2744 bmap.br_startoff = 0;
2745 bmap.br_state = XFS_EXT_NORM;
2746
2747 __xfs_rmap_add(tp, XFS_RMAP_ALLOC, owner, XFS_DATA_FORK, &bmap);
2748}
2749
2750/* Schedule the deletion of an rmap for non-file data. */
2751void
2752xfs_rmap_free_extent(
2753 struct xfs_trans *tp,
2754 xfs_agnumber_t agno,
2755 xfs_agblock_t bno,
2756 xfs_extlen_t len,
2757 uint64_t owner)
2758{
2759 struct xfs_bmbt_irec bmap;
2760
2761 if (!xfs_rmap_update_is_needed(tp->t_mountp, XFS_DATA_FORK))
2762 return;
2763
2764 bmap.br_startblock = XFS_AGB_TO_FSB(tp->t_mountp, agno, bno);
2765 bmap.br_blockcount = len;
2766 bmap.br_startoff = 0;
2767 bmap.br_state = XFS_EXT_NORM;
2768
2769 __xfs_rmap_add(tp, XFS_RMAP_FREE, owner, XFS_DATA_FORK, &bmap);
2770}
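
/*
 * Illustrative sketch only: a hypothetical caller scheduling the deferred
 * rmap updates for a single non-file (AG metadata) block, using the same
 * special owner code that backs XFS_RMAP_OINFO_AG below.  The function name
 * and the "freeing" flag are assumptions made for the example.
 */
static void __maybe_unused
xfs_rmap_example_track_ag_block(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	bool			freeing)
{
	if (freeing)
		xfs_rmap_free_extent(tp, agno, bno, 1, XFS_RMAP_OWN_AG);
	else
		xfs_rmap_alloc_extent(tp, agno, bno, 1, XFS_RMAP_OWN_AG);
}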
2771
2772/* Compare rmap records. Returns -1 if a < b, 1 if a > b, and 0 if equal. */
2773int
2774xfs_rmap_compare(
2775 const struct xfs_rmap_irec *a,
2776 const struct xfs_rmap_irec *b)
2777{
2778 __u64 oa;
2779 __u64 ob;
2780
2781 oa = xfs_rmap_irec_offset_pack(a);
2782 ob = xfs_rmap_irec_offset_pack(b);
2783
2784 if (a->rm_startblock < b->rm_startblock)
2785 return -1;
2786 else if (a->rm_startblock > b->rm_startblock)
2787 return 1;
2788 else if (a->rm_owner < b->rm_owner)
2789 return -1;
2790 else if (a->rm_owner > b->rm_owner)
2791 return 1;
2792 else if (oa < ob)
2793 return -1;
2794 else if (oa > ob)
2795 return 1;
2796 else
2797 return 0;
2798}
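
/*
 * Illustrative sketch only: because xfs_rmap_compare() orders records by
 * startblock, then owner, then packed offset, it can be used directly to
 * pick whichever of two records sorts first.  The helper name is an
 * assumption made for the example.
 */
static inline const struct xfs_rmap_irec *
xfs_rmap_example_earlier_rec(
	const struct xfs_rmap_irec	*a,
	const struct xfs_rmap_irec	*b)
{
	return xfs_rmap_compare(a, b) <= 0 ? a : b;
}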
2799
2800/*
2801 * Scan the physical storage part of the keyspace of the reverse mapping index
2802 * and tell us if the area has no records, is fully mapped by records, or is
2803 * partially filled.
2804 */
2805int
2806xfs_rmap_has_records(
2807 struct xfs_btree_cur *cur,
2808 xfs_agblock_t bno,
2809 xfs_extlen_t len,
2810 enum xbtree_recpacking *outcome)
2811{
2812 union xfs_btree_key mask = {
2813 .rmap.rm_startblock = cpu_to_be32(-1U),
2814 };
2815 union xfs_btree_irec low;
2816 union xfs_btree_irec high;
2817
2818 memset(&low, 0, sizeof(low));
2819 low.r.rm_startblock = bno;
2820 memset(&high, 0xFF, sizeof(high));
2821 high.r.rm_startblock = bno + len - 1;
2822
2823 return xfs_btree_has_records(cur, &low, &high, &mask, outcome);
2824}
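
/*
 * Illustrative sketch only: a hypothetical wrapper around
 * xfs_rmap_has_records() that reports whether every block in the range is
 * claimed by at least one rmap record.  The wrapper name is an assumption
 * made for the example; XBTREE_RECPACKING_FULL is the "fully mapped"
 * outcome of the keyspace scan.
 */
STATIC int __maybe_unused
xfs_rmap_example_is_fully_mapped(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	bool			*fully_mapped)
{
	enum xbtree_recpacking	outcome;
	int			error;

	error = xfs_rmap_has_records(cur, bno, len, &outcome);
	if (error)
		return error;

	*fully_mapped = outcome == XBTREE_RECPACKING_FULL;
	return 0;
}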
2825
2826struct xfs_rmap_ownercount {
2827 /* Owner that we're looking for. */
2828 struct xfs_rmap_irec good;
2829
2830 /* rmap search keys */
2831 struct xfs_rmap_irec low;
2832 struct xfs_rmap_irec high;
2833
2834 struct xfs_rmap_matches *results;
2835
2836 /* Stop early if we find a nonmatch? */
2837 bool stop_on_nonmatch;
2838};
2839
2840/* Does this rmap represent space that can have multiple owners? */
2841static inline bool
2842xfs_rmap_shareable(
2843 struct xfs_mount *mp,
2844 const struct xfs_rmap_irec *rmap)
2845{
2846 if (!xfs_has_reflink(mp))
2847 return false;
2848 if (XFS_RMAP_NON_INODE_OWNER(rmap->rm_owner))
2849 return false;
2850 if (rmap->rm_flags & (XFS_RMAP_ATTR_FORK |
2851 XFS_RMAP_BMBT_BLOCK))
2852 return false;
2853 return true;
2854}
2855
2856static inline void
2857xfs_rmap_ownercount_init(
2858 struct xfs_rmap_ownercount *roc,
2859 xfs_agblock_t bno,
2860 xfs_extlen_t len,
2861 const struct xfs_owner_info *oinfo,
2862 struct xfs_rmap_matches *results)
2863{
2864 memset(roc, 0, sizeof(*roc));
2865 roc->results = results;
2866
2867 roc->low.rm_startblock = bno;
2868 memset(&roc->high, 0xFF, sizeof(roc->high));
2869 roc->high.rm_startblock = bno + len - 1;
2870
2871 memset(results, 0, sizeof(*results));
2872 roc->good.rm_startblock = bno;
2873 roc->good.rm_blockcount = len;
2874 roc->good.rm_owner = oinfo->oi_owner;
2875 roc->good.rm_offset = oinfo->oi_offset;
2876 if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
2877 roc->good.rm_flags |= XFS_RMAP_ATTR_FORK;
2878 if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
2879 roc->good.rm_flags |= XFS_RMAP_BMBT_BLOCK;
2880}
2881
2882/* Figure out if this is a match for the owner. */
2883STATIC int
2884xfs_rmap_count_owners_helper(
2885 struct xfs_btree_cur *cur,
2886 const struct xfs_rmap_irec *rec,
2887 void *priv)
2888{
2889 struct xfs_rmap_ownercount *roc = priv;
2890 struct xfs_rmap_irec check = *rec;
2891 unsigned int keyflags;
2892 bool filedata;
2893 int64_t delta;
2894
2895 filedata = !XFS_RMAP_NON_INODE_OWNER(check.rm_owner) &&
2896 !(check.rm_flags & XFS_RMAP_BMBT_BLOCK);
2897
2898 /* Trim the part of check that comes before the comparison range. */
2899 delta = (int64_t)roc->good.rm_startblock - check.rm_startblock;
2900 if (delta > 0) {
2901 check.rm_startblock += delta;
2902 check.rm_blockcount -= delta;
2903 if (filedata)
2904 check.rm_offset += delta;
2905 }
2906
2907 /* Trim the part of check that comes after the comparison range. */
2908 delta = (check.rm_startblock + check.rm_blockcount) -
2909 (roc->good.rm_startblock + roc->good.rm_blockcount);
2910 if (delta > 0)
2911 check.rm_blockcount -= delta;
2912
2913 /* Don't care about unwritten status for establishing ownership. */
2914 keyflags = check.rm_flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK);
2915
2916 if (check.rm_startblock == roc->good.rm_startblock &&
2917 check.rm_blockcount == roc->good.rm_blockcount &&
2918 check.rm_owner == roc->good.rm_owner &&
2919 check.rm_offset == roc->good.rm_offset &&
2920 keyflags == roc->good.rm_flags) {
2921 roc->results->matches++;
2922 } else {
2923 roc->results->non_owner_matches++;
2924 if (xfs_rmap_shareable(cur->bc_mp, &roc->good) ^
2925 xfs_rmap_shareable(cur->bc_mp, &check))
2926 roc->results->bad_non_owner_matches++;
2927 }
2928
2929 if (roc->results->non_owner_matches && roc->stop_on_nonmatch)
2930 return -ECANCELED;
2931
2932 return 0;
2933}
2934
2935/* Count the number of owners and non-owners of this range of blocks. */
2936int
2937xfs_rmap_count_owners(
2938 struct xfs_btree_cur *cur,
2939 xfs_agblock_t bno,
2940 xfs_extlen_t len,
2941 const struct xfs_owner_info *oinfo,
2942 struct xfs_rmap_matches *results)
2943{
2944 struct xfs_rmap_ownercount roc;
2945 int error;
2946
2947 xfs_rmap_ownercount_init(&roc, bno, len, oinfo, results);
2948 error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
2949 xfs_rmap_count_owners_helper, &roc);
2950 if (error)
2951 return error;
2952
2953 /*
2954 * There can't be any non-owner rmaps that conflict with the given
2955 * owner if we didn't find any rmaps matching the owner.
2956 */
2957 if (!results->matches)
2958 results->bad_non_owner_matches = 0;
2959
2960 return 0;
2961}
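
/*
 * Illustrative sketch only: a hypothetical check, built on
 * xfs_rmap_count_owners(), that an extent is claimed exactly once by the
 * given owner and by nobody else.  The function name and the "sole owner"
 * semantics chosen here are assumptions made for the example.
 */
STATIC int __maybe_unused
xfs_rmap_example_is_sole_owner(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				*sole)
{
	struct xfs_rmap_matches		res;
	int				error;

	error = xfs_rmap_count_owners(cur, bno, len, oinfo, &res);
	if (error)
		return error;

	/* Exactly one owner match and no other claimants on the range. */
	*sole = res.matches == 1 && res.non_owner_matches == 0;
	return 0;
}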
2962
2963/*
2964 * Given an extent and some owner info, can we find records overlapping
2965 * the extent whose owner info does not match the given owner?
2966 */
2967int
2968xfs_rmap_has_other_keys(
2969 struct xfs_btree_cur *cur,
2970 xfs_agblock_t bno,
2971 xfs_extlen_t len,
2972 const struct xfs_owner_info *oinfo,
2973 bool *has_other)
2974{
2975 struct xfs_rmap_matches res;
2976 struct xfs_rmap_ownercount roc;
2977 int error;
2978
2979 xfs_rmap_ownercount_init(&roc, bno, len, oinfo, &res);
2980 roc.stop_on_nonmatch = true;
2981
2982 error = xfs_rmap_query_range(cur, &roc.low, &roc.high,
2983 xfs_rmap_count_owners_helper, &roc);
2984 if (error == -ECANCELED) {
2985 *has_other = true;
2986 return 0;
2987 }
2988 if (error)
2989 return error;
2990
2991 *has_other = false;
2992 return 0;
2993}
2994
2995const struct xfs_owner_info XFS_RMAP_OINFO_SKIP_UPDATE = {
2996 .oi_owner = XFS_RMAP_OWN_NULL,
2997};
2998const struct xfs_owner_info XFS_RMAP_OINFO_ANY_OWNER = {
2999 .oi_owner = XFS_RMAP_OWN_UNKNOWN,
3000};
3001const struct xfs_owner_info XFS_RMAP_OINFO_FS = {
3002 .oi_owner = XFS_RMAP_OWN_FS,
3003};
3004const struct xfs_owner_info XFS_RMAP_OINFO_LOG = {
3005 .oi_owner = XFS_RMAP_OWN_LOG,
3006};
3007const struct xfs_owner_info XFS_RMAP_OINFO_AG = {
3008 .oi_owner = XFS_RMAP_OWN_AG,
3009};
3010const struct xfs_owner_info XFS_RMAP_OINFO_INOBT = {
3011 .oi_owner = XFS_RMAP_OWN_INOBT,
3012};
3013const struct xfs_owner_info XFS_RMAP_OINFO_INODES = {
3014 .oi_owner = XFS_RMAP_OWN_INODES,
3015};
3016const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
3017 .oi_owner = XFS_RMAP_OWN_REFC,
3018};
3019const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
3020 .oi_owner = XFS_RMAP_OWN_COW,
3021};
3022
3023int __init
3024xfs_rmap_intent_init_cache(void)
3025{
3026 xfs_rmap_intent_cache = kmem_cache_create("xfs_rmap_intent",
3027 sizeof(struct xfs_rmap_intent),
3028 0, 0, NULL);
3029
3030 return xfs_rmap_intent_cache != NULL ? 0 : -ENOMEM;
3031}
3032
3033void
3034xfs_rmap_intent_destroy_cache(void)
3035{
3036 kmem_cache_destroy(xfs_rmap_intent_cache);
3037 xfs_rmap_intent_cache = NULL;
3038}