1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
5 * All Rights Reserved.
6 */
7#include "xfs.h"
8#include "xfs_fs.h"
9#include "xfs_shared.h"
10#include "xfs_format.h"
11#include "xfs_log_format.h"
12#include "xfs_trans_resv.h"
13#include "xfs_bit.h"
14#include "xfs_mount.h"
15#include "xfs_defer.h"
16#include "xfs_da_format.h"
17#include "xfs_da_btree.h"
18#include "xfs_inode.h"
19#include "xfs_trans.h"
20#include "xfs_bmap.h"
21#include "xfs_attr.h"
22#include "xfs_attr_remote.h"
23#include "xfs_trace.h"
24#include "xfs_error.h"
25#include "xfs_health.h"
26
27#define ATTR_RMTVALUE_MAPSIZE 1 /* # of map entries at once */
28
29/*
30 * Remote Attribute Values
31 * =======================
32 *
33 * Remote extended attribute values are conceptually simple -- they're written
34 * to data blocks mapped by an inode's attribute fork, and they have an upper
35 * size limit of 64k. Setting a value does not involve the XFS log.
36 *
37 * However, on a v5 filesystem, maximally sized remote attr values require one
38 * block more than 64k worth of space to hold both the remote attribute value
39 * and the header (64 bytes). On a 4k block filesystem this results in a 68k buffer;
40 * on a 64k block filesystem, this would be a 128k buffer. Note that the log
41 * format can only handle a dirty buffer of XFS_MAX_BLOCKSIZE length (64k).
42 * Therefore, we /must/ ensure that remote attribute value buffers never touch
43 * the logging system and therefore never have a log item.
44 */
45
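/*
 * Worked sizing example (editor's illustration, not part of the original
 * source; it only restates the arithmetic implied by the comment above).
 * Taking the 64 byte header quoted above on a CRC enabled filesystem:
 *
 *	4k blocks:  4096 - 64 = 4032 value bytes per block, so a full 64k
 *	            (65536 byte) value needs howmany(65536, 4032) = 17
 *	            blocks, i.e. a 17 * 4096 = 68k buffer.
 *	64k blocks: 65536 - 64 = 65472 value bytes per block, so the same
 *	            value needs 2 blocks, i.e. a 128k buffer.
 */
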
46/* How many bytes can be stored in a remote value buffer? */
47inline unsigned int
48xfs_attr3_rmt_buf_space(
49 struct xfs_mount *mp)
50{
51 unsigned int blocksize = mp->m_attr_geo->blksize;
52
53 if (xfs_has_crc(mp))
54 return blocksize - sizeof(struct xfs_attr3_rmt_hdr);
55
56 return blocksize;
57}
58
59/* Compute number of fsblocks needed to store a remote attr value */
60unsigned int
61xfs_attr3_rmt_blocks(
62 struct xfs_mount *mp,
63 unsigned int attrlen)
64{
65 /*
66 * Each contiguous block has a header, so it is not just a simple
67 * attribute length to FSB conversion.
68 */
69 if (xfs_has_crc(mp))
70 return howmany(attrlen, xfs_attr3_rmt_buf_space(mp));
71
72 return XFS_B_TO_FSB(mp, attrlen);
73}
74
75/*
76 * Checking of the remote attribute header is split into two parts. The verifier
77 * does CRC, location and bounds checking; the unpacking function checks the
78 * attribute parameters and owner.
79 */
80static xfs_failaddr_t
81xfs_attr3_rmt_hdr_ok(
82 void *ptr,
83 xfs_ino_t ino,
84 uint32_t offset,
85 uint32_t size,
86 xfs_daddr_t bno)
87{
88 struct xfs_attr3_rmt_hdr *rmt = ptr;
89
90 if (bno != be64_to_cpu(rmt->rm_blkno))
91 return __this_address;
92 if (offset != be32_to_cpu(rmt->rm_offset))
93 return __this_address;
94 if (size != be32_to_cpu(rmt->rm_bytes))
95 return __this_address;
96 if (ino != be64_to_cpu(rmt->rm_owner))
97 return __this_address;
98
99 /* ok */
100 return NULL;
101}
102
103static xfs_failaddr_t
104xfs_attr3_rmt_verify(
105 struct xfs_mount *mp,
106 struct xfs_buf *bp,
107 void *ptr,
108 xfs_daddr_t bno)
109{
110 struct xfs_attr3_rmt_hdr *rmt = ptr;
111
112 if (!xfs_verify_magic(bp, rmt->rm_magic))
113 return __this_address;
114 if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
115 return __this_address;
116 if (be64_to_cpu(rmt->rm_blkno) != bno)
117 return __this_address;
118 if (be32_to_cpu(rmt->rm_bytes) > mp->m_attr_geo->blksize - sizeof(*rmt))
119 return __this_address;
120 if (be32_to_cpu(rmt->rm_offset) +
121 be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
122 return __this_address;
123 if (rmt->rm_owner == 0)
124 return __this_address;
125
126 return NULL;
127}
128
129static int
130__xfs_attr3_rmt_read_verify(
131 struct xfs_buf *bp,
132 bool check_crc,
133 xfs_failaddr_t *failaddr)
134{
135 struct xfs_mount *mp = bp->b_mount;
136 char *ptr;
137 unsigned int len;
138 xfs_daddr_t bno;
139 unsigned int blksize = mp->m_attr_geo->blksize;
140
141 /* no verification of non-crc buffers */
142 if (!xfs_has_crc(mp))
143 return 0;
144
145 ptr = bp->b_addr;
146 bno = xfs_buf_daddr(bp);
147 len = BBTOB(bp->b_length);
148 ASSERT(len >= blksize);
149
150 while (len > 0) {
151 if (check_crc &&
152 !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
153 *failaddr = __this_address;
154 return -EFSBADCRC;
155 }
156 *failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, bno);
157 if (*failaddr)
158 return -EFSCORRUPTED;
159 len -= blksize;
160 ptr += blksize;
161 bno += BTOBB(blksize);
162 }
163
164 if (len != 0) {
165 *failaddr = __this_address;
166 return -EFSCORRUPTED;
167 }
168
169 return 0;
170}
171
172static void
173xfs_attr3_rmt_read_verify(
174 struct xfs_buf *bp)
175{
176 xfs_failaddr_t fa;
177 int error;
178
179 error = __xfs_attr3_rmt_read_verify(bp, true, &fa);
180 if (error)
181 xfs_verifier_error(bp, error, fa);
182}
183
184static xfs_failaddr_t
185xfs_attr3_rmt_verify_struct(
186 struct xfs_buf *bp)
187{
188 xfs_failaddr_t fa;
189 int error;
190
191 error = __xfs_attr3_rmt_read_verify(bp, false, &fa);
192 return error ? fa : NULL;
193}
194
195static void
196xfs_attr3_rmt_write_verify(
197 struct xfs_buf *bp)
198{
199 struct xfs_mount *mp = bp->b_mount;
200 xfs_failaddr_t fa;
201 unsigned int blksize = mp->m_attr_geo->blksize;
202 char *ptr;
203 int len;
204 xfs_daddr_t bno;
205
206 /* no verification of non-crc buffers */
207 if (!xfs_has_crc(mp))
208 return;
209
210 ptr = bp->b_addr;
211 bno = xfs_buf_daddr(bp);
212 len = BBTOB(bp->b_length);
213 ASSERT(len >= blksize);
214
215 while (len > 0) {
216 struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
217
218 fa = xfs_attr3_rmt_verify(mp, bp, ptr, bno);
219 if (fa) {
220 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
221 return;
222 }
223
224 /*
225 * Ensure we aren't writing bogus LSNs to disk. See
226 * xfs_attr3_rmt_hdr_set() for the explanation.
227 */
228 if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
229 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
230 return;
231 }
232 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
233
234 len -= blksize;
235 ptr += blksize;
236 bno += BTOBB(blksize);
237 }
238
239 if (len != 0)
240 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
241}
242
243const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
244 .name = "xfs_attr3_rmt",
245 .magic = { 0, cpu_to_be32(XFS_ATTR3_RMT_MAGIC) },
246 .verify_read = xfs_attr3_rmt_read_verify,
247 .verify_write = xfs_attr3_rmt_write_verify,
248 .verify_struct = xfs_attr3_rmt_verify_struct,
249};
250
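/*
 * Editor's note on the magic table above (descriptive only): the slot for
 * pre-CRC (v4) filesystems is zero because their remote attribute blocks
 * carry no header and hence no magic number to verify, which is also why
 * the verifiers above return early when !xfs_has_crc().
 */
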
251STATIC int
252xfs_attr3_rmt_hdr_set(
253 struct xfs_mount *mp,
254 void *ptr,
255 xfs_ino_t ino,
256 uint32_t offset,
257 uint32_t size,
258 xfs_daddr_t bno)
259{
260 struct xfs_attr3_rmt_hdr *rmt = ptr;
261
262 if (!xfs_has_crc(mp))
263 return 0;
264
265 rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
266 rmt->rm_offset = cpu_to_be32(offset);
267 rmt->rm_bytes = cpu_to_be32(size);
268 uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid);
269 rmt->rm_owner = cpu_to_be64(ino);
270 rmt->rm_blkno = cpu_to_be64(bno);
271
272 /*
273 * Remote attribute blocks are written synchronously, so we don't
274 * have an LSN that we can stamp in them that makes any sense to log
275 * recovery. To ensure that log recovery handles overwrites of these
276 * blocks sanely (i.e. once they've been freed and reallocated as some
277 * other type of metadata) we need to ensure that the LSN has a value
278 * that tells log recovery to ignore the LSN and overwrite the buffer
279 * with whatever is in its log. To do this, we use the magic
280 * NULLCOMMITLSN to indicate that the LSN is invalid.
281 */
282 rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
283
284 return sizeof(struct xfs_attr3_rmt_hdr);
285}
286
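/*
 * Editor's sketch of the recovery-side behaviour this relies on (simplified
 * and paraphrased, not the verbatim implementation): log recovery compares
 * a buffer's stamped LSN with the LSN of the change being replayed and
 * skips the replay if the buffer already looks newer, roughly
 *
 *	if (lsn != NULLCOMMITLSN && XFS_LSN_CMP(lsn, current_lsn) >= 0)
 *		skip the replay;
 *	else
 *		overwrite the buffer with the logged contents;
 *
 * so stamping NULLCOMMITLSN above guarantees that a freed and reallocated
 * remote value block is always overwritten by whatever was logged for it.
 */
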
287/*
288 * Helper functions to copy attribute data in and out of the on-disk extents
289 */
290STATIC int
291xfs_attr_rmtval_copyout(
292 struct xfs_mount *mp,
293 struct xfs_buf *bp,
294 struct xfs_inode *dp,
295 xfs_ino_t owner,
296 unsigned int *offset,
297 unsigned int *valuelen,
298 uint8_t **dst)
299{
300 char *src = bp->b_addr;
301 xfs_daddr_t bno = xfs_buf_daddr(bp);
302 unsigned int len = BBTOB(bp->b_length);
303 unsigned int blksize = mp->m_attr_geo->blksize;
304
305 ASSERT(len >= blksize);
306
307 while (len > 0 && *valuelen > 0) {
308 unsigned int hdr_size = 0;
309 unsigned int byte_cnt = xfs_attr3_rmt_buf_space(mp);
310
311 byte_cnt = min(*valuelen, byte_cnt);
312
313 if (xfs_has_crc(mp)) {
314 if (xfs_attr3_rmt_hdr_ok(src, owner, *offset,
315 byte_cnt, bno)) {
316 xfs_alert(mp,
317"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/Ox%x/0x%llx)",
318 bno, *offset, byte_cnt, owner);
319 xfs_dirattr_mark_sick(dp, XFS_ATTR_FORK);
320 return -EFSCORRUPTED;
321 }
322 hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
323 }
324
325 memcpy(*dst, src + hdr_size, byte_cnt);
326
327 /* roll buffer forwards */
328 len -= blksize;
329 src += blksize;
330 bno += BTOBB(blksize);
331
332 /* roll attribute data forwards */
333 *valuelen -= byte_cnt;
334 *dst += byte_cnt;
335 *offset += byte_cnt;
336 }
337 return 0;
338}
339
340STATIC void
341xfs_attr_rmtval_copyin(
342 struct xfs_mount *mp,
343 struct xfs_buf *bp,
344 xfs_ino_t ino,
345 unsigned int *offset,
346 unsigned int *valuelen,
347 uint8_t **src)
348{
349 char *dst = bp->b_addr;
350 xfs_daddr_t bno = xfs_buf_daddr(bp);
351 unsigned int len = BBTOB(bp->b_length);
352 unsigned int blksize = mp->m_attr_geo->blksize;
353
354 ASSERT(len >= blksize);
355
356 while (len > 0 && *valuelen > 0) {
357 unsigned int hdr_size;
358 unsigned int byte_cnt = xfs_attr3_rmt_buf_space(mp);
359
360 byte_cnt = min(*valuelen, byte_cnt);
361 hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
362 byte_cnt, bno);
363
364 memcpy(dst + hdr_size, *src, byte_cnt);
365
366 /*
367 * If this is the last block, zero the remainder of it.
368 * Check that we are actually the last block, too.
369 */
370 if (byte_cnt + hdr_size < blksize) {
371 ASSERT(*valuelen - byte_cnt == 0);
372 ASSERT(len == blksize);
373 memset(dst + hdr_size + byte_cnt, 0,
374 blksize - hdr_size - byte_cnt);
375 }
376
377 /* roll buffer forwards */
378 len -= blksize;
379 dst += blksize;
380 bno += BTOBB(blksize);
381
382 /* roll attribute data forwards */
383 *valuelen -= byte_cnt;
384 *src += byte_cnt;
385 *offset += byte_cnt;
386 }
387}
388
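/*
 * Editor's illustration of the per-block layout the two helpers above walk
 * (CRC enabled case; on pre-CRC filesystems hdr_size is 0 and the whole
 * block is value data):
 *
 *	+--------------------------------+--------------------------------+
 *	| struct xfs_attr3_rmt_hdr       | value bytes, up to             |
 *	| (magic, offset, bytes, crc,    | blksize - sizeof(header);      |
 *	|  uuid, owner, blkno, lsn)      | tail of last block zeroed      |
 *	+--------------------------------+--------------------------------+
 *
 * The value is simply split across as many such blocks as needed, in
 * increasing attr fork offset order.
 */
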
389/*
390 * Read the value associated with an attribute from the out-of-line buffer
391 * that we stored it in.
392 *
393 * Returns 0 on successful retrieval, otherwise an error.
394 */
395int
396xfs_attr_rmtval_get(
397 struct xfs_da_args *args)
398{
399 struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
400 struct xfs_mount *mp = args->dp->i_mount;
401 struct xfs_buf *bp;
402 xfs_dablk_t lblkno = args->rmtblkno;
403 uint8_t *dst = args->value;
404 unsigned int valuelen;
405 int nmap;
406 int error;
407 unsigned int blkcnt = args->rmtblkcnt;
408 int i;
409 unsigned int offset = 0;
410
411 trace_xfs_attr_rmtval_get(args);
412
413 ASSERT(args->valuelen != 0);
414 ASSERT(args->rmtvaluelen == args->valuelen);
415
416 valuelen = args->rmtvaluelen;
417 while (valuelen > 0) {
418 nmap = ATTR_RMTVALUE_MAPSIZE;
419 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
420 blkcnt, map, &nmap,
421 XFS_BMAPI_ATTRFORK);
422 if (error)
423 return error;
424 ASSERT(nmap >= 1);
425
426 for (i = 0; (i < nmap) && (valuelen > 0); i++) {
427 xfs_daddr_t dblkno;
428 int dblkcnt;
429
430 ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
431 (map[i].br_startblock != HOLESTARTBLOCK));
432 dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
433 dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
434 error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
435 0, &bp, &xfs_attr3_rmt_buf_ops);
436 if (xfs_metadata_is_sick(error))
437 xfs_dirattr_mark_sick(args->dp, XFS_ATTR_FORK);
438 if (error)
439 return error;
440
441 error = xfs_attr_rmtval_copyout(mp, bp, args->dp,
442 args->owner, &offset, &valuelen, &dst);
443 xfs_buf_relse(bp);
444 if (error)
445 return error;
446
447 /* roll attribute extent map forwards */
448 lblkno += map[i].br_blockcount;
449 blkcnt -= map[i].br_blockcount;
450 }
451 }
452 ASSERT(valuelen == 0);
453 return 0;
454}
455
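/*
 * Editor's sketch of the calling convention implied by the ASSERTs and
 * field usage in xfs_attr_rmtval_get() above (an assumption drawn from
 * this file, not a documented contract): before the call the caller has
 * already looked up the attr and filled in
 *
 *	args->rmtblkno    = first attr fork block of the remote value;
 *	args->rmtblkcnt   = number of attr fork blocks mapped for it;
 *	args->rmtvaluelen = args->valuelen = size of the value in bytes;
 *	args->value       = destination buffer of at least valuelen bytes;
 *
 * and then checks the return value for 0 or a negative errno.
 */
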
456/*
457 * Find a "hole" in the attribute address space large enough for us to drop the
458 * new attribute's value into.
459 */
460int
461xfs_attr_rmt_find_hole(
462 struct xfs_da_args *args)
463{
464 struct xfs_inode *dp = args->dp;
465 struct xfs_mount *mp = dp->i_mount;
466 int error;
467 unsigned int blkcnt;
468 xfs_fileoff_t lfileoff = 0;
469
470 /*
471 * Because CRC enabled attributes have headers, we can't just do a
472 * straight byte to FSB conversion and have to take the header space
473 * into account.
474 */
475 blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
476 error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
477 XFS_ATTR_FORK);
478 if (error)
479 return error;
480
481 args->rmtblkno = (xfs_dablk_t)lfileoff;
482 args->rmtblkcnt = blkcnt;
483
484 return 0;
485}
486
487int
488xfs_attr_rmtval_set_value(
489 struct xfs_da_args *args)
490{
491 struct xfs_inode *dp = args->dp;
492 struct xfs_mount *mp = dp->i_mount;
493 struct xfs_bmbt_irec map;
494 xfs_dablk_t lblkno;
495 uint8_t *src = args->value;
496 unsigned int blkcnt;
497 unsigned int valuelen;
498 int nmap;
499 int error;
500 unsigned int offset = 0;
501
502 /*
503 * Roll through the "value", copying the attribute value to the
504 * already-allocated blocks. Blocks are written synchronously
505 * so that we can know they are all on disk before we turn off
506 * the INCOMPLETE flag.
507 */
508 lblkno = args->rmtblkno;
509 blkcnt = args->rmtblkcnt;
510 valuelen = args->rmtvaluelen;
511 while (valuelen > 0) {
512 struct xfs_buf *bp;
513 xfs_daddr_t dblkno;
514 int dblkcnt;
515
516 ASSERT(blkcnt > 0);
517
518 nmap = 1;
519 error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
520 blkcnt, &map, &nmap,
521 XFS_BMAPI_ATTRFORK);
522 if (error)
523 return error;
524 ASSERT(nmap == 1);
525 ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
526 (map.br_startblock != HOLESTARTBLOCK));
527
528 dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
529 dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
530
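		/*
		 * Editor's note: these blocks were just allocated for the
		 * value, so nothing useful can be read back from disk;
		 * xfs_buf_get() suffices because copyin below initialises
		 * the whole buffer (zeroing the tail of the last block)
		 * before it is written out.
		 */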
531 error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
532 if (error)
533 return error;
534 bp->b_ops = &xfs_attr3_rmt_buf_ops;
535
536 xfs_attr_rmtval_copyin(mp, bp, args->owner, &offset, &valuelen,
537 &src);
538
539 error = xfs_bwrite(bp); /* GROT: NOTE: synchronous write */
540 xfs_buf_relse(bp);
541 if (error)
542 return error;
543
544
545 /* roll attribute extent map forwards */
546 lblkno += map.br_blockcount;
547 blkcnt -= map.br_blockcount;
548 }
549 ASSERT(valuelen == 0);
550 return 0;
551}
552
553/* Mark stale any incore buffers for the remote value. */
554int
555xfs_attr_rmtval_stale(
556 struct xfs_inode *ip,
557 struct xfs_bmbt_irec *map,
558 xfs_buf_flags_t incore_flags)
559{
560 struct xfs_mount *mp = ip->i_mount;
561 struct xfs_buf *bp;
562 int error;
563
564 xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
565
566 if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
567 XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK)) {
568 xfs_bmap_mark_sick(ip, XFS_ATTR_FORK);
569 return -EFSCORRUPTED;
570 }
571
572 error = xfs_buf_incore(mp->m_ddev_targp,
573 XFS_FSB_TO_DADDR(mp, map->br_startblock),
574 XFS_FSB_TO_BB(mp, map->br_blockcount),
575 incore_flags, &bp);
576 if (error) {
577 if (error == -ENOENT)
578 return 0;
579 return error;
580 }
581
582 xfs_buf_stale(bp);
583 xfs_buf_relse(bp);
584 return 0;
585}
586
587/*
588 * Find a hole for the attr and store it in the delayed attr context. This
589 * initializes the context to roll through allocating an attr extent for a
590 * delayed attr operation.
591 */
592int
593xfs_attr_rmtval_find_space(
594 struct xfs_attr_intent *attr)
595{
596 struct xfs_da_args *args = attr->xattri_da_args;
597 struct xfs_bmbt_irec *map = &attr->xattri_map;
598 int error;
599
600 attr->xattri_lblkno = 0;
601 attr->xattri_blkcnt = 0;
602 args->rmtblkcnt = 0;
603 args->rmtblkno = 0;
604 memset(map, 0, sizeof(struct xfs_bmbt_irec));
605
606 error = xfs_attr_rmt_find_hole(args);
607 if (error)
608 return error;
609
610 attr->xattri_blkcnt = args->rmtblkcnt;
611 attr->xattri_lblkno = args->rmtblkno;
612
613 return 0;
614}
615
616/*
617 * Write one block of the value associated with an attribute into the
618 * out-of-line buffer that we have defined for it. This is similar to a subset
619 * of xfs_attr_rmtval_set, but records the current block to the delayed attr
620 * context, and leaves transaction handling to the caller.
621 */
622int
623xfs_attr_rmtval_set_blk(
624 struct xfs_attr_intent *attr)
625{
626 struct xfs_da_args *args = attr->xattri_da_args;
627 struct xfs_inode *dp = args->dp;
628 struct xfs_bmbt_irec *map = &attr->xattri_map;
629 int nmap;
630 int error;
631
632 nmap = 1;
633 error = xfs_bmapi_write(args->trans, dp,
634 (xfs_fileoff_t)attr->xattri_lblkno,
635 attr->xattri_blkcnt, XFS_BMAPI_ATTRFORK, args->total,
636 map, &nmap);
637 if (error)
638 return error;
639
640 ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
641 (map->br_startblock != HOLESTARTBLOCK));
642
643 /* roll attribute extent map forwards */
644 attr->xattri_lblkno += map->br_blockcount;
645 attr->xattri_blkcnt -= map->br_blockcount;
646
647 return 0;
648}
649
650/*
651 * Remove the value associated with an attribute by deleting the
652 * out-of-line buffer that it is stored on.
653 */
654int
655xfs_attr_rmtval_invalidate(
656 struct xfs_da_args *args)
657{
658 xfs_dablk_t lblkno;
659 unsigned int blkcnt;
660 int error;
661
662 /*
663 * Roll through the "value", invalidating the attribute value's blocks.
664 */
665 lblkno = args->rmtblkno;
666 blkcnt = args->rmtblkcnt;
667 while (blkcnt > 0) {
668 struct xfs_bmbt_irec map;
669 int nmap;
670
671 /*
672 * Try to remember where we decided to put the value.
673 */
674 nmap = 1;
675 error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
676 blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
677 if (error)
678 return error;
679 if (XFS_IS_CORRUPT(args->dp->i_mount, nmap != 1)) {
680 xfs_bmap_mark_sick(args->dp, XFS_ATTR_FORK);
681 return -EFSCORRUPTED;
682 }
683 error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
684 if (error)
685 return error;
686
687 lblkno += map.br_blockcount;
688 blkcnt -= map.br_blockcount;
689 }
690 return 0;
691}
692
693/*
694 * Remove the value associated with an attribute by deleting the out-of-line
695 * buffer that it is stored on. Returns -EAGAIN for the caller to refresh the
696 * transaction and re-call the function. Callers should keep calling this
697 * routine until it returns something other than -EAGAIN.
698 */
699int
700xfs_attr_rmtval_remove(
701 struct xfs_attr_intent *attr)
702{
703 struct xfs_da_args *args = attr->xattri_da_args;
704 int error, done;
705
706 /*
707 * Unmap value blocks for this attr.
708 */
709 error = xfs_bunmapi(args->trans, args->dp, args->rmtblkno,
710 args->rmtblkcnt, XFS_BMAPI_ATTRFORK, 1, &done);
711 if (error)
712 return error;
713
714 /*
715 * We don't need an explicit state here to pick up where we left off. We
716 * can figure it out using the !done return code. The actual value of
717 * attr->xattri_dela_state may be some value reminiscent of the calling
718 * function, but its value is irrelevant within the context of this
719 * function. Once we are done here, the next state is set as needed by
720 * the parent.
721 */
722 if (!done) {
723 trace_xfs_attr_rmtval_remove_return(attr->xattri_dela_state,
724 args->dp);
725 return -EAGAIN;
726 }
727
728 args->rmtblkno = 0;
729 args->rmtblkcnt = 0;
730 return 0;
731}
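
/*
 * Editor's sketch of the retry contract described above (illustrative only;
 * in practice the delayed attr state machine drives this): a caller is
 * expected to roll its transaction and call again while -EAGAIN is
 * returned, along the lines of
 *
 *	do {
 *		error = xfs_attr_rmtval_remove(attr);
 *		if (error != -EAGAIN)
 *			break;
 *		<roll args->trans before retrying>;
 *	} while (1);
 */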