// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_remote.h"
#include "xfs_trace.h"
#include "xfs_error.h"

#define ATTR_RMTVALUE_MAPSIZE	1	/* # of map entries at once */

/*
 * Remote Attribute Values
 * =======================
 *
 * Remote extended attribute values are conceptually simple -- they're written
 * to data blocks mapped by an inode's attribute fork, and they have an upper
 * size limit of 64k.  Setting a value does not involve the XFS log.
 *
 * However, on a v5 filesystem, maximally sized remote attr values require one
 * block more than 64k worth of space to hold both the remote attribute value
 * and its header (64 bytes).  On a 4k block filesystem this results in a 68k
 * buffer; on a 64k block filesystem, this would be a 128k buffer.  Note that
 * the log format can only handle a dirty buffer of XFS_MAX_BLOCKSIZE length
 * (64k).  Therefore, we /must/ ensure that remote attribute value buffers
 * never touch the logging system and therefore never have a log item.
 */

/*
 * Each contiguous block has a header, so it is not just a simple attribute
 * length to FSB conversion.
 */
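/*
 * Worked example (using the 64-byte header size quoted in the comment at the
 * top of this file): on a v5 filesystem with 4k blocks, each block stores
 * slightly less than 4k of value data once the header is subtracted, so a
 * maximally sized 64k value needs 17 blocks rather than 16 -- the 68k buffer
 * described above.
 */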
int
xfs_attr3_rmt_blocks(
	struct xfs_mount *mp,
	int attrlen)
{
	if (xfs_has_crc(mp)) {
		int buflen = XFS_ATTR3_RMT_BUF_SPACE(mp, mp->m_sb.sb_blocksize);
		return (attrlen + buflen - 1) / buflen;
	}
	return XFS_B_TO_FSB(mp, attrlen);
}

/*
 * Checking of the remote attribute header is split into two parts. The
 * verifier does CRC, location and bounds checking; the unpacking function
 * checks the attribute parameters and owner.
 */
static xfs_failaddr_t
xfs_attr3_rmt_hdr_ok(
	void *ptr,
	xfs_ino_t ino,
	uint32_t offset,
	uint32_t size,
	xfs_daddr_t bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (bno != be64_to_cpu(rmt->rm_blkno))
		return __this_address;
	if (offset != be32_to_cpu(rmt->rm_offset))
		return __this_address;
	if (size != be32_to_cpu(rmt->rm_bytes))
		return __this_address;
	if (ino != be64_to_cpu(rmt->rm_owner))
		return __this_address;

	/* ok */
	return NULL;
}

static xfs_failaddr_t
xfs_attr3_rmt_verify(
	struct xfs_mount *mp,
	struct xfs_buf *bp,
	void *ptr,
	int fsbsize,
	xfs_daddr_t bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (!xfs_verify_magic(bp, rmt->rm_magic))
		return __this_address;
	if (!uuid_equal(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;
	if (be64_to_cpu(rmt->rm_blkno) != bno)
		return __this_address;
	if (be32_to_cpu(rmt->rm_bytes) > fsbsize - sizeof(*rmt))
		return __this_address;
	if (be32_to_cpu(rmt->rm_offset) +
	    be32_to_cpu(rmt->rm_bytes) > XFS_XATTR_SIZE_MAX)
		return __this_address;
	if (rmt->rm_owner == 0)
		return __this_address;

	return NULL;
}

static int
__xfs_attr3_rmt_read_verify(
	struct xfs_buf *bp,
	bool check_crc,
	xfs_failaddr_t *failaddr)
{
	struct xfs_mount *mp = bp->b_mount;
	char *ptr;
	int len;
	xfs_daddr_t bno;
	int blksize = mp->m_attr_geo->blksize;

	/* no verification of non-crc buffers */
	if (!xfs_has_crc(mp))
		return 0;

	ptr = bp->b_addr;
	bno = xfs_buf_daddr(bp);
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

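	/*
	 * The buffer may span several attr geometry blocks, each of which
	 * carries its own header and CRC, so walk and verify them one
	 * blksize chunk at a time.
	 */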
	while (len > 0) {
		if (check_crc &&
		    !xfs_verify_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF)) {
			*failaddr = __this_address;
			return -EFSBADCRC;
		}
		*failaddr = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
		if (*failaddr)
			return -EFSCORRUPTED;
		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (len != 0) {
		*failaddr = __this_address;
		return -EFSCORRUPTED;
	}

	return 0;
}

static void
xfs_attr3_rmt_read_verify(
	struct xfs_buf *bp)
{
	xfs_failaddr_t fa;
	int error;

	error = __xfs_attr3_rmt_read_verify(bp, true, &fa);
	if (error)
		xfs_verifier_error(bp, error, fa);
}

static xfs_failaddr_t
xfs_attr3_rmt_verify_struct(
	struct xfs_buf *bp)
{
	xfs_failaddr_t fa;
	int error;

	error = __xfs_attr3_rmt_read_verify(bp, false, &fa);
	return error ? fa : NULL;
}

static void
xfs_attr3_rmt_write_verify(
	struct xfs_buf *bp)
{
	struct xfs_mount *mp = bp->b_mount;
	xfs_failaddr_t fa;
	int blksize = mp->m_attr_geo->blksize;
	char *ptr;
	int len;
	xfs_daddr_t bno;

	/* no verification of non-crc buffers */
	if (!xfs_has_crc(mp))
		return;

	ptr = bp->b_addr;
	bno = xfs_buf_daddr(bp);
	len = BBTOB(bp->b_length);
	ASSERT(len >= blksize);

	while (len > 0) {
		struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;

		fa = xfs_attr3_rmt_verify(mp, bp, ptr, blksize, bno);
		if (fa) {
			xfs_verifier_error(bp, -EFSCORRUPTED, fa);
			return;
		}

		/*
		 * Ensure we aren't writing bogus LSNs to disk. See
		 * xfs_attr3_rmt_hdr_set() for the explanation.
		 */
		if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
			xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
			return;
		}
		xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);

		len -= blksize;
		ptr += blksize;
		bno += BTOBB(blksize);
	}

	if (len != 0)
		xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
}

const struct xfs_buf_ops xfs_attr3_rmt_buf_ops = {
	.name = "xfs_attr3_rmt",
	.magic = { 0, cpu_to_be32(XFS_ATTR3_RMT_MAGIC) },
	.verify_read = xfs_attr3_rmt_read_verify,
	.verify_write = xfs_attr3_rmt_write_verify,
	.verify_struct = xfs_attr3_rmt_verify_struct,
};

STATIC int
xfs_attr3_rmt_hdr_set(
	struct xfs_mount *mp,
	void *ptr,
	xfs_ino_t ino,
	uint32_t offset,
	uint32_t size,
	xfs_daddr_t bno)
{
	struct xfs_attr3_rmt_hdr *rmt = ptr;

	if (!xfs_has_crc(mp))
		return 0;

	rmt->rm_magic = cpu_to_be32(XFS_ATTR3_RMT_MAGIC);
	rmt->rm_offset = cpu_to_be32(offset);
	rmt->rm_bytes = cpu_to_be32(size);
	uuid_copy(&rmt->rm_uuid, &mp->m_sb.sb_meta_uuid);
	rmt->rm_owner = cpu_to_be64(ino);
	rmt->rm_blkno = cpu_to_be64(bno);

	/*
	 * Remote attribute blocks are written synchronously, so we don't
	 * have an LSN that we can stamp in them that makes any sense to log
	 * recovery. To ensure that log recovery handles overwrites of these
	 * blocks sanely (i.e. once they've been freed and reallocated as some
	 * other type of metadata) we need to ensure that the LSN has a value
	 * that tells log recovery to ignore the LSN and overwrite the buffer
	 * with whatever is in its log. To do this, we use the magic
	 * NULLCOMMITLSN to indicate that the LSN is invalid.
	 */
	rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);

	return sizeof(struct xfs_attr3_rmt_hdr);
}

/*
 * Helper functions to copy attribute data in and out of the on-disk extents.
 */
STATIC int
xfs_attr_rmtval_copyout(
	struct xfs_mount *mp,
	struct xfs_buf *bp,
	xfs_ino_t ino,
	int *offset,
	int *valuelen,
	uint8_t **dst)
{
	char *src = bp->b_addr;
	xfs_daddr_t bno = xfs_buf_daddr(bp);
	int len = BBTOB(bp->b_length);
	int blksize = mp->m_attr_geo->blksize;

	ASSERT(len >= blksize);

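	/*
	 * Each filesystem block holds an optional (v5 only) header followed
	 * by value data; advance the buffer cursor and the value cursor in
	 * lockstep until either runs out.
	 */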
	while (len > 0 && *valuelen > 0) {
		int hdr_size = 0;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);

		byte_cnt = min(*valuelen, byte_cnt);

		if (xfs_has_crc(mp)) {
			if (xfs_attr3_rmt_hdr_ok(src, ino, *offset,
						 byte_cnt, bno)) {
				xfs_alert(mp,
"remote attribute header mismatch bno/off/len/owner (0x%llx/0x%x/0x%x/0x%llx)",
					  bno, *offset, byte_cnt, ino);
				return -EFSCORRUPTED;
			}
			hdr_size = sizeof(struct xfs_attr3_rmt_hdr);
		}

		memcpy(*dst, src + hdr_size, byte_cnt);

		/* roll buffer forwards */
		len -= blksize;
		src += blksize;
		bno += BTOBB(blksize);

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*dst += byte_cnt;
		*offset += byte_cnt;
	}
	return 0;
}

STATIC void
xfs_attr_rmtval_copyin(
	struct xfs_mount *mp,
	struct xfs_buf *bp,
	xfs_ino_t ino,
	int *offset,
	int *valuelen,
	uint8_t **src)
{
	char *dst = bp->b_addr;
	xfs_daddr_t bno = xfs_buf_daddr(bp);
	int len = BBTOB(bp->b_length);
	int blksize = mp->m_attr_geo->blksize;

	ASSERT(len >= blksize);

	while (len > 0 && *valuelen > 0) {
		int hdr_size;
		int byte_cnt = XFS_ATTR3_RMT_BUF_SPACE(mp, blksize);

		byte_cnt = min(*valuelen, byte_cnt);
		hdr_size = xfs_attr3_rmt_hdr_set(mp, dst, ino, *offset,
						 byte_cnt, bno);

		memcpy(dst + hdr_size, *src, byte_cnt);

		/*
		 * If this is the last block, zero the remainder of it.
		 * Check that we are actually the last block, too.
		 */
		if (byte_cnt + hdr_size < blksize) {
			ASSERT(*valuelen - byte_cnt == 0);
			ASSERT(len == blksize);
			memset(dst + hdr_size + byte_cnt, 0,
			       blksize - hdr_size - byte_cnt);
		}

		/* roll buffer forwards */
		len -= blksize;
		dst += blksize;
		bno += BTOBB(blksize);

		/* roll attribute data forwards */
		*valuelen -= byte_cnt;
		*src += byte_cnt;
		*offset += byte_cnt;
	}
}

/*
 * Read the value associated with an attribute from the out-of-line buffer
 * that we stored it in.
 *
 * Returns 0 on successful retrieval, otherwise an error.
 */
int
xfs_attr_rmtval_get(
	struct xfs_da_args *args)
{
	struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE];
	struct xfs_mount *mp = args->dp->i_mount;
	struct xfs_buf *bp;
	xfs_dablk_t lblkno = args->rmtblkno;
	uint8_t *dst = args->value;
	int valuelen;
	int nmap;
	int error;
	int blkcnt = args->rmtblkcnt;
	int i;
	int offset = 0;

	trace_xfs_attr_rmtval_get(args);

	ASSERT(args->valuelen != 0);
	ASSERT(args->rmtvaluelen == args->valuelen);

	valuelen = args->rmtvaluelen;
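	/*
	 * Walk the remote value's mappings in the attribute fork: read each
	 * mapped extent through the remote attr verifier and copy the value
	 * data (minus any per-block headers) into the caller's buffer.
	 */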
	while (valuelen > 0) {
		nmap = ATTR_RMTVALUE_MAPSIZE;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap >= 1);

		for (i = 0; (i < nmap) && (valuelen > 0); i++) {
			xfs_daddr_t dblkno;
			int dblkcnt;

			ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) &&
			       (map[i].br_startblock != HOLESTARTBLOCK));
			dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
			dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			error = xfs_buf_read(mp->m_ddev_targp, dblkno, dblkcnt,
					     0, &bp, &xfs_attr3_rmt_buf_ops);
			if (error)
				return error;

			error = xfs_attr_rmtval_copyout(mp, bp, args->dp->i_ino,
							&offset, &valuelen,
							&dst);
			xfs_buf_relse(bp);
			if (error)
				return error;

			/* roll attribute extent map forwards */
			lblkno += map[i].br_blockcount;
			blkcnt -= map[i].br_blockcount;
		}
	}
	ASSERT(valuelen == 0);
	return 0;
}

/*
 * Find a "hole" in the attribute address space large enough for us to drop
 * the new attribute's value into.
 */
int
xfs_attr_rmt_find_hole(
	struct xfs_da_args *args)
{
	struct xfs_inode *dp = args->dp;
	struct xfs_mount *mp = dp->i_mount;
	int error;
	int blkcnt;
	xfs_fileoff_t lfileoff = 0;

	/*
	 * Because CRC-enabled attributes have headers, we can't just do a
	 * straight byte to FSB conversion and have to take the header space
	 * into account.
	 */
	blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen);
	error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff,
				      XFS_ATTR_FORK);
	if (error)
		return error;

	args->rmtblkno = (xfs_dablk_t)lfileoff;
	args->rmtblkcnt = blkcnt;

	return 0;
}

int
xfs_attr_rmtval_set_value(
	struct xfs_da_args *args)
{
	struct xfs_inode *dp = args->dp;
	struct xfs_mount *mp = dp->i_mount;
	struct xfs_bmbt_irec map;
	xfs_dablk_t lblkno;
	uint8_t *src = args->value;
	int blkcnt;
	int valuelen;
	int nmap;
	int error;
	int offset = 0;

	/*
	 * Roll through the "value", copying the attribute value to the
	 * already-allocated blocks. Blocks are written synchronously
	 * so that we can know they are all on disk before we turn off
	 * the INCOMPLETE flag.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	valuelen = args->rmtvaluelen;
	while (valuelen > 0) {
		struct xfs_buf *bp;
		xfs_daddr_t dblkno;
		int dblkcnt;

		ASSERT(blkcnt > 0);

		nmap = 1;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
				       blkcnt, &map, &nmap,
				       XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		ASSERT(nmap == 1);
		ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
		       (map.br_startblock != HOLESTARTBLOCK));

		dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);
		dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);

		error = xfs_buf_get(mp->m_ddev_targp, dblkno, dblkcnt, &bp);
		if (error)
			return error;
		bp->b_ops = &xfs_attr3_rmt_buf_ops;

		xfs_attr_rmtval_copyin(mp, bp, args->dp->i_ino, &offset,
				       &valuelen, &src);

		error = xfs_bwrite(bp);	/* GROT: NOTE: synchronous write */
		xfs_buf_relse(bp);
		if (error)
			return error;

		/* roll attribute extent map forwards */
		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}
	ASSERT(valuelen == 0);
	return 0;
}

/* Mark stale any incore buffers for the remote value. */
int
xfs_attr_rmtval_stale(
	struct xfs_inode *ip,
	struct xfs_bmbt_irec *map,
	xfs_buf_flags_t incore_flags)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_buf *bp;
	int error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_CORRUPT(mp, map->br_startblock == DELAYSTARTBLOCK) ||
	    XFS_IS_CORRUPT(mp, map->br_startblock == HOLESTARTBLOCK))
		return -EFSCORRUPTED;

	error = xfs_buf_incore(mp->m_ddev_targp,
			       XFS_FSB_TO_DADDR(mp, map->br_startblock),
			       XFS_FSB_TO_BB(mp, map->br_blockcount),
			       incore_flags, &bp);
	if (error) {
		if (error == -ENOENT)
			return 0;
		return error;
	}

	xfs_buf_stale(bp);
	xfs_buf_relse(bp);
	return 0;
}

/*
 * Find a hole for the attr and store it in the delayed attr context. This
 * initializes the context to roll through allocating an attr extent for a
 * delayed attr operation.
 */
int
xfs_attr_rmtval_find_space(
	struct xfs_attr_intent *attr)
{
	struct xfs_da_args *args = attr->xattri_da_args;
	struct xfs_bmbt_irec *map = &attr->xattri_map;
	int error;

	attr->xattri_lblkno = 0;
	attr->xattri_blkcnt = 0;
	args->rmtblkcnt = 0;
	args->rmtblkno = 0;
	memset(map, 0, sizeof(struct xfs_bmbt_irec));

	error = xfs_attr_rmt_find_hole(args);
	if (error)
		return error;

	attr->xattri_blkcnt = args->rmtblkcnt;
	attr->xattri_lblkno = args->rmtblkno;

	return 0;
}

/*
 * Write one block of the value associated with an attribute into the
 * out-of-line buffer that we have defined for it. This is similar to a subset
 * of xfs_attr_rmtval_set, but records the current block to the delayed attr
 * context, and leaves transaction handling to the caller.
 */
int
xfs_attr_rmtval_set_blk(
	struct xfs_attr_intent *attr)
{
	struct xfs_da_args *args = attr->xattri_da_args;
	struct xfs_inode *dp = args->dp;
	struct xfs_bmbt_irec *map = &attr->xattri_map;
	int nmap;
	int error;

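	/*
	 * Allocate at most one extent per call; the caller is expected to
	 * keep re-invoking this until xattri_blkcnt has been consumed.
	 */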
	nmap = 1;
	error = xfs_bmapi_write(args->trans, dp,
			(xfs_fileoff_t)attr->xattri_lblkno,
			attr->xattri_blkcnt, XFS_BMAPI_ATTRFORK, args->total,
			map, &nmap);
	if (error)
		return error;

	ASSERT(nmap == 1);
	ASSERT((map->br_startblock != DELAYSTARTBLOCK) &&
	       (map->br_startblock != HOLESTARTBLOCK));

	/* roll attribute extent map forwards */
	attr->xattri_lblkno += map->br_blockcount;
	attr->xattri_blkcnt -= map->br_blockcount;

	return 0;
}

/*
 * Remove the value associated with an attribute by deleting the
 * out-of-line buffer that it is stored on.
 */
int
xfs_attr_rmtval_invalidate(
	struct xfs_da_args *args)
{
	xfs_dablk_t lblkno;
	int blkcnt;
	int error;

	/*
	 * Roll through the "value", invalidating the attribute value's blocks.
	 */
	lblkno = args->rmtblkno;
	blkcnt = args->rmtblkcnt;
	while (blkcnt > 0) {
		struct xfs_bmbt_irec map;
		int nmap;

		/*
		 * Try to remember where we decided to put the value.
		 */
		nmap = 1;
		error = xfs_bmapi_read(args->dp, (xfs_fileoff_t)lblkno,
				       blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK);
		if (error)
			return error;
		if (XFS_IS_CORRUPT(args->dp->i_mount, nmap != 1))
			return -EFSCORRUPTED;
		error = xfs_attr_rmtval_stale(args->dp, &map, XBF_TRYLOCK);
		if (error)
			return error;

		lblkno += map.br_blockcount;
		blkcnt -= map.br_blockcount;
	}
	return 0;
}

/*
 * Remove the value associated with an attribute by deleting the out-of-line
 * buffer that it is stored on. Returns -EAGAIN for the caller to refresh the
 * transaction and re-call the function. Callers should keep calling this
 * routine until it returns something other than -EAGAIN.
 */
int
xfs_attr_rmtval_remove(
	struct xfs_attr_intent *attr)
{
	struct xfs_da_args *args = attr->xattri_da_args;
	int error, done;

	/*
	 * Unmap value blocks for this attr.
	 */
	error = xfs_bunmapi(args->trans, args->dp, args->rmtblkno,
			    args->rmtblkcnt, XFS_BMAPI_ATTRFORK, 1, &done);
	if (error)
		return error;

	/*
	 * We don't need an explicit state here to pick up where we left off.
	 * We can figure it out using the !done return code. The actual value
	 * of attr->xattri_dela_state may be some value reminiscent of the
	 * calling function, but its value is irrelevant within the context of
	 * this function. Once we are done here, the next state is set as
	 * needed by the parent.
	 */
	if (!done) {
		trace_xfs_attr_rmtval_remove_return(attr->xattri_dela_state,
						    args->dp);
		return -EAGAIN;
	}

	args->rmtblkno = 0;
	args->rmtblkcnt = 0;
	return 0;
}