Loading...
1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * Copyright (c) 2013 Red Hat, Inc.
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it would be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19#include "xfs.h"
20#include "xfs_fs.h"
21#include "xfs_shared.h"
22#include "xfs_format.h"
23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h"
25#include "xfs_bit.h"
26#include "xfs_mount.h"
27#include "xfs_da_format.h"
28#include "xfs_da_btree.h"
29#include "xfs_dir2.h"
30#include "xfs_dir2_priv.h"
31#include "xfs_inode.h"
32#include "xfs_trans.h"
33#include "xfs_inode_item.h"
34#include "xfs_alloc.h"
35#include "xfs_bmap.h"
36#include "xfs_attr.h"
37#include "xfs_attr_leaf.h"
38#include "xfs_error.h"
39#include "xfs_trace.h"
40#include "xfs_cksum.h"
41#include "xfs_buf_item.h"
42#include "xfs_log.h"
43
44/*
45 * xfs_da_btree.c
46 *
47 * Routines to implement directories as Btrees of hashed names.
48 */
49
50/*========================================================================
51 * Function prototypes for the kernel.
52 *========================================================================*/
53
54/*
55 * Routines used for growing the Btree.
56 */
57STATIC int xfs_da3_root_split(xfs_da_state_t *state,
58 xfs_da_state_blk_t *existing_root,
59 xfs_da_state_blk_t *new_child);
60STATIC int xfs_da3_node_split(xfs_da_state_t *state,
61 xfs_da_state_blk_t *existing_blk,
62 xfs_da_state_blk_t *split_blk,
63 xfs_da_state_blk_t *blk_to_add,
64 int treelevel,
65 int *result);
66STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
67 xfs_da_state_blk_t *node_blk_1,
68 xfs_da_state_blk_t *node_blk_2);
69STATIC void xfs_da3_node_add(xfs_da_state_t *state,
70 xfs_da_state_blk_t *old_node_blk,
71 xfs_da_state_blk_t *new_node_blk);
72
73/*
74 * Routines used for shrinking the Btree.
75 */
76STATIC int xfs_da3_root_join(xfs_da_state_t *state,
77 xfs_da_state_blk_t *root_blk);
78STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
79STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
80 xfs_da_state_blk_t *drop_blk);
81STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
82 xfs_da_state_blk_t *src_node_blk,
83 xfs_da_state_blk_t *dst_node_blk);
84
85/*
86 * Utility routines.
87 */
88STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
89 xfs_da_state_blk_t *drop_blk,
90 xfs_da_state_blk_t *save_blk);
91
92
93kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
94
95/*
96 * Allocate a dir-state structure.
97 * We don't put them on the stack since they're large.
98 */
99xfs_da_state_t *
100xfs_da_state_alloc(void)
101{
102 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
103}
104
105/*
106 * Kill the altpath contents of a da-state structure.
107 */
108STATIC void
109xfs_da_state_kill_altpath(xfs_da_state_t *state)
110{
111 int i;
112
113 for (i = 0; i < state->altpath.active; i++)
114 state->altpath.blk[i].bp = NULL;
115 state->altpath.active = 0;
116}
117
118/*
119 * Free a da-state structure.
120 */
121void
122xfs_da_state_free(xfs_da_state_t *state)
123{
124 xfs_da_state_kill_altpath(state);
125#ifdef DEBUG
126 memset((char *)state, 0, sizeof(*state));
127#endif /* DEBUG */
128 kmem_zone_free(xfs_da_state_zone, state);
129}
130
131static bool
132xfs_da3_node_verify(
133 struct xfs_buf *bp)
134{
135 struct xfs_mount *mp = bp->b_target->bt_mount;
136 struct xfs_da_intnode *hdr = bp->b_addr;
137 struct xfs_da3_icnode_hdr ichdr;
138 const struct xfs_dir_ops *ops;
139
140 ops = xfs_dir_get_ops(mp, NULL);
141
142 ops->node_hdr_from_disk(&ichdr, hdr);
143
144 if (xfs_sb_version_hascrc(&mp->m_sb)) {
145 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
146
147 if (ichdr.magic != XFS_DA3_NODE_MAGIC)
148 return false;
149
150 if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
151 return false;
152 if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
153 return false;
154 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
155 return false;
156 } else {
157 if (ichdr.magic != XFS_DA_NODE_MAGIC)
158 return false;
159 }
160 if (ichdr.level == 0)
161 return false;
162 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
163 return false;
164 if (ichdr.count == 0)
165 return false;
166
167 /*
168 * we don't know if the node is for and attribute or directory tree,
169 * so only fail if the count is outside both bounds
170 */
171 if (ichdr.count > mp->m_dir_geo->node_ents &&
172 ichdr.count > mp->m_attr_geo->node_ents)
173 return false;
174
175 /* XXX: hash order check? */
176
177 return true;
178}
179
180static void
181xfs_da3_node_write_verify(
182 struct xfs_buf *bp)
183{
184 struct xfs_mount *mp = bp->b_target->bt_mount;
185 struct xfs_buf_log_item *bip = bp->b_fspriv;
186 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
187
188 if (!xfs_da3_node_verify(bp)) {
189 xfs_buf_ioerror(bp, -EFSCORRUPTED);
190 xfs_verifier_error(bp);
191 return;
192 }
193
194 if (!xfs_sb_version_hascrc(&mp->m_sb))
195 return;
196
197 if (bip)
198 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
199
200 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
201}
202
203/*
204 * leaf/node format detection on trees is sketchy, so a node read can be done on
205 * leaf level blocks when detection identifies the tree as a node format tree
206 * incorrectly. In this case, we need to swap the verifier to match the correct
207 * format of the block being read.
208 */
209static void
210xfs_da3_node_read_verify(
211 struct xfs_buf *bp)
212{
213 struct xfs_da_blkinfo *info = bp->b_addr;
214
215 switch (be16_to_cpu(info->magic)) {
216 case XFS_DA3_NODE_MAGIC:
217 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
218 xfs_buf_ioerror(bp, -EFSBADCRC);
219 break;
220 }
221 /* fall through */
222 case XFS_DA_NODE_MAGIC:
223 if (!xfs_da3_node_verify(bp)) {
224 xfs_buf_ioerror(bp, -EFSCORRUPTED);
225 break;
226 }
227 return;
228 case XFS_ATTR_LEAF_MAGIC:
229 case XFS_ATTR3_LEAF_MAGIC:
230 bp->b_ops = &xfs_attr3_leaf_buf_ops;
231 bp->b_ops->verify_read(bp);
232 return;
233 case XFS_DIR2_LEAFN_MAGIC:
234 case XFS_DIR3_LEAFN_MAGIC:
235 bp->b_ops = &xfs_dir3_leafn_buf_ops;
236 bp->b_ops->verify_read(bp);
237 return;
238 default:
239 xfs_buf_ioerror(bp, -EFSCORRUPTED);
240 break;
241 }
242
243 /* corrupt block */
244 xfs_verifier_error(bp);
245}
246
247const struct xfs_buf_ops xfs_da3_node_buf_ops = {
248 .name = "xfs_da3_node",
249 .verify_read = xfs_da3_node_read_verify,
250 .verify_write = xfs_da3_node_write_verify,
251};
252
253int
254xfs_da3_node_read(
255 struct xfs_trans *tp,
256 struct xfs_inode *dp,
257 xfs_dablk_t bno,
258 xfs_daddr_t mappedbno,
259 struct xfs_buf **bpp,
260 int which_fork)
261{
262 int err;
263
264 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
265 which_fork, &xfs_da3_node_buf_ops);
266 if (!err && tp) {
267 struct xfs_da_blkinfo *info = (*bpp)->b_addr;
268 int type;
269
270 switch (be16_to_cpu(info->magic)) {
271 case XFS_DA_NODE_MAGIC:
272 case XFS_DA3_NODE_MAGIC:
273 type = XFS_BLFT_DA_NODE_BUF;
274 break;
275 case XFS_ATTR_LEAF_MAGIC:
276 case XFS_ATTR3_LEAF_MAGIC:
277 type = XFS_BLFT_ATTR_LEAF_BUF;
278 break;
279 case XFS_DIR2_LEAFN_MAGIC:
280 case XFS_DIR3_LEAFN_MAGIC:
281 type = XFS_BLFT_DIR_LEAFN_BUF;
282 break;
283 default:
284 type = 0;
285 ASSERT(0);
286 break;
287 }
288 xfs_trans_buf_set_type(tp, *bpp, type);
289 }
290 return err;
291}
292
293/*========================================================================
294 * Routines used for growing the Btree.
295 *========================================================================*/
296
297/*
298 * Create the initial contents of an intermediate node.
299 */
300int
301xfs_da3_node_create(
302 struct xfs_da_args *args,
303 xfs_dablk_t blkno,
304 int level,
305 struct xfs_buf **bpp,
306 int whichfork)
307{
308 struct xfs_da_intnode *node;
309 struct xfs_trans *tp = args->trans;
310 struct xfs_mount *mp = tp->t_mountp;
311 struct xfs_da3_icnode_hdr ichdr = {0};
312 struct xfs_buf *bp;
313 int error;
314 struct xfs_inode *dp = args->dp;
315
316 trace_xfs_da_node_create(args);
317 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
318
319 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
320 if (error)
321 return error;
322 bp->b_ops = &xfs_da3_node_buf_ops;
323 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
324 node = bp->b_addr;
325
326 if (xfs_sb_version_hascrc(&mp->m_sb)) {
327 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
328
329 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
330 ichdr.magic = XFS_DA3_NODE_MAGIC;
331 hdr3->info.blkno = cpu_to_be64(bp->b_bn);
332 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
333 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
334 } else {
335 ichdr.magic = XFS_DA_NODE_MAGIC;
336 }
337 ichdr.level = level;
338
339 dp->d_ops->node_hdr_to_disk(node, &ichdr);
340 xfs_trans_log_buf(tp, bp,
341 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
342
343 *bpp = bp;
344 return 0;
345}
346
347/*
348 * Split a leaf node, rebalance, then possibly split
349 * intermediate nodes, rebalance, etc.
350 */
351int /* error */
352xfs_da3_split(
353 struct xfs_da_state *state)
354{
355 struct xfs_da_state_blk *oldblk;
356 struct xfs_da_state_blk *newblk;
357 struct xfs_da_state_blk *addblk;
358 struct xfs_da_intnode *node;
359 int max;
360 int action = 0;
361 int error;
362 int i;
363
364 trace_xfs_da_split(state->args);
365
366 /*
367 * Walk back up the tree splitting/inserting/adjusting as necessary.
368 * If we need to insert and there isn't room, split the node, then
369 * decide which fragment to insert the new block from below into.
370 * Note that we may split the root this way, but we need more fixup.
371 */
372 max = state->path.active - 1;
373 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
374 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
375 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
376
377 addblk = &state->path.blk[max]; /* initial dummy value */
378 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
379 oldblk = &state->path.blk[i];
380 newblk = &state->altpath.blk[i];
381
382 /*
383 * If a leaf node then
384 * Allocate a new leaf node, then rebalance across them.
385 * else if an intermediate node then
386 * We split on the last layer, must we split the node?
387 */
388 switch (oldblk->magic) {
389 case XFS_ATTR_LEAF_MAGIC:
390 error = xfs_attr3_leaf_split(state, oldblk, newblk);
391 if ((error != 0) && (error != -ENOSPC)) {
392 return error; /* GROT: attr is inconsistent */
393 }
394 if (!error) {
395 addblk = newblk;
396 break;
397 }
398 /*
399 * Entry wouldn't fit, split the leaf again. The new
400 * extrablk will be consumed by xfs_da3_node_split if
401 * the node is split.
402 */
403 state->extravalid = 1;
404 if (state->inleaf) {
405 state->extraafter = 0; /* before newblk */
406 trace_xfs_attr_leaf_split_before(state->args);
407 error = xfs_attr3_leaf_split(state, oldblk,
408 &state->extrablk);
409 } else {
410 state->extraafter = 1; /* after newblk */
411 trace_xfs_attr_leaf_split_after(state->args);
412 error = xfs_attr3_leaf_split(state, newblk,
413 &state->extrablk);
414 }
415 if (error)
416 return error; /* GROT: attr inconsistent */
417 addblk = newblk;
418 break;
419 case XFS_DIR2_LEAFN_MAGIC:
420 error = xfs_dir2_leafn_split(state, oldblk, newblk);
421 if (error)
422 return error;
423 addblk = newblk;
424 break;
425 case XFS_DA_NODE_MAGIC:
426 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
427 max - i, &action);
428 addblk->bp = NULL;
429 if (error)
430 return error; /* GROT: dir is inconsistent */
431 /*
432 * Record the newly split block for the next time thru?
433 */
434 if (action)
435 addblk = newblk;
436 else
437 addblk = NULL;
438 break;
439 }
440
441 /*
442 * Update the btree to show the new hashval for this child.
443 */
444 xfs_da3_fixhashpath(state, &state->path);
445 }
446 if (!addblk)
447 return 0;
448
449 /*
450 * xfs_da3_node_split() should have consumed any extra blocks we added
451 * during a double leaf split in the attr fork. This is guaranteed as
452 * we can't be here if the attr fork only has a single leaf block.
453 */
454 ASSERT(state->extravalid == 0 ||
455 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
456
457 /*
458 * Split the root node.
459 */
460 ASSERT(state->path.active == 0);
461 oldblk = &state->path.blk[0];
462 error = xfs_da3_root_split(state, oldblk, addblk);
463 if (error) {
464 addblk->bp = NULL;
465 return error; /* GROT: dir is inconsistent */
466 }
467
468 /*
469 * Update pointers to the node which used to be block 0 and just got
470 * bumped because of the addition of a new root node. Note that the
471 * original block 0 could be at any position in the list of blocks in
472 * the tree.
473 *
474 * Note: the magic numbers and sibling pointers are in the same physical
475 * place for both v2 and v3 headers (by design). Hence it doesn't matter
476 * which version of the xfs_da_intnode structure we use here as the
477 * result will be the same using either structure.
478 */
479 node = oldblk->bp->b_addr;
480 if (node->hdr.info.forw) {
481 ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
482 node = addblk->bp->b_addr;
483 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
484 xfs_trans_log_buf(state->args->trans, addblk->bp,
485 XFS_DA_LOGRANGE(node, &node->hdr.info,
486 sizeof(node->hdr.info)));
487 }
488 node = oldblk->bp->b_addr;
489 if (node->hdr.info.back) {
490 ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
491 node = addblk->bp->b_addr;
492 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
493 xfs_trans_log_buf(state->args->trans, addblk->bp,
494 XFS_DA_LOGRANGE(node, &node->hdr.info,
495 sizeof(node->hdr.info)));
496 }
497 addblk->bp = NULL;
498 return 0;
499}
500
501/*
502 * Split the root. We have to create a new root and point to the two
503 * parts (the split old root) that we just created. Copy block zero to
504 * the EOF, extending the inode in process.
505 */
506STATIC int /* error */
507xfs_da3_root_split(
508 struct xfs_da_state *state,
509 struct xfs_da_state_blk *blk1,
510 struct xfs_da_state_blk *blk2)
511{
512 struct xfs_da_intnode *node;
513 struct xfs_da_intnode *oldroot;
514 struct xfs_da_node_entry *btree;
515 struct xfs_da3_icnode_hdr nodehdr;
516 struct xfs_da_args *args;
517 struct xfs_buf *bp;
518 struct xfs_inode *dp;
519 struct xfs_trans *tp;
520 struct xfs_dir2_leaf *leaf;
521 xfs_dablk_t blkno;
522 int level;
523 int error;
524 int size;
525
526 trace_xfs_da_root_split(state->args);
527
528 /*
529 * Copy the existing (incorrect) block from the root node position
530 * to a free space somewhere.
531 */
532 args = state->args;
533 error = xfs_da_grow_inode(args, &blkno);
534 if (error)
535 return error;
536
537 dp = args->dp;
538 tp = args->trans;
539 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
540 if (error)
541 return error;
542 node = bp->b_addr;
543 oldroot = blk1->bp->b_addr;
544 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
545 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
546 struct xfs_da3_icnode_hdr icnodehdr;
547
548 dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
549 btree = dp->d_ops->node_tree_p(oldroot);
550 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
551 level = icnodehdr.level;
552
553 /*
554 * we are about to copy oldroot to bp, so set up the type
555 * of bp while we know exactly what it will be.
556 */
557 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
558 } else {
559 struct xfs_dir3_icleaf_hdr leafhdr;
560 struct xfs_dir2_leaf_entry *ents;
561
562 leaf = (xfs_dir2_leaf_t *)oldroot;
563 dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
564 ents = dp->d_ops->leaf_ents_p(leaf);
565
566 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
567 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
568 size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
569 level = 0;
570
571 /*
572 * we are about to copy oldroot to bp, so set up the type
573 * of bp while we know exactly what it will be.
574 */
575 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
576 }
577
578 /*
579 * we can copy most of the information in the node from one block to
580 * another, but for CRC enabled headers we have to make sure that the
581 * block specific identifiers are kept intact. We update the buffer
582 * directly for this.
583 */
584 memcpy(node, oldroot, size);
585 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
586 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
587 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
588
589 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
590 }
591 xfs_trans_log_buf(tp, bp, 0, size - 1);
592
593 bp->b_ops = blk1->bp->b_ops;
594 xfs_trans_buf_copy_type(bp, blk1->bp);
595 blk1->bp = bp;
596 blk1->blkno = blkno;
597
598 /*
599 * Set up the new root node.
600 */
601 error = xfs_da3_node_create(args,
602 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
603 level + 1, &bp, args->whichfork);
604 if (error)
605 return error;
606
607 node = bp->b_addr;
608 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
609 btree = dp->d_ops->node_tree_p(node);
610 btree[0].hashval = cpu_to_be32(blk1->hashval);
611 btree[0].before = cpu_to_be32(blk1->blkno);
612 btree[1].hashval = cpu_to_be32(blk2->hashval);
613 btree[1].before = cpu_to_be32(blk2->blkno);
614 nodehdr.count = 2;
615 dp->d_ops->node_hdr_to_disk(node, &nodehdr);
616
617#ifdef DEBUG
618 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
619 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
620 ASSERT(blk1->blkno >= args->geo->leafblk &&
621 blk1->blkno < args->geo->freeblk);
622 ASSERT(blk2->blkno >= args->geo->leafblk &&
623 blk2->blkno < args->geo->freeblk);
624 }
625#endif
626
627 /* Header is already logged by xfs_da_node_create */
628 xfs_trans_log_buf(tp, bp,
629 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
630
631 return 0;
632}
633
634/*
635 * Split the node, rebalance, then add the new entry.
636 */
637STATIC int /* error */
638xfs_da3_node_split(
639 struct xfs_da_state *state,
640 struct xfs_da_state_blk *oldblk,
641 struct xfs_da_state_blk *newblk,
642 struct xfs_da_state_blk *addblk,
643 int treelevel,
644 int *result)
645{
646 struct xfs_da_intnode *node;
647 struct xfs_da3_icnode_hdr nodehdr;
648 xfs_dablk_t blkno;
649 int newcount;
650 int error;
651 int useextra;
652 struct xfs_inode *dp = state->args->dp;
653
654 trace_xfs_da_node_split(state->args);
655
656 node = oldblk->bp->b_addr;
657 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
658
659 /*
660 * With V2 dirs the extra block is data or freespace.
661 */
662 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
663 newcount = 1 + useextra;
664 /*
665 * Do we have to split the node?
666 */
667 if (nodehdr.count + newcount > state->args->geo->node_ents) {
668 /*
669 * Allocate a new node, add to the doubly linked chain of
670 * nodes, then move some of our excess entries into it.
671 */
672 error = xfs_da_grow_inode(state->args, &blkno);
673 if (error)
674 return error; /* GROT: dir is inconsistent */
675
676 error = xfs_da3_node_create(state->args, blkno, treelevel,
677 &newblk->bp, state->args->whichfork);
678 if (error)
679 return error; /* GROT: dir is inconsistent */
680 newblk->blkno = blkno;
681 newblk->magic = XFS_DA_NODE_MAGIC;
682 xfs_da3_node_rebalance(state, oldblk, newblk);
683 error = xfs_da3_blk_link(state, oldblk, newblk);
684 if (error)
685 return error;
686 *result = 1;
687 } else {
688 *result = 0;
689 }
690
691 /*
692 * Insert the new entry(s) into the correct block
693 * (updating last hashval in the process).
694 *
695 * xfs_da3_node_add() inserts BEFORE the given index,
696 * and as a result of using node_lookup_int() we always
697 * point to a valid entry (not after one), but a split
698 * operation always results in a new block whose hashvals
699 * FOLLOW the current block.
700 *
701 * If we had double-split op below us, then add the extra block too.
702 */
703 node = oldblk->bp->b_addr;
704 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
705 if (oldblk->index <= nodehdr.count) {
706 oldblk->index++;
707 xfs_da3_node_add(state, oldblk, addblk);
708 if (useextra) {
709 if (state->extraafter)
710 oldblk->index++;
711 xfs_da3_node_add(state, oldblk, &state->extrablk);
712 state->extravalid = 0;
713 }
714 } else {
715 newblk->index++;
716 xfs_da3_node_add(state, newblk, addblk);
717 if (useextra) {
718 if (state->extraafter)
719 newblk->index++;
720 xfs_da3_node_add(state, newblk, &state->extrablk);
721 state->extravalid = 0;
722 }
723 }
724
725 return 0;
726}
727
728/*
729 * Balance the btree elements between two intermediate nodes,
730 * usually one full and one empty.
731 *
732 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
733 */
734STATIC void
735xfs_da3_node_rebalance(
736 struct xfs_da_state *state,
737 struct xfs_da_state_blk *blk1,
738 struct xfs_da_state_blk *blk2)
739{
740 struct xfs_da_intnode *node1;
741 struct xfs_da_intnode *node2;
742 struct xfs_da_intnode *tmpnode;
743 struct xfs_da_node_entry *btree1;
744 struct xfs_da_node_entry *btree2;
745 struct xfs_da_node_entry *btree_s;
746 struct xfs_da_node_entry *btree_d;
747 struct xfs_da3_icnode_hdr nodehdr1;
748 struct xfs_da3_icnode_hdr nodehdr2;
749 struct xfs_trans *tp;
750 int count;
751 int tmp;
752 int swap = 0;
753 struct xfs_inode *dp = state->args->dp;
754
755 trace_xfs_da_node_rebalance(state->args);
756
757 node1 = blk1->bp->b_addr;
758 node2 = blk2->bp->b_addr;
759 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
760 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
761 btree1 = dp->d_ops->node_tree_p(node1);
762 btree2 = dp->d_ops->node_tree_p(node2);
763
764 /*
765 * Figure out how many entries need to move, and in which direction.
766 * Swap the nodes around if that makes it simpler.
767 */
768 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
769 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
770 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
771 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
772 tmpnode = node1;
773 node1 = node2;
774 node2 = tmpnode;
775 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
776 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
777 btree1 = dp->d_ops->node_tree_p(node1);
778 btree2 = dp->d_ops->node_tree_p(node2);
779 swap = 1;
780 }
781
782 count = (nodehdr1.count - nodehdr2.count) / 2;
783 if (count == 0)
784 return;
785 tp = state->args->trans;
786 /*
787 * Two cases: high-to-low and low-to-high.
788 */
789 if (count > 0) {
790 /*
791 * Move elements in node2 up to make a hole.
792 */
793 tmp = nodehdr2.count;
794 if (tmp > 0) {
795 tmp *= (uint)sizeof(xfs_da_node_entry_t);
796 btree_s = &btree2[0];
797 btree_d = &btree2[count];
798 memmove(btree_d, btree_s, tmp);
799 }
800
801 /*
802 * Move the req'd B-tree elements from high in node1 to
803 * low in node2.
804 */
805 nodehdr2.count += count;
806 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
807 btree_s = &btree1[nodehdr1.count - count];
808 btree_d = &btree2[0];
809 memcpy(btree_d, btree_s, tmp);
810 nodehdr1.count -= count;
811 } else {
812 /*
813 * Move the req'd B-tree elements from low in node2 to
814 * high in node1.
815 */
816 count = -count;
817 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
818 btree_s = &btree2[0];
819 btree_d = &btree1[nodehdr1.count];
820 memcpy(btree_d, btree_s, tmp);
821 nodehdr1.count += count;
822
823 xfs_trans_log_buf(tp, blk1->bp,
824 XFS_DA_LOGRANGE(node1, btree_d, tmp));
825
826 /*
827 * Move elements in node2 down to fill the hole.
828 */
829 tmp = nodehdr2.count - count;
830 tmp *= (uint)sizeof(xfs_da_node_entry_t);
831 btree_s = &btree2[count];
832 btree_d = &btree2[0];
833 memmove(btree_d, btree_s, tmp);
834 nodehdr2.count -= count;
835 }
836
837 /*
838 * Log header of node 1 and all current bits of node 2.
839 */
840 dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
841 xfs_trans_log_buf(tp, blk1->bp,
842 XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
843
844 dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
845 xfs_trans_log_buf(tp, blk2->bp,
846 XFS_DA_LOGRANGE(node2, &node2->hdr,
847 dp->d_ops->node_hdr_size +
848 (sizeof(btree2[0]) * nodehdr2.count)));
849
850 /*
851 * Record the last hashval from each block for upward propagation.
852 * (note: don't use the swapped node pointers)
853 */
854 if (swap) {
855 node1 = blk1->bp->b_addr;
856 node2 = blk2->bp->b_addr;
857 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
858 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
859 btree1 = dp->d_ops->node_tree_p(node1);
860 btree2 = dp->d_ops->node_tree_p(node2);
861 }
862 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
863 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
864
865 /*
866 * Adjust the expected index for insertion.
867 */
868 if (blk1->index >= nodehdr1.count) {
869 blk2->index = blk1->index - nodehdr1.count;
870 blk1->index = nodehdr1.count + 1; /* make it invalid */
871 }
872}
873
874/*
875 * Add a new entry to an intermediate node.
876 */
877STATIC void
878xfs_da3_node_add(
879 struct xfs_da_state *state,
880 struct xfs_da_state_blk *oldblk,
881 struct xfs_da_state_blk *newblk)
882{
883 struct xfs_da_intnode *node;
884 struct xfs_da3_icnode_hdr nodehdr;
885 struct xfs_da_node_entry *btree;
886 int tmp;
887 struct xfs_inode *dp = state->args->dp;
888
889 trace_xfs_da_node_add(state->args);
890
891 node = oldblk->bp->b_addr;
892 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
893 btree = dp->d_ops->node_tree_p(node);
894
895 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
896 ASSERT(newblk->blkno != 0);
897 if (state->args->whichfork == XFS_DATA_FORK)
898 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
899 newblk->blkno < state->args->geo->freeblk);
900
901 /*
902 * We may need to make some room before we insert the new node.
903 */
904 tmp = 0;
905 if (oldblk->index < nodehdr.count) {
906 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
907 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
908 }
909 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
910 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
911 xfs_trans_log_buf(state->args->trans, oldblk->bp,
912 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
913 tmp + sizeof(*btree)));
914
915 nodehdr.count += 1;
916 dp->d_ops->node_hdr_to_disk(node, &nodehdr);
917 xfs_trans_log_buf(state->args->trans, oldblk->bp,
918 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
919
920 /*
921 * Copy the last hash value from the oldblk to propagate upwards.
922 */
923 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
924}
925
926/*========================================================================
927 * Routines used for shrinking the Btree.
928 *========================================================================*/
929
930/*
931 * Deallocate an empty leaf node, remove it from its parent,
932 * possibly deallocating that block, etc...
933 */
934int
935xfs_da3_join(
936 struct xfs_da_state *state)
937{
938 struct xfs_da_state_blk *drop_blk;
939 struct xfs_da_state_blk *save_blk;
940 int action = 0;
941 int error;
942
943 trace_xfs_da_join(state->args);
944
945 drop_blk = &state->path.blk[ state->path.active-1 ];
946 save_blk = &state->altpath.blk[ state->path.active-1 ];
947 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
948 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
949 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
950
951 /*
952 * Walk back up the tree joining/deallocating as necessary.
953 * When we stop dropping blocks, break out.
954 */
955 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
956 state->path.active--) {
957 /*
958 * See if we can combine the block with a neighbor.
959 * (action == 0) => no options, just leave
960 * (action == 1) => coalesce, then unlink
961 * (action == 2) => block empty, unlink it
962 */
963 switch (drop_blk->magic) {
964 case XFS_ATTR_LEAF_MAGIC:
965 error = xfs_attr3_leaf_toosmall(state, &action);
966 if (error)
967 return error;
968 if (action == 0)
969 return 0;
970 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
971 break;
972 case XFS_DIR2_LEAFN_MAGIC:
973 error = xfs_dir2_leafn_toosmall(state, &action);
974 if (error)
975 return error;
976 if (action == 0)
977 return 0;
978 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
979 break;
980 case XFS_DA_NODE_MAGIC:
981 /*
982 * Remove the offending node, fixup hashvals,
983 * check for a toosmall neighbor.
984 */
985 xfs_da3_node_remove(state, drop_blk);
986 xfs_da3_fixhashpath(state, &state->path);
987 error = xfs_da3_node_toosmall(state, &action);
988 if (error)
989 return error;
990 if (action == 0)
991 return 0;
992 xfs_da3_node_unbalance(state, drop_blk, save_blk);
993 break;
994 }
995 xfs_da3_fixhashpath(state, &state->altpath);
996 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
997 xfs_da_state_kill_altpath(state);
998 if (error)
999 return error;
1000 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1001 drop_blk->bp);
1002 drop_blk->bp = NULL;
1003 if (error)
1004 return error;
1005 }
1006 /*
1007 * We joined all the way to the top. If it turns out that
1008 * we only have one entry in the root, make the child block
1009 * the new root.
1010 */
1011 xfs_da3_node_remove(state, drop_blk);
1012 xfs_da3_fixhashpath(state, &state->path);
1013 error = xfs_da3_root_join(state, &state->path.blk[0]);
1014 return error;
1015}
1016
1017#ifdef DEBUG
1018static void
1019xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1020{
1021 __be16 magic = blkinfo->magic;
1022
1023 if (level == 1) {
1024 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1025 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1026 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1027 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1028 } else {
1029 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1030 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1031 }
1032 ASSERT(!blkinfo->forw);
1033 ASSERT(!blkinfo->back);
1034}
1035#else /* !DEBUG */
1036#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1037#endif /* !DEBUG */
1038
1039/*
1040 * We have only one entry in the root. Copy the only remaining child of
1041 * the old root to block 0 as the new root node.
1042 */
1043STATIC int
1044xfs_da3_root_join(
1045 struct xfs_da_state *state,
1046 struct xfs_da_state_blk *root_blk)
1047{
1048 struct xfs_da_intnode *oldroot;
1049 struct xfs_da_args *args;
1050 xfs_dablk_t child;
1051 struct xfs_buf *bp;
1052 struct xfs_da3_icnode_hdr oldroothdr;
1053 struct xfs_da_node_entry *btree;
1054 int error;
1055 struct xfs_inode *dp = state->args->dp;
1056
1057 trace_xfs_da_root_join(state->args);
1058
1059 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1060
1061 args = state->args;
1062 oldroot = root_blk->bp->b_addr;
1063 dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
1064 ASSERT(oldroothdr.forw == 0);
1065 ASSERT(oldroothdr.back == 0);
1066
1067 /*
1068 * If the root has more than one child, then don't do anything.
1069 */
1070 if (oldroothdr.count > 1)
1071 return 0;
1072
1073 /*
1074 * Read in the (only) child block, then copy those bytes into
1075 * the root block's buffer and free the original child block.
1076 */
1077 btree = dp->d_ops->node_tree_p(oldroot);
1078 child = be32_to_cpu(btree[0].before);
1079 ASSERT(child != 0);
1080 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
1081 args->whichfork);
1082 if (error)
1083 return error;
1084 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1085
1086 /*
1087 * This could be copying a leaf back into the root block in the case of
1088 * there only being a single leaf block left in the tree. Hence we have
1089 * to update the b_ops pointer as well to match the buffer type change
1090 * that could occur. For dir3 blocks we also need to update the block
1091 * number in the buffer header.
1092 */
1093 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1094 root_blk->bp->b_ops = bp->b_ops;
1095 xfs_trans_buf_copy_type(root_blk->bp, bp);
1096 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1097 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1098 da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1099 }
1100 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1101 args->geo->blksize - 1);
1102 error = xfs_da_shrink_inode(args, child, bp);
1103 return error;
1104}
1105
1106/*
1107 * Check a node block and its neighbors to see if the block should be
1108 * collapsed into one or the other neighbor. Always keep the block
1109 * with the smaller block number.
1110 * If the current block is over 50% full, don't try to join it, return 0.
1111 * If the block is empty, fill in the state structure and return 2.
1112 * If it can be collapsed, fill in the state structure and return 1.
1113 * If nothing can be done, return 0.
1114 */
1115STATIC int
1116xfs_da3_node_toosmall(
1117 struct xfs_da_state *state,
1118 int *action)
1119{
1120 struct xfs_da_intnode *node;
1121 struct xfs_da_state_blk *blk;
1122 struct xfs_da_blkinfo *info;
1123 xfs_dablk_t blkno;
1124 struct xfs_buf *bp;
1125 struct xfs_da3_icnode_hdr nodehdr;
1126 int count;
1127 int forward;
1128 int error;
1129 int retval;
1130 int i;
1131 struct xfs_inode *dp = state->args->dp;
1132
1133 trace_xfs_da_node_toosmall(state->args);
1134
1135 /*
1136 * Check for the degenerate case of the block being over 50% full.
1137 * If so, it's not worth even looking to see if we might be able
1138 * to coalesce with a sibling.
1139 */
1140 blk = &state->path.blk[ state->path.active-1 ];
1141 info = blk->bp->b_addr;
1142 node = (xfs_da_intnode_t *)info;
1143 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1144 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1145 *action = 0; /* blk over 50%, don't try to join */
1146 return 0; /* blk over 50%, don't try to join */
1147 }
1148
1149 /*
1150 * Check for the degenerate case of the block being empty.
1151 * If the block is empty, we'll simply delete it, no need to
1152 * coalesce it with a sibling block. We choose (arbitrarily)
1153 * to merge with the forward block unless it is NULL.
1154 */
1155 if (nodehdr.count == 0) {
1156 /*
1157 * Make altpath point to the block we want to keep and
1158 * path point to the block we want to drop (this one).
1159 */
1160 forward = (info->forw != 0);
1161 memcpy(&state->altpath, &state->path, sizeof(state->path));
1162 error = xfs_da3_path_shift(state, &state->altpath, forward,
1163 0, &retval);
1164 if (error)
1165 return error;
1166 if (retval) {
1167 *action = 0;
1168 } else {
1169 *action = 2;
1170 }
1171 return 0;
1172 }
1173
1174 /*
1175 * Examine each sibling block to see if we can coalesce with
1176 * at least 25% free space to spare. We need to figure out
1177 * whether to merge with the forward or the backward block.
1178 * We prefer coalescing with the lower numbered sibling so as
1179 * to shrink a directory over time.
1180 */
1181 count = state->args->geo->node_ents;
1182 count -= state->args->geo->node_ents >> 2;
1183 count -= nodehdr.count;
1184
1185 /* start with smaller blk num */
1186 forward = nodehdr.forw < nodehdr.back;
1187 for (i = 0; i < 2; forward = !forward, i++) {
1188 struct xfs_da3_icnode_hdr thdr;
1189 if (forward)
1190 blkno = nodehdr.forw;
1191 else
1192 blkno = nodehdr.back;
1193 if (blkno == 0)
1194 continue;
1195 error = xfs_da3_node_read(state->args->trans, dp,
1196 blkno, -1, &bp, state->args->whichfork);
1197 if (error)
1198 return error;
1199
1200 node = bp->b_addr;
1201 dp->d_ops->node_hdr_from_disk(&thdr, node);
1202 xfs_trans_brelse(state->args->trans, bp);
1203
1204 if (count - thdr.count >= 0)
1205 break; /* fits with at least 25% to spare */
1206 }
1207 if (i >= 2) {
1208 *action = 0;
1209 return 0;
1210 }
1211
1212 /*
1213 * Make altpath point to the block we want to keep (the lower
1214 * numbered block) and path point to the block we want to drop.
1215 */
1216 memcpy(&state->altpath, &state->path, sizeof(state->path));
1217 if (blkno < blk->blkno) {
1218 error = xfs_da3_path_shift(state, &state->altpath, forward,
1219 0, &retval);
1220 } else {
1221 error = xfs_da3_path_shift(state, &state->path, forward,
1222 0, &retval);
1223 }
1224 if (error)
1225 return error;
1226 if (retval) {
1227 *action = 0;
1228 return 0;
1229 }
1230 *action = 1;
1231 return 0;
1232}
1233
1234/*
1235 * Pick up the last hashvalue from an intermediate node.
1236 */
1237STATIC uint
1238xfs_da3_node_lasthash(
1239 struct xfs_inode *dp,
1240 struct xfs_buf *bp,
1241 int *count)
1242{
1243 struct xfs_da_intnode *node;
1244 struct xfs_da_node_entry *btree;
1245 struct xfs_da3_icnode_hdr nodehdr;
1246
1247 node = bp->b_addr;
1248 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1249 if (count)
1250 *count = nodehdr.count;
1251 if (!nodehdr.count)
1252 return 0;
1253 btree = dp->d_ops->node_tree_p(node);
1254 return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1255}
1256
1257/*
1258 * Walk back up the tree adjusting hash values as necessary,
1259 * when we stop making changes, return.
1260 */
1261void
1262xfs_da3_fixhashpath(
1263 struct xfs_da_state *state,
1264 struct xfs_da_state_path *path)
1265{
1266 struct xfs_da_state_blk *blk;
1267 struct xfs_da_intnode *node;
1268 struct xfs_da_node_entry *btree;
1269 xfs_dahash_t lasthash=0;
1270 int level;
1271 int count;
1272 struct xfs_inode *dp = state->args->dp;
1273
1274 trace_xfs_da_fixhashpath(state->args);
1275
1276 level = path->active-1;
1277 blk = &path->blk[ level ];
1278 switch (blk->magic) {
1279 case XFS_ATTR_LEAF_MAGIC:
1280 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1281 if (count == 0)
1282 return;
1283 break;
1284 case XFS_DIR2_LEAFN_MAGIC:
1285 lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
1286 if (count == 0)
1287 return;
1288 break;
1289 case XFS_DA_NODE_MAGIC:
1290 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1291 if (count == 0)
1292 return;
1293 break;
1294 }
1295 for (blk--, level--; level >= 0; blk--, level--) {
1296 struct xfs_da3_icnode_hdr nodehdr;
1297
1298 node = blk->bp->b_addr;
1299 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1300 btree = dp->d_ops->node_tree_p(node);
1301 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1302 break;
1303 blk->hashval = lasthash;
1304 btree[blk->index].hashval = cpu_to_be32(lasthash);
1305 xfs_trans_log_buf(state->args->trans, blk->bp,
1306 XFS_DA_LOGRANGE(node, &btree[blk->index],
1307 sizeof(*btree)));
1308
1309 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1310 }
1311}
1312
1313/*
1314 * Remove an entry from an intermediate node.
1315 */
1316STATIC void
1317xfs_da3_node_remove(
1318 struct xfs_da_state *state,
1319 struct xfs_da_state_blk *drop_blk)
1320{
1321 struct xfs_da_intnode *node;
1322 struct xfs_da3_icnode_hdr nodehdr;
1323 struct xfs_da_node_entry *btree;
1324 int index;
1325 int tmp;
1326 struct xfs_inode *dp = state->args->dp;
1327
1328 trace_xfs_da_node_remove(state->args);
1329
1330 node = drop_blk->bp->b_addr;
1331 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1332 ASSERT(drop_blk->index < nodehdr.count);
1333 ASSERT(drop_blk->index >= 0);
1334
1335 /*
1336 * Copy over the offending entry, or just zero it out.
1337 */
1338 index = drop_blk->index;
1339 btree = dp->d_ops->node_tree_p(node);
1340 if (index < nodehdr.count - 1) {
1341 tmp = nodehdr.count - index - 1;
1342 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1343 memmove(&btree[index], &btree[index + 1], tmp);
1344 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1345 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1346 index = nodehdr.count - 1;
1347 }
1348 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1349 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1350 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1351 nodehdr.count -= 1;
1352 dp->d_ops->node_hdr_to_disk(node, &nodehdr);
1353 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1354 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
1355
1356 /*
1357 * Copy the last hash value from the block to propagate upwards.
1358 */
1359 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1360}
1361
1362/*
1363 * Unbalance the elements between two intermediate nodes,
1364 * move all Btree elements from one node into another.
1365 */
1366STATIC void
1367xfs_da3_node_unbalance(
1368 struct xfs_da_state *state,
1369 struct xfs_da_state_blk *drop_blk,
1370 struct xfs_da_state_blk *save_blk)
1371{
1372 struct xfs_da_intnode *drop_node;
1373 struct xfs_da_intnode *save_node;
1374 struct xfs_da_node_entry *drop_btree;
1375 struct xfs_da_node_entry *save_btree;
1376 struct xfs_da3_icnode_hdr drop_hdr;
1377 struct xfs_da3_icnode_hdr save_hdr;
1378 struct xfs_trans *tp;
1379 int sindex;
1380 int tmp;
1381 struct xfs_inode *dp = state->args->dp;
1382
1383 trace_xfs_da_node_unbalance(state->args);
1384
1385 drop_node = drop_blk->bp->b_addr;
1386 save_node = save_blk->bp->b_addr;
1387 dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
1388 dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
1389 drop_btree = dp->d_ops->node_tree_p(drop_node);
1390 save_btree = dp->d_ops->node_tree_p(save_node);
1391 tp = state->args->trans;
1392
1393 /*
1394 * If the dying block has lower hashvals, then move all the
1395 * elements in the remaining block up to make a hole.
1396 */
1397 if ((be32_to_cpu(drop_btree[0].hashval) <
1398 be32_to_cpu(save_btree[0].hashval)) ||
1399 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1400 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1401 /* XXX: check this - is memmove dst correct? */
1402 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1403 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1404
1405 sindex = 0;
1406 xfs_trans_log_buf(tp, save_blk->bp,
1407 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1408 (save_hdr.count + drop_hdr.count) *
1409 sizeof(xfs_da_node_entry_t)));
1410 } else {
1411 sindex = save_hdr.count;
1412 xfs_trans_log_buf(tp, save_blk->bp,
1413 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1414 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1415 }
1416
1417 /*
1418 * Move all the B-tree elements from drop_blk to save_blk.
1419 */
1420 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1421 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1422 save_hdr.count += drop_hdr.count;
1423
1424 dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
1425 xfs_trans_log_buf(tp, save_blk->bp,
1426 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1427 dp->d_ops->node_hdr_size));
1428
1429 /*
1430 * Save the last hashval in the remaining block for upward propagation.
1431 */
1432 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1433}
1434
1435/*========================================================================
1436 * Routines used for finding things in the Btree.
1437 *========================================================================*/
1438
1439/*
1440 * Walk down the Btree looking for a particular filename, filling
1441 * in the state structure as we go.
1442 *
1443 * We will set the state structure to point to each of the elements
1444 * in each of the nodes where either the hashval is or should be.
1445 *
1446 * We support duplicate hashval's so for each entry in the current
1447 * node that could contain the desired hashval, descend. This is a
1448 * pruned depth-first tree search.
1449 */
1450int /* error */
1451xfs_da3_node_lookup_int(
1452 struct xfs_da_state *state,
1453 int *result)
1454{
1455 struct xfs_da_state_blk *blk;
1456 struct xfs_da_blkinfo *curr;
1457 struct xfs_da_intnode *node;
1458 struct xfs_da_node_entry *btree;
1459 struct xfs_da3_icnode_hdr nodehdr;
1460 struct xfs_da_args *args;
1461 xfs_dablk_t blkno;
1462 xfs_dahash_t hashval;
1463 xfs_dahash_t btreehashval;
1464 int probe;
1465 int span;
1466 int max;
1467 int error;
1468 int retval;
1469 struct xfs_inode *dp = state->args->dp;
1470
1471 args = state->args;
1472
1473 /*
1474 * Descend thru the B-tree searching each level for the right
1475 * node to use, until the right hashval is found.
1476 */
1477 blkno = (args->whichfork == XFS_DATA_FORK)? args->geo->leafblk : 0;
1478 for (blk = &state->path.blk[0], state->path.active = 1;
1479 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1480 blk++, state->path.active++) {
1481 /*
1482 * Read the next node down in the tree.
1483 */
1484 blk->blkno = blkno;
1485 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1486 -1, &blk->bp, args->whichfork);
1487 if (error) {
1488 blk->blkno = 0;
1489 state->path.active--;
1490 return error;
1491 }
1492 curr = blk->bp->b_addr;
1493 blk->magic = be16_to_cpu(curr->magic);
1494
1495 if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
1496 blk->magic == XFS_ATTR3_LEAF_MAGIC) {
1497 blk->magic = XFS_ATTR_LEAF_MAGIC;
1498 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1499 break;
1500 }
1501
1502 if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1503 blk->magic == XFS_DIR3_LEAFN_MAGIC) {
1504 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1505 blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1506 blk->bp, NULL);
1507 break;
1508 }
1509
1510 blk->magic = XFS_DA_NODE_MAGIC;
1511
1512
1513 /*
1514 * Search an intermediate node for a match.
1515 */
1516 node = blk->bp->b_addr;
1517 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1518 btree = dp->d_ops->node_tree_p(node);
1519
1520 max = nodehdr.count;
1521 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1522
1523 /*
1524 * Binary search. (note: small blocks will skip loop)
1525 */
1526 probe = span = max / 2;
1527 hashval = args->hashval;
1528 while (span > 4) {
1529 span /= 2;
1530 btreehashval = be32_to_cpu(btree[probe].hashval);
1531 if (btreehashval < hashval)
1532 probe += span;
1533 else if (btreehashval > hashval)
1534 probe -= span;
1535 else
1536 break;
1537 }
1538 ASSERT((probe >= 0) && (probe < max));
1539 ASSERT((span <= 4) ||
1540 (be32_to_cpu(btree[probe].hashval) == hashval));
1541
1542 /*
1543 * Since we may have duplicate hashval's, find the first
1544 * matching hashval in the node.
1545 */
1546 while (probe > 0 &&
1547 be32_to_cpu(btree[probe].hashval) >= hashval) {
1548 probe--;
1549 }
1550 while (probe < max &&
1551 be32_to_cpu(btree[probe].hashval) < hashval) {
1552 probe++;
1553 }
1554
1555 /*
1556 * Pick the right block to descend on.
1557 */
1558 if (probe == max) {
1559 blk->index = max - 1;
1560 blkno = be32_to_cpu(btree[max - 1].before);
1561 } else {
1562 blk->index = probe;
1563 blkno = be32_to_cpu(btree[probe].before);
1564 }
1565 }
1566
1567 /*
1568 * A leaf block that ends in the hashval that we are interested in
1569 * (final hashval == search hashval) means that the next block may
1570 * contain more entries with the same hashval, shift upward to the
1571 * next leaf and keep searching.
1572 */
1573 for (;;) {
1574 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1575 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1576 &blk->index, state);
1577 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1578 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1579 blk->index = args->index;
1580 args->blkno = blk->blkno;
1581 } else {
1582 ASSERT(0);
1583 return -EFSCORRUPTED;
1584 }
1585 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1586 (blk->hashval == args->hashval)) {
1587 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1588 &retval);
1589 if (error)
1590 return error;
1591 if (retval == 0) {
1592 continue;
1593 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1594 /* path_shift() gives ENOENT */
1595 retval = -ENOATTR;
1596 }
1597 }
1598 break;
1599 }
1600 *result = retval;
1601 return 0;
1602}
1603
1604/*========================================================================
1605 * Utility routines.
1606 *========================================================================*/
1607
1608/*
1609 * Compare two intermediate nodes for "order".
1610 */
1611STATIC int
1612xfs_da3_node_order(
1613 struct xfs_inode *dp,
1614 struct xfs_buf *node1_bp,
1615 struct xfs_buf *node2_bp)
1616{
1617 struct xfs_da_intnode *node1;
1618 struct xfs_da_intnode *node2;
1619 struct xfs_da_node_entry *btree1;
1620 struct xfs_da_node_entry *btree2;
1621 struct xfs_da3_icnode_hdr node1hdr;
1622 struct xfs_da3_icnode_hdr node2hdr;
1623
1624 node1 = node1_bp->b_addr;
1625 node2 = node2_bp->b_addr;
1626 dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
1627 dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
1628 btree1 = dp->d_ops->node_tree_p(node1);
1629 btree2 = dp->d_ops->node_tree_p(node2);
1630
1631 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1632 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1633 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1634 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1635 return 1;
1636 }
1637 return 0;
1638}
1639
1640/*
1641 * Link a new block into a doubly linked list of blocks (of whatever type).
1642 */
1643int /* error */
1644xfs_da3_blk_link(
1645 struct xfs_da_state *state,
1646 struct xfs_da_state_blk *old_blk,
1647 struct xfs_da_state_blk *new_blk)
1648{
1649 struct xfs_da_blkinfo *old_info;
1650 struct xfs_da_blkinfo *new_info;
1651 struct xfs_da_blkinfo *tmp_info;
1652 struct xfs_da_args *args;
1653 struct xfs_buf *bp;
1654 int before = 0;
1655 int error;
1656 struct xfs_inode *dp = state->args->dp;
1657
1658 /*
1659 * Set up environment.
1660 */
1661 args = state->args;
1662 ASSERT(args != NULL);
1663 old_info = old_blk->bp->b_addr;
1664 new_info = new_blk->bp->b_addr;
1665 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1666 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1667 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1668
1669 switch (old_blk->magic) {
1670 case XFS_ATTR_LEAF_MAGIC:
1671 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1672 break;
1673 case XFS_DIR2_LEAFN_MAGIC:
1674 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1675 break;
1676 case XFS_DA_NODE_MAGIC:
1677 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1678 break;
1679 }
1680
1681 /*
1682 * Link blocks in appropriate order.
1683 */
1684 if (before) {
1685 /*
1686 * Link new block in before existing block.
1687 */
1688 trace_xfs_da_link_before(args);
1689 new_info->forw = cpu_to_be32(old_blk->blkno);
1690 new_info->back = old_info->back;
1691 if (old_info->back) {
1692 error = xfs_da3_node_read(args->trans, dp,
1693 be32_to_cpu(old_info->back),
1694 -1, &bp, args->whichfork);
1695 if (error)
1696 return error;
1697 ASSERT(bp != NULL);
1698 tmp_info = bp->b_addr;
1699 ASSERT(tmp_info->magic == old_info->magic);
1700 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1701 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1702 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1703 }
1704 old_info->back = cpu_to_be32(new_blk->blkno);
1705 } else {
1706 /*
1707 * Link new block in after existing block.
1708 */
1709 trace_xfs_da_link_after(args);
1710 new_info->forw = old_info->forw;
1711 new_info->back = cpu_to_be32(old_blk->blkno);
1712 if (old_info->forw) {
1713 error = xfs_da3_node_read(args->trans, dp,
1714 be32_to_cpu(old_info->forw),
1715 -1, &bp, args->whichfork);
1716 if (error)
1717 return error;
1718 ASSERT(bp != NULL);
1719 tmp_info = bp->b_addr;
1720 ASSERT(tmp_info->magic == old_info->magic);
1721 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1722 tmp_info->back = cpu_to_be32(new_blk->blkno);
1723 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1724 }
1725 old_info->forw = cpu_to_be32(new_blk->blkno);
1726 }
1727
1728 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1729 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1730 return 0;
1731}
1732
1733/*
1734 * Unlink a block from a doubly linked list of blocks.
1735 */
1736STATIC int /* error */
1737xfs_da3_blk_unlink(
1738 struct xfs_da_state *state,
1739 struct xfs_da_state_blk *drop_blk,
1740 struct xfs_da_state_blk *save_blk)
1741{
1742 struct xfs_da_blkinfo *drop_info;
1743 struct xfs_da_blkinfo *save_info;
1744 struct xfs_da_blkinfo *tmp_info;
1745 struct xfs_da_args *args;
1746 struct xfs_buf *bp;
1747 int error;
1748
1749 /*
1750 * Set up environment.
1751 */
1752 args = state->args;
1753 ASSERT(args != NULL);
1754 save_info = save_blk->bp->b_addr;
1755 drop_info = drop_blk->bp->b_addr;
1756 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1757 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1758 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1759 ASSERT(save_blk->magic == drop_blk->magic);
1760 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1761 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1762 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1763 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1764
1765 /*
1766 * Unlink the leaf block from the doubly linked chain of leaves.
1767 */
1768 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1769 trace_xfs_da_unlink_back(args);
1770 save_info->back = drop_info->back;
1771 if (drop_info->back) {
1772 error = xfs_da3_node_read(args->trans, args->dp,
1773 be32_to_cpu(drop_info->back),
1774 -1, &bp, args->whichfork);
1775 if (error)
1776 return error;
1777 ASSERT(bp != NULL);
1778 tmp_info = bp->b_addr;
1779 ASSERT(tmp_info->magic == save_info->magic);
1780 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1781 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1782 xfs_trans_log_buf(args->trans, bp, 0,
1783 sizeof(*tmp_info) - 1);
1784 }
1785 } else {
1786 trace_xfs_da_unlink_forward(args);
1787 save_info->forw = drop_info->forw;
1788 if (drop_info->forw) {
1789 error = xfs_da3_node_read(args->trans, args->dp,
1790 be32_to_cpu(drop_info->forw),
1791 -1, &bp, args->whichfork);
1792 if (error)
1793 return error;
1794 ASSERT(bp != NULL);
1795 tmp_info = bp->b_addr;
1796 ASSERT(tmp_info->magic == save_info->magic);
1797 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1798 tmp_info->back = cpu_to_be32(save_blk->blkno);
1799 xfs_trans_log_buf(args->trans, bp, 0,
1800 sizeof(*tmp_info) - 1);
1801 }
1802 }
1803
1804 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1805 return 0;
1806}
1807
1808/*
1809 * Move a path "forward" or "!forward" one block at the current level.
1810 *
1811 * This routine will adjust a "path" to point to the next block
1812 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1813 * Btree, including updating pointers to the intermediate nodes between
1814 * the new bottom and the root.
1815 */
1816int /* error */
1817xfs_da3_path_shift(
1818 struct xfs_da_state *state,
1819 struct xfs_da_state_path *path,
1820 int forward,
1821 int release,
1822 int *result)
1823{
1824 struct xfs_da_state_blk *blk;
1825 struct xfs_da_blkinfo *info;
1826 struct xfs_da_intnode *node;
1827 struct xfs_da_args *args;
1828 struct xfs_da_node_entry *btree;
1829 struct xfs_da3_icnode_hdr nodehdr;
1830 struct xfs_buf *bp;
1831 xfs_dablk_t blkno = 0;
1832 int level;
1833 int error;
1834 struct xfs_inode *dp = state->args->dp;
1835
1836 trace_xfs_da_path_shift(state->args);
1837
1838 /*
1839 * Roll up the Btree looking for the first block where our
1840 * current index is not at the edge of the block. Note that
1841 * we skip the bottom layer because we want the sibling block.
1842 */
1843 args = state->args;
1844 ASSERT(args != NULL);
1845 ASSERT(path != NULL);
1846 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1847 level = (path->active-1) - 1; /* skip bottom layer in path */
1848 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1849 node = blk->bp->b_addr;
1850 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1851 btree = dp->d_ops->node_tree_p(node);
1852
1853 if (forward && (blk->index < nodehdr.count - 1)) {
1854 blk->index++;
1855 blkno = be32_to_cpu(btree[blk->index].before);
1856 break;
1857 } else if (!forward && (blk->index > 0)) {
1858 blk->index--;
1859 blkno = be32_to_cpu(btree[blk->index].before);
1860 break;
1861 }
1862 }
1863 if (level < 0) {
1864 *result = -ENOENT; /* we're out of our tree */
1865 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1866 return 0;
1867 }
1868
1869 /*
1870 * Roll down the edge of the subtree until we reach the
1871 * same depth we were at originally.
1872 */
1873 for (blk++, level++; level < path->active; blk++, level++) {
1874 /*
1875 * Read the next child block into a local buffer.
1876 */
1877 error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
1878 args->whichfork);
1879 if (error)
1880 return error;
1881
1882 /*
1883 * Release the old block (if it's dirty, the trans doesn't
1884 * actually let go) and swap the local buffer into the path
1885 * structure. This ensures failure of the above read doesn't set
1886 * a NULL buffer in an active slot in the path.
1887 */
1888 if (release)
1889 xfs_trans_brelse(args->trans, blk->bp);
1890 blk->blkno = blkno;
1891 blk->bp = bp;
1892
1893 info = blk->bp->b_addr;
1894 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1895 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1896 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1897 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1898 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1899 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1900
1901
1902 /*
1903 * Note: we flatten the magic number to a single type so we
1904 * don't have to compare against crc/non-crc types elsewhere.
1905 */
1906 switch (be16_to_cpu(info->magic)) {
1907 case XFS_DA_NODE_MAGIC:
1908 case XFS_DA3_NODE_MAGIC:
1909 blk->magic = XFS_DA_NODE_MAGIC;
1910 node = (xfs_da_intnode_t *)info;
1911 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1912 btree = dp->d_ops->node_tree_p(node);
1913 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1914 if (forward)
1915 blk->index = 0;
1916 else
1917 blk->index = nodehdr.count - 1;
1918 blkno = be32_to_cpu(btree[blk->index].before);
1919 break;
1920 case XFS_ATTR_LEAF_MAGIC:
1921 case XFS_ATTR3_LEAF_MAGIC:
1922 blk->magic = XFS_ATTR_LEAF_MAGIC;
1923 ASSERT(level == path->active-1);
1924 blk->index = 0;
1925 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1926 break;
1927 case XFS_DIR2_LEAFN_MAGIC:
1928 case XFS_DIR3_LEAFN_MAGIC:
1929 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1930 ASSERT(level == path->active-1);
1931 blk->index = 0;
1932 blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
1933 blk->bp, NULL);
1934 break;
1935 default:
1936 ASSERT(0);
1937 break;
1938 }
1939 }
1940 *result = 0;
1941 return 0;
1942}
1943
1944
1945/*========================================================================
1946 * Utility routines.
1947 *========================================================================*/
1948
1949/*
1950 * Implement a simple hash on a character string.
1951 * Rotate the hash value by 7 bits, then XOR each character in.
1952 * This is implemented with some source-level loop unrolling.
1953 */
1954xfs_dahash_t
1955xfs_da_hashname(const __uint8_t *name, int namelen)
1956{
1957 xfs_dahash_t hash;
1958
1959 /*
1960 * Do four characters at a time as long as we can.
1961 */
1962 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1963 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1964 (name[3] << 0) ^ rol32(hash, 7 * 4);
1965
1966 /*
1967 * Now do the rest of the characters.
1968 */
1969 switch (namelen) {
1970 case 3:
1971 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1972 rol32(hash, 7 * 3);
1973 case 2:
1974 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1975 case 1:
1976 return (name[0] << 0) ^ rol32(hash, 7 * 1);
1977 default: /* case 0: */
1978 return hash;
1979 }
1980}
1981
1982enum xfs_dacmp
1983xfs_da_compname(
1984 struct xfs_da_args *args,
1985 const unsigned char *name,
1986 int len)
1987{
1988 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1989 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1990}
1991
1992static xfs_dahash_t
1993xfs_default_hashname(
1994 struct xfs_name *name)
1995{
1996 return xfs_da_hashname(name->name, name->len);
1997}
1998
1999const struct xfs_nameops xfs_default_nameops = {
2000 .hashname = xfs_default_hashname,
2001 .compname = xfs_da_compname
2002};
2003
2004int
2005xfs_da_grow_inode_int(
2006 struct xfs_da_args *args,
2007 xfs_fileoff_t *bno,
2008 int count)
2009{
2010 struct xfs_trans *tp = args->trans;
2011 struct xfs_inode *dp = args->dp;
2012 int w = args->whichfork;
2013 xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
2014 struct xfs_bmbt_irec map, *mapp;
2015 int nmap, error, got, i, mapi;
2016
2017 /*
2018 * Find a spot in the file space to put the new block.
2019 */
2020 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2021 if (error)
2022 return error;
2023
2024 /*
2025 * Try mapping it in one filesystem block.
2026 */
2027 nmap = 1;
2028 ASSERT(args->firstblock != NULL);
2029 error = xfs_bmapi_write(tp, dp, *bno, count,
2030 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2031 args->firstblock, args->total, &map, &nmap,
2032 args->dfops);
2033 if (error)
2034 return error;
2035
2036 ASSERT(nmap <= 1);
2037 if (nmap == 1) {
2038 mapp = ↦
2039 mapi = 1;
2040 } else if (nmap == 0 && count > 1) {
2041 xfs_fileoff_t b;
2042 int c;
2043
2044 /*
2045 * If we didn't get it and the block might work if fragmented,
2046 * try without the CONTIG flag. Loop until we get it all.
2047 */
2048 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2049 for (b = *bno, mapi = 0; b < *bno + count; ) {
2050 nmap = MIN(XFS_BMAP_MAX_NMAP, count);
2051 c = (int)(*bno + count - b);
2052 error = xfs_bmapi_write(tp, dp, b, c,
2053 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2054 args->firstblock, args->total,
2055 &mapp[mapi], &nmap, args->dfops);
2056 if (error)
2057 goto out_free_map;
2058 if (nmap < 1)
2059 break;
2060 mapi += nmap;
2061 b = mapp[mapi - 1].br_startoff +
2062 mapp[mapi - 1].br_blockcount;
2063 }
2064 } else {
2065 mapi = 0;
2066 mapp = NULL;
2067 }
2068
2069 /*
2070 * Count the blocks we got, make sure it matches the total.
2071 */
2072 for (i = 0, got = 0; i < mapi; i++)
2073 got += mapp[i].br_blockcount;
2074 if (got != count || mapp[0].br_startoff != *bno ||
2075 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2076 *bno + count) {
2077 error = -ENOSPC;
2078 goto out_free_map;
2079 }
2080
2081 /* account for newly allocated blocks in reserved blocks total */
2082 args->total -= dp->i_d.di_nblocks - nblks;
2083
2084out_free_map:
2085 if (mapp != &map)
2086 kmem_free(mapp);
2087 return error;
2088}
2089
2090/*
2091 * Add a block to the btree ahead of the file.
2092 * Return the new block number to the caller.
2093 */
2094int
2095xfs_da_grow_inode(
2096 struct xfs_da_args *args,
2097 xfs_dablk_t *new_blkno)
2098{
2099 xfs_fileoff_t bno;
2100 int error;
2101
2102 trace_xfs_da_grow_inode(args);
2103
2104 bno = args->geo->leafblk;
2105 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2106 if (!error)
2107 *new_blkno = (xfs_dablk_t)bno;
2108 return error;
2109}
2110
2111/*
2112 * Ick. We need to always be able to remove a btree block, even
2113 * if there's no space reservation because the filesystem is full.
2114 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2115 * It swaps the target block with the last block in the file. The
2116 * last block in the file can always be removed since it can't cause
2117 * a bmap btree split to do that.
2118 */
2119STATIC int
2120xfs_da3_swap_lastblock(
2121 struct xfs_da_args *args,
2122 xfs_dablk_t *dead_blknop,
2123 struct xfs_buf **dead_bufp)
2124{
2125 struct xfs_da_blkinfo *dead_info;
2126 struct xfs_da_blkinfo *sib_info;
2127 struct xfs_da_intnode *par_node;
2128 struct xfs_da_intnode *dead_node;
2129 struct xfs_dir2_leaf *dead_leaf2;
2130 struct xfs_da_node_entry *btree;
2131 struct xfs_da3_icnode_hdr par_hdr;
2132 struct xfs_inode *dp;
2133 struct xfs_trans *tp;
2134 struct xfs_mount *mp;
2135 struct xfs_buf *dead_buf;
2136 struct xfs_buf *last_buf;
2137 struct xfs_buf *sib_buf;
2138 struct xfs_buf *par_buf;
2139 xfs_dahash_t dead_hash;
2140 xfs_fileoff_t lastoff;
2141 xfs_dablk_t dead_blkno;
2142 xfs_dablk_t last_blkno;
2143 xfs_dablk_t sib_blkno;
2144 xfs_dablk_t par_blkno;
2145 int error;
2146 int w;
2147 int entno;
2148 int level;
2149 int dead_level;
2150
2151 trace_xfs_da_swap_lastblock(args);
2152
2153 dead_buf = *dead_bufp;
2154 dead_blkno = *dead_blknop;
2155 tp = args->trans;
2156 dp = args->dp;
2157 w = args->whichfork;
2158 ASSERT(w == XFS_DATA_FORK);
2159 mp = dp->i_mount;
2160 lastoff = args->geo->freeblk;
2161 error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2162 if (error)
2163 return error;
2164 if (unlikely(lastoff == 0)) {
2165 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2166 mp);
2167 return -EFSCORRUPTED;
2168 }
2169 /*
2170 * Read the last block in the btree space.
2171 */
2172 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2173 error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2174 if (error)
2175 return error;
2176 /*
2177 * Copy the last block into the dead buffer and log it.
2178 */
2179 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2180 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2181 dead_info = dead_buf->b_addr;
2182 /*
2183 * Get values from the moved block.
2184 */
2185 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2186 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2187 struct xfs_dir3_icleaf_hdr leafhdr;
2188 struct xfs_dir2_leaf_entry *ents;
2189
2190 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2191 dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2192 ents = dp->d_ops->leaf_ents_p(dead_leaf2);
2193 dead_level = 0;
2194 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2195 } else {
2196 struct xfs_da3_icnode_hdr deadhdr;
2197
2198 dead_node = (xfs_da_intnode_t *)dead_info;
2199 dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
2200 btree = dp->d_ops->node_tree_p(dead_node);
2201 dead_level = deadhdr.level;
2202 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2203 }
2204 sib_buf = par_buf = NULL;
2205 /*
2206 * If the moved block has a left sibling, fix up the pointers.
2207 */
2208 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2209 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2210 if (error)
2211 goto done;
2212 sib_info = sib_buf->b_addr;
2213 if (unlikely(
2214 be32_to_cpu(sib_info->forw) != last_blkno ||
2215 sib_info->magic != dead_info->magic)) {
2216 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2217 XFS_ERRLEVEL_LOW, mp);
2218 error = -EFSCORRUPTED;
2219 goto done;
2220 }
2221 sib_info->forw = cpu_to_be32(dead_blkno);
2222 xfs_trans_log_buf(tp, sib_buf,
2223 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2224 sizeof(sib_info->forw)));
2225 sib_buf = NULL;
2226 }
2227 /*
2228 * If the moved block has a right sibling, fix up the pointers.
2229 */
2230 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2231 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2232 if (error)
2233 goto done;
2234 sib_info = sib_buf->b_addr;
2235 if (unlikely(
2236 be32_to_cpu(sib_info->back) != last_blkno ||
2237 sib_info->magic != dead_info->magic)) {
2238 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
2239 XFS_ERRLEVEL_LOW, mp);
2240 error = -EFSCORRUPTED;
2241 goto done;
2242 }
2243 sib_info->back = cpu_to_be32(dead_blkno);
2244 xfs_trans_log_buf(tp, sib_buf,
2245 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2246 sizeof(sib_info->back)));
2247 sib_buf = NULL;
2248 }
2249 par_blkno = args->geo->leafblk;
2250 level = -1;
2251 /*
2252 * Walk down the tree looking for the parent of the moved block.
2253 */
2254 for (;;) {
2255 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2256 if (error)
2257 goto done;
2258 par_node = par_buf->b_addr;
2259 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2260 if (level >= 0 && level != par_hdr.level + 1) {
2261 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
2262 XFS_ERRLEVEL_LOW, mp);
2263 error = -EFSCORRUPTED;
2264 goto done;
2265 }
2266 level = par_hdr.level;
2267 btree = dp->d_ops->node_tree_p(par_node);
2268 for (entno = 0;
2269 entno < par_hdr.count &&
2270 be32_to_cpu(btree[entno].hashval) < dead_hash;
2271 entno++)
2272 continue;
2273 if (entno == par_hdr.count) {
2274 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
2275 XFS_ERRLEVEL_LOW, mp);
2276 error = -EFSCORRUPTED;
2277 goto done;
2278 }
2279 par_blkno = be32_to_cpu(btree[entno].before);
2280 if (level == dead_level + 1)
2281 break;
2282 xfs_trans_brelse(tp, par_buf);
2283 par_buf = NULL;
2284 }
2285 /*
2286 * We're in the right parent block.
2287 * Look for the right entry.
2288 */
2289 for (;;) {
2290 for (;
2291 entno < par_hdr.count &&
2292 be32_to_cpu(btree[entno].before) != last_blkno;
2293 entno++)
2294 continue;
2295 if (entno < par_hdr.count)
2296 break;
2297 par_blkno = par_hdr.forw;
2298 xfs_trans_brelse(tp, par_buf);
2299 par_buf = NULL;
2300 if (unlikely(par_blkno == 0)) {
2301 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
2302 XFS_ERRLEVEL_LOW, mp);
2303 error = -EFSCORRUPTED;
2304 goto done;
2305 }
2306 error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
2307 if (error)
2308 goto done;
2309 par_node = par_buf->b_addr;
2310 dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
2311 if (par_hdr.level != level) {
2312 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
2313 XFS_ERRLEVEL_LOW, mp);
2314 error = -EFSCORRUPTED;
2315 goto done;
2316 }
2317 btree = dp->d_ops->node_tree_p(par_node);
2318 entno = 0;
2319 }
2320 /*
2321 * Update the parent entry pointing to the moved block.
2322 */
2323 btree[entno].before = cpu_to_be32(dead_blkno);
2324 xfs_trans_log_buf(tp, par_buf,
2325 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2326 sizeof(btree[entno].before)));
2327 *dead_blknop = last_blkno;
2328 *dead_bufp = last_buf;
2329 return 0;
2330done:
2331 if (par_buf)
2332 xfs_trans_brelse(tp, par_buf);
2333 if (sib_buf)
2334 xfs_trans_brelse(tp, sib_buf);
2335 xfs_trans_brelse(tp, last_buf);
2336 return error;
2337}
2338
2339/*
2340 * Remove a btree block from a directory or attribute.
2341 */
2342int
2343xfs_da_shrink_inode(
2344 xfs_da_args_t *args,
2345 xfs_dablk_t dead_blkno,
2346 struct xfs_buf *dead_buf)
2347{
2348 xfs_inode_t *dp;
2349 int done, error, w, count;
2350 xfs_trans_t *tp;
2351
2352 trace_xfs_da_shrink_inode(args);
2353
2354 dp = args->dp;
2355 w = args->whichfork;
2356 tp = args->trans;
2357 count = args->geo->fsbcount;
2358 for (;;) {
2359 /*
2360 * Remove extents. If we get ENOSPC for a dir we have to move
2361 * the last block to the place we want to kill.
2362 */
2363 error = xfs_bunmapi(tp, dp, dead_blkno, count,
2364 xfs_bmapi_aflag(w), 0, args->firstblock,
2365 args->dfops, &done);
2366 if (error == -ENOSPC) {
2367 if (w != XFS_DATA_FORK)
2368 break;
2369 error = xfs_da3_swap_lastblock(args, &dead_blkno,
2370 &dead_buf);
2371 if (error)
2372 break;
2373 } else {
2374 break;
2375 }
2376 }
2377 xfs_trans_binval(tp, dead_buf);
2378 return error;
2379}
2380
2381/*
2382 * See if the mapping(s) for this btree block are valid, i.e.
2383 * don't contain holes, are logically contiguous, and cover the whole range.
2384 */
2385STATIC int
2386xfs_da_map_covers_blocks(
2387 int nmap,
2388 xfs_bmbt_irec_t *mapp,
2389 xfs_dablk_t bno,
2390 int count)
2391{
2392 int i;
2393 xfs_fileoff_t off;
2394
2395 for (i = 0, off = bno; i < nmap; i++) {
2396 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
2397 mapp[i].br_startblock == DELAYSTARTBLOCK) {
2398 return 0;
2399 }
2400 if (off != mapp[i].br_startoff) {
2401 return 0;
2402 }
2403 off += mapp[i].br_blockcount;
2404 }
2405 return off == bno + count;
2406}
2407
2408/*
2409 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
2410 *
2411 * For the single map case, it is assumed that the caller has provided a pointer
2412 * to a valid xfs_buf_map. For the multiple map case, this function will
2413 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
2414 * map pointer with the allocated map.
2415 */
2416static int
2417xfs_buf_map_from_irec(
2418 struct xfs_mount *mp,
2419 struct xfs_buf_map **mapp,
2420 int *nmaps,
2421 struct xfs_bmbt_irec *irecs,
2422 int nirecs)
2423{
2424 struct xfs_buf_map *map;
2425 int i;
2426
2427 ASSERT(*nmaps == 1);
2428 ASSERT(nirecs >= 1);
2429
2430 if (nirecs > 1) {
2431 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
2432 KM_SLEEP | KM_NOFS);
2433 if (!map)
2434 return -ENOMEM;
2435 *mapp = map;
2436 }
2437
2438 *nmaps = nirecs;
2439 map = *mapp;
2440 for (i = 0; i < *nmaps; i++) {
2441 ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
2442 irecs[i].br_startblock != HOLESTARTBLOCK);
2443 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2444 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2445 }
2446 return 0;
2447}
2448
2449/*
2450 * Map the block we are given ready for reading. There are three possible return
2451 * values:
2452 * -1 - will be returned if we land in a hole and mappedbno == -2 so the
2453 * caller knows not to execute a subsequent read.
2454 * 0 - if we mapped the block successfully
2455 * >0 - positive error number if there was an error.
2456 */
2457static int
2458xfs_dabuf_map(
2459 struct xfs_inode *dp,
2460 xfs_dablk_t bno,
2461 xfs_daddr_t mappedbno,
2462 int whichfork,
2463 struct xfs_buf_map **map,
2464 int *nmaps)
2465{
2466 struct xfs_mount *mp = dp->i_mount;
2467 int nfsb;
2468 int error = 0;
2469 struct xfs_bmbt_irec irec;
2470 struct xfs_bmbt_irec *irecs = &irec;
2471 int nirecs;
2472
2473 ASSERT(map && *map);
2474 ASSERT(*nmaps == 1);
2475
2476 if (whichfork == XFS_DATA_FORK)
2477 nfsb = mp->m_dir_geo->fsbcount;
2478 else
2479 nfsb = mp->m_attr_geo->fsbcount;
2480
2481 /*
2482 * Caller doesn't have a mapping. -2 means don't complain
2483 * if we land in a hole.
2484 */
2485 if (mappedbno == -1 || mappedbno == -2) {
2486 /*
2487 * Optimize the one-block case.
2488 */
2489 if (nfsb != 1)
2490 irecs = kmem_zalloc(sizeof(irec) * nfsb,
2491 KM_SLEEP | KM_NOFS);
2492
2493 nirecs = nfsb;
2494 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
2495 &nirecs, xfs_bmapi_aflag(whichfork));
2496 if (error)
2497 goto out;
2498 } else {
2499 irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2500 irecs->br_startoff = (xfs_fileoff_t)bno;
2501 irecs->br_blockcount = nfsb;
2502 irecs->br_state = 0;
2503 nirecs = 1;
2504 }
2505
2506 if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
2507 error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
2508 if (unlikely(error == -EFSCORRUPTED)) {
2509 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2510 int i;
2511 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2512 __func__, (long long)bno,
2513 (long long)dp->i_ino);
2514 for (i = 0; i < *nmaps; i++) {
2515 xfs_alert(mp,
2516"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2517 i,
2518 (long long)irecs[i].br_startoff,
2519 (long long)irecs[i].br_startblock,
2520 (long long)irecs[i].br_blockcount,
2521 irecs[i].br_state);
2522 }
2523 }
2524 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2525 XFS_ERRLEVEL_LOW, mp);
2526 }
2527 goto out;
2528 }
2529 error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
2530out:
2531 if (irecs != &irec)
2532 kmem_free(irecs);
2533 return error;
2534}
2535
2536/*
2537 * Get a buffer for the dir/attr block.
2538 */
2539int
2540xfs_da_get_buf(
2541 struct xfs_trans *trans,
2542 struct xfs_inode *dp,
2543 xfs_dablk_t bno,
2544 xfs_daddr_t mappedbno,
2545 struct xfs_buf **bpp,
2546 int whichfork)
2547{
2548 struct xfs_buf *bp;
2549 struct xfs_buf_map map;
2550 struct xfs_buf_map *mapp;
2551 int nmap;
2552 int error;
2553
2554 *bpp = NULL;
2555 mapp = ↦
2556 nmap = 1;
2557 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2558 &mapp, &nmap);
2559 if (error) {
2560 /* mapping a hole is not an error, but we don't continue */
2561 if (error == -1)
2562 error = 0;
2563 goto out_free;
2564 }
2565
2566 bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
2567 mapp, nmap, 0);
2568 error = bp ? bp->b_error : -EIO;
2569 if (error) {
2570 if (bp)
2571 xfs_trans_brelse(trans, bp);
2572 goto out_free;
2573 }
2574
2575 *bpp = bp;
2576
2577out_free:
2578 if (mapp != &map)
2579 kmem_free(mapp);
2580
2581 return error;
2582}
2583
2584/*
2585 * Get a buffer for the dir/attr block, fill in the contents.
2586 */
2587int
2588xfs_da_read_buf(
2589 struct xfs_trans *trans,
2590 struct xfs_inode *dp,
2591 xfs_dablk_t bno,
2592 xfs_daddr_t mappedbno,
2593 struct xfs_buf **bpp,
2594 int whichfork,
2595 const struct xfs_buf_ops *ops)
2596{
2597 struct xfs_buf *bp;
2598 struct xfs_buf_map map;
2599 struct xfs_buf_map *mapp;
2600 int nmap;
2601 int error;
2602
2603 *bpp = NULL;
2604 mapp = ↦
2605 nmap = 1;
2606 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2607 &mapp, &nmap);
2608 if (error) {
2609 /* mapping a hole is not an error, but we don't continue */
2610 if (error == -1)
2611 error = 0;
2612 goto out_free;
2613 }
2614
2615 error = xfs_trans_read_buf_map(dp->i_mount, trans,
2616 dp->i_mount->m_ddev_targp,
2617 mapp, nmap, 0, &bp, ops);
2618 if (error)
2619 goto out_free;
2620
2621 if (whichfork == XFS_ATTR_FORK)
2622 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2623 else
2624 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2625 *bpp = bp;
2626out_free:
2627 if (mapp != &map)
2628 kmem_free(mapp);
2629
2630 return error;
2631}
2632
2633/*
2634 * Readahead the dir/attr block.
2635 */
2636int
2637xfs_da_reada_buf(
2638 struct xfs_inode *dp,
2639 xfs_dablk_t bno,
2640 xfs_daddr_t mappedbno,
2641 int whichfork,
2642 const struct xfs_buf_ops *ops)
2643{
2644 struct xfs_buf_map map;
2645 struct xfs_buf_map *mapp;
2646 int nmap;
2647 int error;
2648
2649 mapp = ↦
2650 nmap = 1;
2651 error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
2652 &mapp, &nmap);
2653 if (error) {
2654 /* mapping a hole is not an error, but we don't continue */
2655 if (error == -1)
2656 error = 0;
2657 goto out_free;
2658 }
2659
2660 mappedbno = mapp[0].bm_bn;
2661 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2662
2663out_free:
2664 if (mapp != &map)
2665 kmem_free(mapp);
2666
2667 return error;
2668}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
5 * All Rights Reserved.
6 */
7#include "xfs.h"
8#include "xfs_fs.h"
9#include "xfs_shared.h"
10#include "xfs_format.h"
11#include "xfs_log_format.h"
12#include "xfs_trans_resv.h"
13#include "xfs_bit.h"
14#include "xfs_mount.h"
15#include "xfs_inode.h"
16#include "xfs_dir2.h"
17#include "xfs_dir2_priv.h"
18#include "xfs_trans.h"
19#include "xfs_bmap.h"
20#include "xfs_attr_leaf.h"
21#include "xfs_error.h"
22#include "xfs_trace.h"
23#include "xfs_buf_item.h"
24#include "xfs_log.h"
25#include "xfs_errortag.h"
26
27/*
28 * xfs_da_btree.c
29 *
30 * Routines to implement directories as Btrees of hashed names.
31 */
32
33/*========================================================================
34 * Function prototypes for the kernel.
35 *========================================================================*/
36
37/*
38 * Routines used for growing the Btree.
39 */
40STATIC int xfs_da3_root_split(xfs_da_state_t *state,
41 xfs_da_state_blk_t *existing_root,
42 xfs_da_state_blk_t *new_child);
43STATIC int xfs_da3_node_split(xfs_da_state_t *state,
44 xfs_da_state_blk_t *existing_blk,
45 xfs_da_state_blk_t *split_blk,
46 xfs_da_state_blk_t *blk_to_add,
47 int treelevel,
48 int *result);
49STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
50 xfs_da_state_blk_t *node_blk_1,
51 xfs_da_state_blk_t *node_blk_2);
52STATIC void xfs_da3_node_add(xfs_da_state_t *state,
53 xfs_da_state_blk_t *old_node_blk,
54 xfs_da_state_blk_t *new_node_blk);
55
56/*
57 * Routines used for shrinking the Btree.
58 */
59STATIC int xfs_da3_root_join(xfs_da_state_t *state,
60 xfs_da_state_blk_t *root_blk);
61STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
62STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
63 xfs_da_state_blk_t *drop_blk);
64STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
65 xfs_da_state_blk_t *src_node_blk,
66 xfs_da_state_blk_t *dst_node_blk);
67
68/*
69 * Utility routines.
70 */
71STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
72 xfs_da_state_blk_t *drop_blk,
73 xfs_da_state_blk_t *save_blk);
74
75
76struct kmem_cache *xfs_da_state_cache; /* anchor for dir/attr state */
77
78/*
79 * Allocate a dir-state structure.
80 * We don't put them on the stack since they're large.
81 */
82struct xfs_da_state *
83xfs_da_state_alloc(
84 struct xfs_da_args *args)
85{
86 struct xfs_da_state *state;
87
88 state = kmem_cache_zalloc(xfs_da_state_cache, GFP_NOFS | __GFP_NOFAIL);
89 state->args = args;
90 state->mp = args->dp->i_mount;
91 return state;
92}
93
94/*
95 * Kill the altpath contents of a da-state structure.
96 */
97STATIC void
98xfs_da_state_kill_altpath(xfs_da_state_t *state)
99{
100 int i;
101
102 for (i = 0; i < state->altpath.active; i++)
103 state->altpath.blk[i].bp = NULL;
104 state->altpath.active = 0;
105}
106
107/*
108 * Free a da-state structure.
109 */
110void
111xfs_da_state_free(xfs_da_state_t *state)
112{
113 xfs_da_state_kill_altpath(state);
114#ifdef DEBUG
115 memset((char *)state, 0, sizeof(*state));
116#endif /* DEBUG */
117 kmem_cache_free(xfs_da_state_cache, state);
118}
119
120void
121xfs_da_state_reset(
122 struct xfs_da_state *state,
123 struct xfs_da_args *args)
124{
125 xfs_da_state_kill_altpath(state);
126 memset(state, 0, sizeof(struct xfs_da_state));
127 state->args = args;
128 state->mp = state->args->dp->i_mount;
129}
130
131static inline int xfs_dabuf_nfsb(struct xfs_mount *mp, int whichfork)
132{
133 if (whichfork == XFS_DATA_FORK)
134 return mp->m_dir_geo->fsbcount;
135 return mp->m_attr_geo->fsbcount;
136}
137
138void
139xfs_da3_node_hdr_from_disk(
140 struct xfs_mount *mp,
141 struct xfs_da3_icnode_hdr *to,
142 struct xfs_da_intnode *from)
143{
144 if (xfs_has_crc(mp)) {
145 struct xfs_da3_intnode *from3 = (struct xfs_da3_intnode *)from;
146
147 to->forw = be32_to_cpu(from3->hdr.info.hdr.forw);
148 to->back = be32_to_cpu(from3->hdr.info.hdr.back);
149 to->magic = be16_to_cpu(from3->hdr.info.hdr.magic);
150 to->count = be16_to_cpu(from3->hdr.__count);
151 to->level = be16_to_cpu(from3->hdr.__level);
152 to->btree = from3->__btree;
153 ASSERT(to->magic == XFS_DA3_NODE_MAGIC);
154 } else {
155 to->forw = be32_to_cpu(from->hdr.info.forw);
156 to->back = be32_to_cpu(from->hdr.info.back);
157 to->magic = be16_to_cpu(from->hdr.info.magic);
158 to->count = be16_to_cpu(from->hdr.__count);
159 to->level = be16_to_cpu(from->hdr.__level);
160 to->btree = from->__btree;
161 ASSERT(to->magic == XFS_DA_NODE_MAGIC);
162 }
163}
164
165void
166xfs_da3_node_hdr_to_disk(
167 struct xfs_mount *mp,
168 struct xfs_da_intnode *to,
169 struct xfs_da3_icnode_hdr *from)
170{
171 if (xfs_has_crc(mp)) {
172 struct xfs_da3_intnode *to3 = (struct xfs_da3_intnode *)to;
173
174 ASSERT(from->magic == XFS_DA3_NODE_MAGIC);
175 to3->hdr.info.hdr.forw = cpu_to_be32(from->forw);
176 to3->hdr.info.hdr.back = cpu_to_be32(from->back);
177 to3->hdr.info.hdr.magic = cpu_to_be16(from->magic);
178 to3->hdr.__count = cpu_to_be16(from->count);
179 to3->hdr.__level = cpu_to_be16(from->level);
180 } else {
181 ASSERT(from->magic == XFS_DA_NODE_MAGIC);
182 to->hdr.info.forw = cpu_to_be32(from->forw);
183 to->hdr.info.back = cpu_to_be32(from->back);
184 to->hdr.info.magic = cpu_to_be16(from->magic);
185 to->hdr.__count = cpu_to_be16(from->count);
186 to->hdr.__level = cpu_to_be16(from->level);
187 }
188}
189
190/*
191 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
192 * accessible on v5 filesystems. This header format is common across da node,
193 * attr leaf and dir leaf blocks.
194 */
195xfs_failaddr_t
196xfs_da3_blkinfo_verify(
197 struct xfs_buf *bp,
198 struct xfs_da3_blkinfo *hdr3)
199{
200 struct xfs_mount *mp = bp->b_mount;
201 struct xfs_da_blkinfo *hdr = &hdr3->hdr;
202
203 if (!xfs_verify_magic16(bp, hdr->magic))
204 return __this_address;
205
206 if (xfs_has_crc(mp)) {
207 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
208 return __this_address;
209 if (be64_to_cpu(hdr3->blkno) != xfs_buf_daddr(bp))
210 return __this_address;
211 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
212 return __this_address;
213 }
214
215 return NULL;
216}
217
218static xfs_failaddr_t
219xfs_da3_node_verify(
220 struct xfs_buf *bp)
221{
222 struct xfs_mount *mp = bp->b_mount;
223 struct xfs_da_intnode *hdr = bp->b_addr;
224 struct xfs_da3_icnode_hdr ichdr;
225 xfs_failaddr_t fa;
226
227 xfs_da3_node_hdr_from_disk(mp, &ichdr, hdr);
228
229 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
230 if (fa)
231 return fa;
232
233 if (ichdr.level == 0)
234 return __this_address;
235 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
236 return __this_address;
237 if (ichdr.count == 0)
238 return __this_address;
239
240 /*
241 * we don't know if the node is for and attribute or directory tree,
242 * so only fail if the count is outside both bounds
243 */
244 if (ichdr.count > mp->m_dir_geo->node_ents &&
245 ichdr.count > mp->m_attr_geo->node_ents)
246 return __this_address;
247
248 /* XXX: hash order check? */
249
250 return NULL;
251}
252
253static void
254xfs_da3_node_write_verify(
255 struct xfs_buf *bp)
256{
257 struct xfs_mount *mp = bp->b_mount;
258 struct xfs_buf_log_item *bip = bp->b_log_item;
259 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
260 xfs_failaddr_t fa;
261
262 fa = xfs_da3_node_verify(bp);
263 if (fa) {
264 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
265 return;
266 }
267
268 if (!xfs_has_crc(mp))
269 return;
270
271 if (bip)
272 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
273
274 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
275}
276
277/*
278 * leaf/node format detection on trees is sketchy, so a node read can be done on
279 * leaf level blocks when detection identifies the tree as a node format tree
280 * incorrectly. In this case, we need to swap the verifier to match the correct
281 * format of the block being read.
282 */
283static void
284xfs_da3_node_read_verify(
285 struct xfs_buf *bp)
286{
287 struct xfs_da_blkinfo *info = bp->b_addr;
288 xfs_failaddr_t fa;
289
290 switch (be16_to_cpu(info->magic)) {
291 case XFS_DA3_NODE_MAGIC:
292 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
293 xfs_verifier_error(bp, -EFSBADCRC,
294 __this_address);
295 break;
296 }
297 fallthrough;
298 case XFS_DA_NODE_MAGIC:
299 fa = xfs_da3_node_verify(bp);
300 if (fa)
301 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
302 return;
303 case XFS_ATTR_LEAF_MAGIC:
304 case XFS_ATTR3_LEAF_MAGIC:
305 bp->b_ops = &xfs_attr3_leaf_buf_ops;
306 bp->b_ops->verify_read(bp);
307 return;
308 case XFS_DIR2_LEAFN_MAGIC:
309 case XFS_DIR3_LEAFN_MAGIC:
310 bp->b_ops = &xfs_dir3_leafn_buf_ops;
311 bp->b_ops->verify_read(bp);
312 return;
313 default:
314 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
315 break;
316 }
317}
318
319/* Verify the structure of a da3 block. */
320static xfs_failaddr_t
321xfs_da3_node_verify_struct(
322 struct xfs_buf *bp)
323{
324 struct xfs_da_blkinfo *info = bp->b_addr;
325
326 switch (be16_to_cpu(info->magic)) {
327 case XFS_DA3_NODE_MAGIC:
328 case XFS_DA_NODE_MAGIC:
329 return xfs_da3_node_verify(bp);
330 case XFS_ATTR_LEAF_MAGIC:
331 case XFS_ATTR3_LEAF_MAGIC:
332 bp->b_ops = &xfs_attr3_leaf_buf_ops;
333 return bp->b_ops->verify_struct(bp);
334 case XFS_DIR2_LEAFN_MAGIC:
335 case XFS_DIR3_LEAFN_MAGIC:
336 bp->b_ops = &xfs_dir3_leafn_buf_ops;
337 return bp->b_ops->verify_struct(bp);
338 default:
339 return __this_address;
340 }
341}
342
343const struct xfs_buf_ops xfs_da3_node_buf_ops = {
344 .name = "xfs_da3_node",
345 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
346 cpu_to_be16(XFS_DA3_NODE_MAGIC) },
347 .verify_read = xfs_da3_node_read_verify,
348 .verify_write = xfs_da3_node_write_verify,
349 .verify_struct = xfs_da3_node_verify_struct,
350};
351
352static int
353xfs_da3_node_set_type(
354 struct xfs_trans *tp,
355 struct xfs_buf *bp)
356{
357 struct xfs_da_blkinfo *info = bp->b_addr;
358
359 switch (be16_to_cpu(info->magic)) {
360 case XFS_DA_NODE_MAGIC:
361 case XFS_DA3_NODE_MAGIC:
362 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
363 return 0;
364 case XFS_ATTR_LEAF_MAGIC:
365 case XFS_ATTR3_LEAF_MAGIC:
366 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_ATTR_LEAF_BUF);
367 return 0;
368 case XFS_DIR2_LEAFN_MAGIC:
369 case XFS_DIR3_LEAFN_MAGIC:
370 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
371 return 0;
372 default:
373 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, tp->t_mountp,
374 info, sizeof(*info));
375 xfs_trans_brelse(tp, bp);
376 return -EFSCORRUPTED;
377 }
378}
379
380int
381xfs_da3_node_read(
382 struct xfs_trans *tp,
383 struct xfs_inode *dp,
384 xfs_dablk_t bno,
385 struct xfs_buf **bpp,
386 int whichfork)
387{
388 int error;
389
390 error = xfs_da_read_buf(tp, dp, bno, 0, bpp, whichfork,
391 &xfs_da3_node_buf_ops);
392 if (error || !*bpp || !tp)
393 return error;
394 return xfs_da3_node_set_type(tp, *bpp);
395}
396
397int
398xfs_da3_node_read_mapped(
399 struct xfs_trans *tp,
400 struct xfs_inode *dp,
401 xfs_daddr_t mappedbno,
402 struct xfs_buf **bpp,
403 int whichfork)
404{
405 struct xfs_mount *mp = dp->i_mount;
406 int error;
407
408 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, mappedbno,
409 XFS_FSB_TO_BB(mp, xfs_dabuf_nfsb(mp, whichfork)), 0,
410 bpp, &xfs_da3_node_buf_ops);
411 if (error || !*bpp)
412 return error;
413
414 if (whichfork == XFS_ATTR_FORK)
415 xfs_buf_set_ref(*bpp, XFS_ATTR_BTREE_REF);
416 else
417 xfs_buf_set_ref(*bpp, XFS_DIR_BTREE_REF);
418
419 if (!tp)
420 return 0;
421 return xfs_da3_node_set_type(tp, *bpp);
422}
423
424/*========================================================================
425 * Routines used for growing the Btree.
426 *========================================================================*/
427
428/*
429 * Create the initial contents of an intermediate node.
430 */
431int
432xfs_da3_node_create(
433 struct xfs_da_args *args,
434 xfs_dablk_t blkno,
435 int level,
436 struct xfs_buf **bpp,
437 int whichfork)
438{
439 struct xfs_da_intnode *node;
440 struct xfs_trans *tp = args->trans;
441 struct xfs_mount *mp = tp->t_mountp;
442 struct xfs_da3_icnode_hdr ichdr = {0};
443 struct xfs_buf *bp;
444 int error;
445 struct xfs_inode *dp = args->dp;
446
447 trace_xfs_da_node_create(args);
448 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
449
450 error = xfs_da_get_buf(tp, dp, blkno, &bp, whichfork);
451 if (error)
452 return error;
453 bp->b_ops = &xfs_da3_node_buf_ops;
454 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
455 node = bp->b_addr;
456
457 if (xfs_has_crc(mp)) {
458 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
459
460 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
461 ichdr.magic = XFS_DA3_NODE_MAGIC;
462 hdr3->info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
463 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
464 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
465 } else {
466 ichdr.magic = XFS_DA_NODE_MAGIC;
467 }
468 ichdr.level = level;
469
470 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &ichdr);
471 xfs_trans_log_buf(tp, bp,
472 XFS_DA_LOGRANGE(node, &node->hdr, args->geo->node_hdr_size));
473
474 *bpp = bp;
475 return 0;
476}
477
478/*
479 * Split a leaf node, rebalance, then possibly split
480 * intermediate nodes, rebalance, etc.
481 */
482int /* error */
483xfs_da3_split(
484 struct xfs_da_state *state)
485{
486 struct xfs_da_state_blk *oldblk;
487 struct xfs_da_state_blk *newblk;
488 struct xfs_da_state_blk *addblk;
489 struct xfs_da_intnode *node;
490 int max;
491 int action = 0;
492 int error;
493 int i;
494
495 trace_xfs_da_split(state->args);
496
497 if (XFS_TEST_ERROR(false, state->mp, XFS_ERRTAG_DA_LEAF_SPLIT))
498 return -EIO;
499
500 /*
501 * Walk back up the tree splitting/inserting/adjusting as necessary.
502 * If we need to insert and there isn't room, split the node, then
503 * decide which fragment to insert the new block from below into.
504 * Note that we may split the root this way, but we need more fixup.
505 */
506 max = state->path.active - 1;
507 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
508 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
509 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
510
511 addblk = &state->path.blk[max]; /* initial dummy value */
512 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
513 oldblk = &state->path.blk[i];
514 newblk = &state->altpath.blk[i];
515
516 /*
517 * If a leaf node then
518 * Allocate a new leaf node, then rebalance across them.
519 * else if an intermediate node then
520 * We split on the last layer, must we split the node?
521 */
522 switch (oldblk->magic) {
523 case XFS_ATTR_LEAF_MAGIC:
524 error = xfs_attr3_leaf_split(state, oldblk, newblk);
525 if ((error != 0) && (error != -ENOSPC)) {
526 return error; /* GROT: attr is inconsistent */
527 }
528 if (!error) {
529 addblk = newblk;
530 break;
531 }
532 /*
533 * Entry wouldn't fit, split the leaf again. The new
534 * extrablk will be consumed by xfs_da3_node_split if
535 * the node is split.
536 */
537 state->extravalid = 1;
538 if (state->inleaf) {
539 state->extraafter = 0; /* before newblk */
540 trace_xfs_attr_leaf_split_before(state->args);
541 error = xfs_attr3_leaf_split(state, oldblk,
542 &state->extrablk);
543 } else {
544 state->extraafter = 1; /* after newblk */
545 trace_xfs_attr_leaf_split_after(state->args);
546 error = xfs_attr3_leaf_split(state, newblk,
547 &state->extrablk);
548 }
549 if (error)
550 return error; /* GROT: attr inconsistent */
551 addblk = newblk;
552 break;
553 case XFS_DIR2_LEAFN_MAGIC:
554 error = xfs_dir2_leafn_split(state, oldblk, newblk);
555 if (error)
556 return error;
557 addblk = newblk;
558 break;
559 case XFS_DA_NODE_MAGIC:
560 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
561 max - i, &action);
562 addblk->bp = NULL;
563 if (error)
564 return error; /* GROT: dir is inconsistent */
565 /*
566 * Record the newly split block for the next time thru?
567 */
568 if (action)
569 addblk = newblk;
570 else
571 addblk = NULL;
572 break;
573 }
574
575 /*
576 * Update the btree to show the new hashval for this child.
577 */
578 xfs_da3_fixhashpath(state, &state->path);
579 }
580 if (!addblk)
581 return 0;
582
583 /*
584 * xfs_da3_node_split() should have consumed any extra blocks we added
585 * during a double leaf split in the attr fork. This is guaranteed as
586 * we can't be here if the attr fork only has a single leaf block.
587 */
588 ASSERT(state->extravalid == 0 ||
589 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
590
591 /*
592 * Split the root node.
593 */
594 ASSERT(state->path.active == 0);
595 oldblk = &state->path.blk[0];
596 error = xfs_da3_root_split(state, oldblk, addblk);
597 if (error)
598 goto out;
599
600 /*
601 * Update pointers to the node which used to be block 0 and just got
602 * bumped because of the addition of a new root node. Note that the
603 * original block 0 could be at any position in the list of blocks in
604 * the tree.
605 *
606 * Note: the magic numbers and sibling pointers are in the same physical
607 * place for both v2 and v3 headers (by design). Hence it doesn't matter
608 * which version of the xfs_da_intnode structure we use here as the
609 * result will be the same using either structure.
610 */
611 node = oldblk->bp->b_addr;
612 if (node->hdr.info.forw) {
613 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
614 xfs_buf_mark_corrupt(oldblk->bp);
615 error = -EFSCORRUPTED;
616 goto out;
617 }
618 node = addblk->bp->b_addr;
619 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
620 xfs_trans_log_buf(state->args->trans, addblk->bp,
621 XFS_DA_LOGRANGE(node, &node->hdr.info,
622 sizeof(node->hdr.info)));
623 }
624 node = oldblk->bp->b_addr;
625 if (node->hdr.info.back) {
626 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
627 xfs_buf_mark_corrupt(oldblk->bp);
628 error = -EFSCORRUPTED;
629 goto out;
630 }
631 node = addblk->bp->b_addr;
632 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
633 xfs_trans_log_buf(state->args->trans, addblk->bp,
634 XFS_DA_LOGRANGE(node, &node->hdr.info,
635 sizeof(node->hdr.info)));
636 }
637out:
638 addblk->bp = NULL;
639 return error;
640}
641
642/*
643 * Split the root. We have to create a new root and point to the two
644 * parts (the split old root) that we just created. Copy block zero to
645 * the EOF, extending the inode in process.
646 */
647STATIC int /* error */
648xfs_da3_root_split(
649 struct xfs_da_state *state,
650 struct xfs_da_state_blk *blk1,
651 struct xfs_da_state_blk *blk2)
652{
653 struct xfs_da_intnode *node;
654 struct xfs_da_intnode *oldroot;
655 struct xfs_da_node_entry *btree;
656 struct xfs_da3_icnode_hdr nodehdr;
657 struct xfs_da_args *args;
658 struct xfs_buf *bp;
659 struct xfs_inode *dp;
660 struct xfs_trans *tp;
661 struct xfs_dir2_leaf *leaf;
662 xfs_dablk_t blkno;
663 int level;
664 int error;
665 int size;
666
667 trace_xfs_da_root_split(state->args);
668
669 /*
670 * Copy the existing (incorrect) block from the root node position
671 * to a free space somewhere.
672 */
673 args = state->args;
674 error = xfs_da_grow_inode(args, &blkno);
675 if (error)
676 return error;
677
678 dp = args->dp;
679 tp = args->trans;
680 error = xfs_da_get_buf(tp, dp, blkno, &bp, args->whichfork);
681 if (error)
682 return error;
683 node = bp->b_addr;
684 oldroot = blk1->bp->b_addr;
685 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
686 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
687 struct xfs_da3_icnode_hdr icnodehdr;
688
689 xfs_da3_node_hdr_from_disk(dp->i_mount, &icnodehdr, oldroot);
690 btree = icnodehdr.btree;
691 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
692 level = icnodehdr.level;
693
694 /*
695 * we are about to copy oldroot to bp, so set up the type
696 * of bp while we know exactly what it will be.
697 */
698 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
699 } else {
700 struct xfs_dir3_icleaf_hdr leafhdr;
701
702 leaf = (xfs_dir2_leaf_t *)oldroot;
703 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr, leaf);
704
705 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
706 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
707 size = (int)((char *)&leafhdr.ents[leafhdr.count] -
708 (char *)leaf);
709 level = 0;
710
711 /*
712 * we are about to copy oldroot to bp, so set up the type
713 * of bp while we know exactly what it will be.
714 */
715 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
716 }
717
718 /*
719 * we can copy most of the information in the node from one block to
720 * another, but for CRC enabled headers we have to make sure that the
721 * block specific identifiers are kept intact. We update the buffer
722 * directly for this.
723 */
724 memcpy(node, oldroot, size);
725 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
726 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
727 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
728
729 node3->hdr.info.blkno = cpu_to_be64(xfs_buf_daddr(bp));
730 }
731 xfs_trans_log_buf(tp, bp, 0, size - 1);
732
733 bp->b_ops = blk1->bp->b_ops;
734 xfs_trans_buf_copy_type(bp, blk1->bp);
735 blk1->bp = bp;
736 blk1->blkno = blkno;
737
738 /*
739 * Set up the new root node.
740 */
741 error = xfs_da3_node_create(args,
742 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
743 level + 1, &bp, args->whichfork);
744 if (error)
745 return error;
746
747 node = bp->b_addr;
748 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
749 btree = nodehdr.btree;
750 btree[0].hashval = cpu_to_be32(blk1->hashval);
751 btree[0].before = cpu_to_be32(blk1->blkno);
752 btree[1].hashval = cpu_to_be32(blk2->hashval);
753 btree[1].before = cpu_to_be32(blk2->blkno);
754 nodehdr.count = 2;
755 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
756
757#ifdef DEBUG
758 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
759 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
760 ASSERT(blk1->blkno >= args->geo->leafblk &&
761 blk1->blkno < args->geo->freeblk);
762 ASSERT(blk2->blkno >= args->geo->leafblk &&
763 blk2->blkno < args->geo->freeblk);
764 }
765#endif
766
767 /* Header is already logged by xfs_da_node_create */
768 xfs_trans_log_buf(tp, bp,
769 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
770
771 return 0;
772}
773
774/*
775 * Split the node, rebalance, then add the new entry.
776 */
777STATIC int /* error */
778xfs_da3_node_split(
779 struct xfs_da_state *state,
780 struct xfs_da_state_blk *oldblk,
781 struct xfs_da_state_blk *newblk,
782 struct xfs_da_state_blk *addblk,
783 int treelevel,
784 int *result)
785{
786 struct xfs_da_intnode *node;
787 struct xfs_da3_icnode_hdr nodehdr;
788 xfs_dablk_t blkno;
789 int newcount;
790 int error;
791 int useextra;
792 struct xfs_inode *dp = state->args->dp;
793
794 trace_xfs_da_node_split(state->args);
795
796 node = oldblk->bp->b_addr;
797 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
798
799 /*
800 * With V2 dirs the extra block is data or freespace.
801 */
802 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
803 newcount = 1 + useextra;
804 /*
805 * Do we have to split the node?
806 */
807 if (nodehdr.count + newcount > state->args->geo->node_ents) {
808 /*
809 * Allocate a new node, add to the doubly linked chain of
810 * nodes, then move some of our excess entries into it.
811 */
812 error = xfs_da_grow_inode(state->args, &blkno);
813 if (error)
814 return error; /* GROT: dir is inconsistent */
815
816 error = xfs_da3_node_create(state->args, blkno, treelevel,
817 &newblk->bp, state->args->whichfork);
818 if (error)
819 return error; /* GROT: dir is inconsistent */
820 newblk->blkno = blkno;
821 newblk->magic = XFS_DA_NODE_MAGIC;
822 xfs_da3_node_rebalance(state, oldblk, newblk);
823 error = xfs_da3_blk_link(state, oldblk, newblk);
824 if (error)
825 return error;
826 *result = 1;
827 } else {
828 *result = 0;
829 }
830
831 /*
832 * Insert the new entry(s) into the correct block
833 * (updating last hashval in the process).
834 *
835 * xfs_da3_node_add() inserts BEFORE the given index,
836 * and as a result of using node_lookup_int() we always
837 * point to a valid entry (not after one), but a split
838 * operation always results in a new block whose hashvals
839 * FOLLOW the current block.
840 *
841 * If we had double-split op below us, then add the extra block too.
842 */
843 node = oldblk->bp->b_addr;
844 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
845 if (oldblk->index <= nodehdr.count) {
846 oldblk->index++;
847 xfs_da3_node_add(state, oldblk, addblk);
848 if (useextra) {
849 if (state->extraafter)
850 oldblk->index++;
851 xfs_da3_node_add(state, oldblk, &state->extrablk);
852 state->extravalid = 0;
853 }
854 } else {
855 newblk->index++;
856 xfs_da3_node_add(state, newblk, addblk);
857 if (useextra) {
858 if (state->extraafter)
859 newblk->index++;
860 xfs_da3_node_add(state, newblk, &state->extrablk);
861 state->extravalid = 0;
862 }
863 }
864
865 return 0;
866}
867
868/*
869 * Balance the btree elements between two intermediate nodes,
870 * usually one full and one empty.
871 *
872 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
873 */
874STATIC void
875xfs_da3_node_rebalance(
876 struct xfs_da_state *state,
877 struct xfs_da_state_blk *blk1,
878 struct xfs_da_state_blk *blk2)
879{
880 struct xfs_da_intnode *node1;
881 struct xfs_da_intnode *node2;
882 struct xfs_da_node_entry *btree1;
883 struct xfs_da_node_entry *btree2;
884 struct xfs_da_node_entry *btree_s;
885 struct xfs_da_node_entry *btree_d;
886 struct xfs_da3_icnode_hdr nodehdr1;
887 struct xfs_da3_icnode_hdr nodehdr2;
888 struct xfs_trans *tp;
889 int count;
890 int tmp;
891 int swap = 0;
892 struct xfs_inode *dp = state->args->dp;
893
894 trace_xfs_da_node_rebalance(state->args);
895
896 node1 = blk1->bp->b_addr;
897 node2 = blk2->bp->b_addr;
898 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
899 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
900 btree1 = nodehdr1.btree;
901 btree2 = nodehdr2.btree;
902
903 /*
904 * Figure out how many entries need to move, and in which direction.
905 * Swap the nodes around if that makes it simpler.
906 */
907 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
908 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
909 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
910 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
911 swap(node1, node2);
912 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
913 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
914 btree1 = nodehdr1.btree;
915 btree2 = nodehdr2.btree;
916 swap = 1;
917 }
918
919 count = (nodehdr1.count - nodehdr2.count) / 2;
920 if (count == 0)
921 return;
922 tp = state->args->trans;
923 /*
924 * Two cases: high-to-low and low-to-high.
925 */
926 if (count > 0) {
927 /*
928 * Move elements in node2 up to make a hole.
929 */
930 tmp = nodehdr2.count;
931 if (tmp > 0) {
932 tmp *= (uint)sizeof(xfs_da_node_entry_t);
933 btree_s = &btree2[0];
934 btree_d = &btree2[count];
935 memmove(btree_d, btree_s, tmp);
936 }
937
938 /*
939 * Move the req'd B-tree elements from high in node1 to
940 * low in node2.
941 */
942 nodehdr2.count += count;
943 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
944 btree_s = &btree1[nodehdr1.count - count];
945 btree_d = &btree2[0];
946 memcpy(btree_d, btree_s, tmp);
947 nodehdr1.count -= count;
948 } else {
949 /*
950 * Move the req'd B-tree elements from low in node2 to
951 * high in node1.
952 */
953 count = -count;
954 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
955 btree_s = &btree2[0];
956 btree_d = &btree1[nodehdr1.count];
957 memcpy(btree_d, btree_s, tmp);
958 nodehdr1.count += count;
959
960 xfs_trans_log_buf(tp, blk1->bp,
961 XFS_DA_LOGRANGE(node1, btree_d, tmp));
962
963 /*
964 * Move elements in node2 down to fill the hole.
965 */
966 tmp = nodehdr2.count - count;
967 tmp *= (uint)sizeof(xfs_da_node_entry_t);
968 btree_s = &btree2[count];
969 btree_d = &btree2[0];
970 memmove(btree_d, btree_s, tmp);
971 nodehdr2.count -= count;
972 }
973
974 /*
975 * Log header of node 1 and all current bits of node 2.
976 */
977 xfs_da3_node_hdr_to_disk(dp->i_mount, node1, &nodehdr1);
978 xfs_trans_log_buf(tp, blk1->bp,
979 XFS_DA_LOGRANGE(node1, &node1->hdr,
980 state->args->geo->node_hdr_size));
981
982 xfs_da3_node_hdr_to_disk(dp->i_mount, node2, &nodehdr2);
983 xfs_trans_log_buf(tp, blk2->bp,
984 XFS_DA_LOGRANGE(node2, &node2->hdr,
985 state->args->geo->node_hdr_size +
986 (sizeof(btree2[0]) * nodehdr2.count)));
987
988 /*
989 * Record the last hashval from each block for upward propagation.
990 * (note: don't use the swapped node pointers)
991 */
992 if (swap) {
993 node1 = blk1->bp->b_addr;
994 node2 = blk2->bp->b_addr;
995 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
996 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
997 btree1 = nodehdr1.btree;
998 btree2 = nodehdr2.btree;
999 }
1000 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
1001 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
1002
1003 /*
1004 * Adjust the expected index for insertion.
1005 */
1006 if (blk1->index >= nodehdr1.count) {
1007 blk2->index = blk1->index - nodehdr1.count;
1008 blk1->index = nodehdr1.count + 1; /* make it invalid */
1009 }
1010}
1011
1012/*
1013 * Add a new entry to an intermediate node.
1014 */
1015STATIC void
1016xfs_da3_node_add(
1017 struct xfs_da_state *state,
1018 struct xfs_da_state_blk *oldblk,
1019 struct xfs_da_state_blk *newblk)
1020{
1021 struct xfs_da_intnode *node;
1022 struct xfs_da3_icnode_hdr nodehdr;
1023 struct xfs_da_node_entry *btree;
1024 int tmp;
1025 struct xfs_inode *dp = state->args->dp;
1026
1027 trace_xfs_da_node_add(state->args);
1028
1029 node = oldblk->bp->b_addr;
1030 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1031 btree = nodehdr.btree;
1032
1033 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
1034 ASSERT(newblk->blkno != 0);
1035 if (state->args->whichfork == XFS_DATA_FORK)
1036 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
1037 newblk->blkno < state->args->geo->freeblk);
1038
1039 /*
1040 * We may need to make some room before we insert the new node.
1041 */
1042 tmp = 0;
1043 if (oldblk->index < nodehdr.count) {
1044 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
1045 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
1046 }
1047 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
1048 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
1049 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1050 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
1051 tmp + sizeof(*btree)));
1052
1053 nodehdr.count += 1;
1054 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1055 xfs_trans_log_buf(state->args->trans, oldblk->bp,
1056 XFS_DA_LOGRANGE(node, &node->hdr,
1057 state->args->geo->node_hdr_size));
1058
1059 /*
1060 * Copy the last hash value from the oldblk to propagate upwards.
1061 */
1062 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1063}
1064
1065/*========================================================================
1066 * Routines used for shrinking the Btree.
1067 *========================================================================*/
1068
1069/*
1070 * Deallocate an empty leaf node, remove it from its parent,
1071 * possibly deallocating that block, etc...
1072 */
1073int
1074xfs_da3_join(
1075 struct xfs_da_state *state)
1076{
1077 struct xfs_da_state_blk *drop_blk;
1078 struct xfs_da_state_blk *save_blk;
1079 int action = 0;
1080 int error;
1081
1082 trace_xfs_da_join(state->args);
1083
1084 drop_blk = &state->path.blk[ state->path.active-1 ];
1085 save_blk = &state->altpath.blk[ state->path.active-1 ];
1086 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
1087 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
1088 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
1089
1090 /*
1091 * Walk back up the tree joining/deallocating as necessary.
1092 * When we stop dropping blocks, break out.
1093 */
1094 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
1095 state->path.active--) {
1096 /*
1097 * See if we can combine the block with a neighbor.
1098 * (action == 0) => no options, just leave
1099 * (action == 1) => coalesce, then unlink
1100 * (action == 2) => block empty, unlink it
1101 */
1102 switch (drop_blk->magic) {
1103 case XFS_ATTR_LEAF_MAGIC:
1104 error = xfs_attr3_leaf_toosmall(state, &action);
1105 if (error)
1106 return error;
1107 if (action == 0)
1108 return 0;
1109 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1110 break;
1111 case XFS_DIR2_LEAFN_MAGIC:
1112 error = xfs_dir2_leafn_toosmall(state, &action);
1113 if (error)
1114 return error;
1115 if (action == 0)
1116 return 0;
1117 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1118 break;
1119 case XFS_DA_NODE_MAGIC:
1120 /*
1121 * Remove the offending node, fixup hashvals,
1122 * check for a toosmall neighbor.
1123 */
1124 xfs_da3_node_remove(state, drop_blk);
1125 xfs_da3_fixhashpath(state, &state->path);
1126 error = xfs_da3_node_toosmall(state, &action);
1127 if (error)
1128 return error;
1129 if (action == 0)
1130 return 0;
1131 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1132 break;
1133 }
1134 xfs_da3_fixhashpath(state, &state->altpath);
1135 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1136 xfs_da_state_kill_altpath(state);
1137 if (error)
1138 return error;
1139 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1140 drop_blk->bp);
1141 drop_blk->bp = NULL;
1142 if (error)
1143 return error;
1144 }
1145 /*
1146 * We joined all the way to the top. If it turns out that
1147 * we only have one entry in the root, make the child block
1148 * the new root.
1149 */
1150 xfs_da3_node_remove(state, drop_blk);
1151 xfs_da3_fixhashpath(state, &state->path);
1152 error = xfs_da3_root_join(state, &state->path.blk[0]);
1153 return error;
1154}
1155
1156#ifdef DEBUG
1157static void
1158xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1159{
1160 __be16 magic = blkinfo->magic;
1161
1162 if (level == 1) {
1163 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1164 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1165 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1166 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1167 } else {
1168 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1169 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1170 }
1171 ASSERT(!blkinfo->forw);
1172 ASSERT(!blkinfo->back);
1173}
1174#else /* !DEBUG */
1175#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1176#endif /* !DEBUG */
1177
1178/*
1179 * We have only one entry in the root. Copy the only remaining child of
1180 * the old root to block 0 as the new root node.
1181 */
1182STATIC int
1183xfs_da3_root_join(
1184 struct xfs_da_state *state,
1185 struct xfs_da_state_blk *root_blk)
1186{
1187 struct xfs_da_intnode *oldroot;
1188 struct xfs_da_args *args;
1189 xfs_dablk_t child;
1190 struct xfs_buf *bp;
1191 struct xfs_da3_icnode_hdr oldroothdr;
1192 int error;
1193 struct xfs_inode *dp = state->args->dp;
1194
1195 trace_xfs_da_root_join(state->args);
1196
1197 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1198
1199 args = state->args;
1200 oldroot = root_blk->bp->b_addr;
1201 xfs_da3_node_hdr_from_disk(dp->i_mount, &oldroothdr, oldroot);
1202 ASSERT(oldroothdr.forw == 0);
1203 ASSERT(oldroothdr.back == 0);
1204
1205 /*
1206 * If the root has more than one child, then don't do anything.
1207 */
1208 if (oldroothdr.count > 1)
1209 return 0;
1210
1211 /*
1212 * Read in the (only) child block, then copy those bytes into
1213 * the root block's buffer and free the original child block.
1214 */
1215 child = be32_to_cpu(oldroothdr.btree[0].before);
1216 ASSERT(child != 0);
1217 error = xfs_da3_node_read(args->trans, dp, child, &bp, args->whichfork);
1218 if (error)
1219 return error;
1220 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1221
1222 /*
1223 * This could be copying a leaf back into the root block in the case of
1224 * there only being a single leaf block left in the tree. Hence we have
1225 * to update the b_ops pointer as well to match the buffer type change
1226 * that could occur. For dir3 blocks we also need to update the block
1227 * number in the buffer header.
1228 */
1229 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1230 root_blk->bp->b_ops = bp->b_ops;
1231 xfs_trans_buf_copy_type(root_blk->bp, bp);
1232 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1233 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1234 da3->blkno = cpu_to_be64(xfs_buf_daddr(root_blk->bp));
1235 }
1236 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1237 args->geo->blksize - 1);
1238 error = xfs_da_shrink_inode(args, child, bp);
1239 return error;
1240}
1241
1242/*
1243 * Check a node block and its neighbors to see if the block should be
1244 * collapsed into one or the other neighbor. Always keep the block
1245 * with the smaller block number.
1246 * If the current block is over 50% full, don't try to join it, return 0.
1247 * If the block is empty, fill in the state structure and return 2.
1248 * If it can be collapsed, fill in the state structure and return 1.
1249 * If nothing can be done, return 0.
1250 */
1251STATIC int
1252xfs_da3_node_toosmall(
1253 struct xfs_da_state *state,
1254 int *action)
1255{
1256 struct xfs_da_intnode *node;
1257 struct xfs_da_state_blk *blk;
1258 struct xfs_da_blkinfo *info;
1259 xfs_dablk_t blkno;
1260 struct xfs_buf *bp;
1261 struct xfs_da3_icnode_hdr nodehdr;
1262 int count;
1263 int forward;
1264 int error;
1265 int retval;
1266 int i;
1267 struct xfs_inode *dp = state->args->dp;
1268
1269 trace_xfs_da_node_toosmall(state->args);
1270
1271 /*
1272 * Check for the degenerate case of the block being over 50% full.
1273 * If so, it's not worth even looking to see if we might be able
1274 * to coalesce with a sibling.
1275 */
1276 blk = &state->path.blk[ state->path.active-1 ];
1277 info = blk->bp->b_addr;
1278 node = (xfs_da_intnode_t *)info;
1279 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1280 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1281 *action = 0; /* blk over 50%, don't try to join */
1282 return 0; /* blk over 50%, don't try to join */
1283 }
1284
1285 /*
1286 * Check for the degenerate case of the block being empty.
1287 * If the block is empty, we'll simply delete it, no need to
1288 * coalesce it with a sibling block. We choose (arbitrarily)
1289 * to merge with the forward block unless it is NULL.
1290 */
1291 if (nodehdr.count == 0) {
1292 /*
1293 * Make altpath point to the block we want to keep and
1294 * path point to the block we want to drop (this one).
1295 */
1296 forward = (info->forw != 0);
1297 memcpy(&state->altpath, &state->path, sizeof(state->path));
1298 error = xfs_da3_path_shift(state, &state->altpath, forward,
1299 0, &retval);
1300 if (error)
1301 return error;
1302 if (retval) {
1303 *action = 0;
1304 } else {
1305 *action = 2;
1306 }
1307 return 0;
1308 }
1309
1310 /*
1311 * Examine each sibling block to see if we can coalesce with
1312 * at least 25% free space to spare. We need to figure out
1313 * whether to merge with the forward or the backward block.
1314 * We prefer coalescing with the lower numbered sibling so as
1315 * to shrink a directory over time.
1316 */
1317 count = state->args->geo->node_ents;
1318 count -= state->args->geo->node_ents >> 2;
1319 count -= nodehdr.count;
1320
1321 /* start with smaller blk num */
1322 forward = nodehdr.forw < nodehdr.back;
1323 for (i = 0; i < 2; forward = !forward, i++) {
1324 struct xfs_da3_icnode_hdr thdr;
1325 if (forward)
1326 blkno = nodehdr.forw;
1327 else
1328 blkno = nodehdr.back;
1329 if (blkno == 0)
1330 continue;
1331 error = xfs_da3_node_read(state->args->trans, dp, blkno, &bp,
1332 state->args->whichfork);
1333 if (error)
1334 return error;
1335
1336 node = bp->b_addr;
1337 xfs_da3_node_hdr_from_disk(dp->i_mount, &thdr, node);
1338 xfs_trans_brelse(state->args->trans, bp);
1339
1340 if (count - thdr.count >= 0)
1341 break; /* fits with at least 25% to spare */
1342 }
1343 if (i >= 2) {
1344 *action = 0;
1345 return 0;
1346 }
1347
1348 /*
1349 * Make altpath point to the block we want to keep (the lower
1350 * numbered block) and path point to the block we want to drop.
1351 */
1352 memcpy(&state->altpath, &state->path, sizeof(state->path));
1353 if (blkno < blk->blkno) {
1354 error = xfs_da3_path_shift(state, &state->altpath, forward,
1355 0, &retval);
1356 } else {
1357 error = xfs_da3_path_shift(state, &state->path, forward,
1358 0, &retval);
1359 }
1360 if (error)
1361 return error;
1362 if (retval) {
1363 *action = 0;
1364 return 0;
1365 }
1366 *action = 1;
1367 return 0;
1368}
1369
1370/*
1371 * Pick up the last hashvalue from an intermediate node.
1372 */
1373STATIC uint
1374xfs_da3_node_lasthash(
1375 struct xfs_inode *dp,
1376 struct xfs_buf *bp,
1377 int *count)
1378{
1379 struct xfs_da3_icnode_hdr nodehdr;
1380
1381 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, bp->b_addr);
1382 if (count)
1383 *count = nodehdr.count;
1384 if (!nodehdr.count)
1385 return 0;
1386 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1387}
1388
1389/*
1390 * Walk back up the tree adjusting hash values as necessary,
1391 * when we stop making changes, return.
1392 */
1393void
1394xfs_da3_fixhashpath(
1395 struct xfs_da_state *state,
1396 struct xfs_da_state_path *path)
1397{
1398 struct xfs_da_state_blk *blk;
1399 struct xfs_da_intnode *node;
1400 struct xfs_da_node_entry *btree;
1401 xfs_dahash_t lasthash=0;
1402 int level;
1403 int count;
1404 struct xfs_inode *dp = state->args->dp;
1405
1406 trace_xfs_da_fixhashpath(state->args);
1407
1408 level = path->active-1;
1409 blk = &path->blk[ level ];
1410 switch (blk->magic) {
1411 case XFS_ATTR_LEAF_MAGIC:
1412 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1413 if (count == 0)
1414 return;
1415 break;
1416 case XFS_DIR2_LEAFN_MAGIC:
1417 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
1418 if (count == 0)
1419 return;
1420 break;
1421 case XFS_DA_NODE_MAGIC:
1422 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1423 if (count == 0)
1424 return;
1425 break;
1426 }
1427 for (blk--, level--; level >= 0; blk--, level--) {
1428 struct xfs_da3_icnode_hdr nodehdr;
1429
1430 node = blk->bp->b_addr;
1431 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1432 btree = nodehdr.btree;
1433 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1434 break;
1435 blk->hashval = lasthash;
1436 btree[blk->index].hashval = cpu_to_be32(lasthash);
1437 xfs_trans_log_buf(state->args->trans, blk->bp,
1438 XFS_DA_LOGRANGE(node, &btree[blk->index],
1439 sizeof(*btree)));
1440
1441 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1442 }
1443}
1444
1445/*
1446 * Remove an entry from an intermediate node.
1447 */
1448STATIC void
1449xfs_da3_node_remove(
1450 struct xfs_da_state *state,
1451 struct xfs_da_state_blk *drop_blk)
1452{
1453 struct xfs_da_intnode *node;
1454 struct xfs_da3_icnode_hdr nodehdr;
1455 struct xfs_da_node_entry *btree;
1456 int index;
1457 int tmp;
1458 struct xfs_inode *dp = state->args->dp;
1459
1460 trace_xfs_da_node_remove(state->args);
1461
1462 node = drop_blk->bp->b_addr;
1463 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1464 ASSERT(drop_blk->index < nodehdr.count);
1465 ASSERT(drop_blk->index >= 0);
1466
1467 /*
1468 * Copy over the offending entry, or just zero it out.
1469 */
1470 index = drop_blk->index;
1471 btree = nodehdr.btree;
1472 if (index < nodehdr.count - 1) {
1473 tmp = nodehdr.count - index - 1;
1474 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1475 memmove(&btree[index], &btree[index + 1], tmp);
1476 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1477 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1478 index = nodehdr.count - 1;
1479 }
1480 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1481 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1482 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1483 nodehdr.count -= 1;
1484 xfs_da3_node_hdr_to_disk(dp->i_mount, node, &nodehdr);
1485 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1486 XFS_DA_LOGRANGE(node, &node->hdr, state->args->geo->node_hdr_size));
1487
1488 /*
1489 * Copy the last hash value from the block to propagate upwards.
1490 */
1491 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1492}
1493
1494/*
1495 * Unbalance the elements between two intermediate nodes,
1496 * move all Btree elements from one node into another.
1497 */
1498STATIC void
1499xfs_da3_node_unbalance(
1500 struct xfs_da_state *state,
1501 struct xfs_da_state_blk *drop_blk,
1502 struct xfs_da_state_blk *save_blk)
1503{
1504 struct xfs_da_intnode *drop_node;
1505 struct xfs_da_intnode *save_node;
1506 struct xfs_da_node_entry *drop_btree;
1507 struct xfs_da_node_entry *save_btree;
1508 struct xfs_da3_icnode_hdr drop_hdr;
1509 struct xfs_da3_icnode_hdr save_hdr;
1510 struct xfs_trans *tp;
1511 int sindex;
1512 int tmp;
1513 struct xfs_inode *dp = state->args->dp;
1514
1515 trace_xfs_da_node_unbalance(state->args);
1516
1517 drop_node = drop_blk->bp->b_addr;
1518 save_node = save_blk->bp->b_addr;
1519 xfs_da3_node_hdr_from_disk(dp->i_mount, &drop_hdr, drop_node);
1520 xfs_da3_node_hdr_from_disk(dp->i_mount, &save_hdr, save_node);
1521 drop_btree = drop_hdr.btree;
1522 save_btree = save_hdr.btree;
1523 tp = state->args->trans;
1524
1525 /*
1526 * If the dying block has lower hashvals, then move all the
1527 * elements in the remaining block up to make a hole.
1528 */
1529 if ((be32_to_cpu(drop_btree[0].hashval) <
1530 be32_to_cpu(save_btree[0].hashval)) ||
1531 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1532 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1533 /* XXX: check this - is memmove dst correct? */
1534 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1535 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1536
1537 sindex = 0;
1538 xfs_trans_log_buf(tp, save_blk->bp,
1539 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1540 (save_hdr.count + drop_hdr.count) *
1541 sizeof(xfs_da_node_entry_t)));
1542 } else {
1543 sindex = save_hdr.count;
1544 xfs_trans_log_buf(tp, save_blk->bp,
1545 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1546 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1547 }
1548
1549 /*
1550 * Move all the B-tree elements from drop_blk to save_blk.
1551 */
1552 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1553 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1554 save_hdr.count += drop_hdr.count;
1555
1556 xfs_da3_node_hdr_to_disk(dp->i_mount, save_node, &save_hdr);
1557 xfs_trans_log_buf(tp, save_blk->bp,
1558 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1559 state->args->geo->node_hdr_size));
1560
1561 /*
1562 * Save the last hashval in the remaining block for upward propagation.
1563 */
1564 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1565}
1566
1567/*========================================================================
1568 * Routines used for finding things in the Btree.
1569 *========================================================================*/
1570
1571/*
1572 * Walk down the Btree looking for a particular filename, filling
1573 * in the state structure as we go.
1574 *
1575 * We will set the state structure to point to each of the elements
1576 * in each of the nodes where either the hashval is or should be.
1577 *
1578 * We support duplicate hashval's so for each entry in the current
1579 * node that could contain the desired hashval, descend. This is a
1580 * pruned depth-first tree search.
1581 */
1582int /* error */
1583xfs_da3_node_lookup_int(
1584 struct xfs_da_state *state,
1585 int *result)
1586{
1587 struct xfs_da_state_blk *blk;
1588 struct xfs_da_blkinfo *curr;
1589 struct xfs_da_intnode *node;
1590 struct xfs_da_node_entry *btree;
1591 struct xfs_da3_icnode_hdr nodehdr;
1592 struct xfs_da_args *args;
1593 xfs_dablk_t blkno;
1594 xfs_dahash_t hashval;
1595 xfs_dahash_t btreehashval;
1596 int probe;
1597 int span;
1598 int max;
1599 int error;
1600 int retval;
1601 unsigned int expected_level = 0;
1602 uint16_t magic;
1603 struct xfs_inode *dp = state->args->dp;
1604
1605 args = state->args;
1606
1607 /*
1608 * Descend thru the B-tree searching each level for the right
1609 * node to use, until the right hashval is found.
1610 */
1611 blkno = args->geo->leafblk;
1612 for (blk = &state->path.blk[0], state->path.active = 1;
1613 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1614 blk++, state->path.active++) {
1615 /*
1616 * Read the next node down in the tree.
1617 */
1618 blk->blkno = blkno;
1619 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1620 &blk->bp, args->whichfork);
1621 if (error) {
1622 blk->blkno = 0;
1623 state->path.active--;
1624 return error;
1625 }
1626 curr = blk->bp->b_addr;
1627 magic = be16_to_cpu(curr->magic);
1628
1629 if (magic == XFS_ATTR_LEAF_MAGIC ||
1630 magic == XFS_ATTR3_LEAF_MAGIC) {
1631 blk->magic = XFS_ATTR_LEAF_MAGIC;
1632 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1633 break;
1634 }
1635
1636 if (magic == XFS_DIR2_LEAFN_MAGIC ||
1637 magic == XFS_DIR3_LEAFN_MAGIC) {
1638 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1639 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1640 blk->bp, NULL);
1641 break;
1642 }
1643
1644 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC) {
1645 xfs_buf_mark_corrupt(blk->bp);
1646 return -EFSCORRUPTED;
1647 }
1648
1649 blk->magic = XFS_DA_NODE_MAGIC;
1650
1651 /*
1652 * Search an intermediate node for a match.
1653 */
1654 node = blk->bp->b_addr;
1655 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr, node);
1656 btree = nodehdr.btree;
1657
1658 /* Tree taller than we can handle; bail out! */
1659 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH) {
1660 xfs_buf_mark_corrupt(blk->bp);
1661 return -EFSCORRUPTED;
1662 }
1663
1664 /* Check the level from the root. */
1665 if (blkno == args->geo->leafblk)
1666 expected_level = nodehdr.level - 1;
1667 else if (expected_level != nodehdr.level) {
1668 xfs_buf_mark_corrupt(blk->bp);
1669 return -EFSCORRUPTED;
1670 } else
1671 expected_level--;
1672
1673 max = nodehdr.count;
1674 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1675
1676 /*
1677 * Binary search. (note: small blocks will skip loop)
1678 */
1679 probe = span = max / 2;
1680 hashval = args->hashval;
1681 while (span > 4) {
1682 span /= 2;
1683 btreehashval = be32_to_cpu(btree[probe].hashval);
1684 if (btreehashval < hashval)
1685 probe += span;
1686 else if (btreehashval > hashval)
1687 probe -= span;
1688 else
1689 break;
1690 }
1691 ASSERT((probe >= 0) && (probe < max));
1692 ASSERT((span <= 4) ||
1693 (be32_to_cpu(btree[probe].hashval) == hashval));
1694
1695 /*
1696 * Since we may have duplicate hashval's, find the first
1697 * matching hashval in the node.
1698 */
1699 while (probe > 0 &&
1700 be32_to_cpu(btree[probe].hashval) >= hashval) {
1701 probe--;
1702 }
1703 while (probe < max &&
1704 be32_to_cpu(btree[probe].hashval) < hashval) {
1705 probe++;
1706 }
1707
1708 /*
1709 * Pick the right block to descend on.
1710 */
1711 if (probe == max) {
1712 blk->index = max - 1;
1713 blkno = be32_to_cpu(btree[max - 1].before);
1714 } else {
1715 blk->index = probe;
1716 blkno = be32_to_cpu(btree[probe].before);
1717 }
1718
1719 /* We can't point back to the root. */
1720 if (XFS_IS_CORRUPT(dp->i_mount, blkno == args->geo->leafblk))
1721 return -EFSCORRUPTED;
1722 }
1723
1724 if (XFS_IS_CORRUPT(dp->i_mount, expected_level != 0))
1725 return -EFSCORRUPTED;
1726
1727 /*
1728 * A leaf block that ends in the hashval that we are interested in
1729 * (final hashval == search hashval) means that the next block may
1730 * contain more entries with the same hashval, shift upward to the
1731 * next leaf and keep searching.
1732 */
1733 for (;;) {
1734 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1735 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1736 &blk->index, state);
1737 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1738 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1739 blk->index = args->index;
1740 args->blkno = blk->blkno;
1741 } else {
1742 ASSERT(0);
1743 return -EFSCORRUPTED;
1744 }
1745 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1746 (blk->hashval == args->hashval)) {
1747 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1748 &retval);
1749 if (error)
1750 return error;
1751 if (retval == 0) {
1752 continue;
1753 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1754 /* path_shift() gives ENOENT */
1755 retval = -ENOATTR;
1756 }
1757 }
1758 break;
1759 }
1760 *result = retval;
1761 return 0;
1762}
1763
1764/*========================================================================
1765 * Utility routines.
1766 *========================================================================*/
1767
1768/*
1769 * Compare two intermediate nodes for "order".
1770 */
1771STATIC int
1772xfs_da3_node_order(
1773 struct xfs_inode *dp,
1774 struct xfs_buf *node1_bp,
1775 struct xfs_buf *node2_bp)
1776{
1777 struct xfs_da_intnode *node1;
1778 struct xfs_da_intnode *node2;
1779 struct xfs_da_node_entry *btree1;
1780 struct xfs_da_node_entry *btree2;
1781 struct xfs_da3_icnode_hdr node1hdr;
1782 struct xfs_da3_icnode_hdr node2hdr;
1783
1784 node1 = node1_bp->b_addr;
1785 node2 = node2_bp->b_addr;
1786 xfs_da3_node_hdr_from_disk(dp->i_mount, &node1hdr, node1);
1787 xfs_da3_node_hdr_from_disk(dp->i_mount, &node2hdr, node2);
1788 btree1 = node1hdr.btree;
1789 btree2 = node2hdr.btree;
1790
1791 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1792 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1793 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1794 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1795 return 1;
1796 }
1797 return 0;
1798}
1799
1800/*
1801 * Link a new block into a doubly linked list of blocks (of whatever type).
1802 */
1803int /* error */
1804xfs_da3_blk_link(
1805 struct xfs_da_state *state,
1806 struct xfs_da_state_blk *old_blk,
1807 struct xfs_da_state_blk *new_blk)
1808{
1809 struct xfs_da_blkinfo *old_info;
1810 struct xfs_da_blkinfo *new_info;
1811 struct xfs_da_blkinfo *tmp_info;
1812 struct xfs_da_args *args;
1813 struct xfs_buf *bp;
1814 int before = 0;
1815 int error;
1816 struct xfs_inode *dp = state->args->dp;
1817
1818 /*
1819 * Set up environment.
1820 */
1821 args = state->args;
1822 ASSERT(args != NULL);
1823 old_info = old_blk->bp->b_addr;
1824 new_info = new_blk->bp->b_addr;
1825 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1826 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1827 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1828
1829 switch (old_blk->magic) {
1830 case XFS_ATTR_LEAF_MAGIC:
1831 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1832 break;
1833 case XFS_DIR2_LEAFN_MAGIC:
1834 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1835 break;
1836 case XFS_DA_NODE_MAGIC:
1837 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1838 break;
1839 }
1840
1841 /*
1842 * Link blocks in appropriate order.
1843 */
1844 if (before) {
1845 /*
1846 * Link new block in before existing block.
1847 */
1848 trace_xfs_da_link_before(args);
1849 new_info->forw = cpu_to_be32(old_blk->blkno);
1850 new_info->back = old_info->back;
1851 if (old_info->back) {
1852 error = xfs_da3_node_read(args->trans, dp,
1853 be32_to_cpu(old_info->back),
1854 &bp, args->whichfork);
1855 if (error)
1856 return error;
1857 ASSERT(bp != NULL);
1858 tmp_info = bp->b_addr;
1859 ASSERT(tmp_info->magic == old_info->magic);
1860 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1861 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1862 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1863 }
1864 old_info->back = cpu_to_be32(new_blk->blkno);
1865 } else {
1866 /*
1867 * Link new block in after existing block.
1868 */
1869 trace_xfs_da_link_after(args);
1870 new_info->forw = old_info->forw;
1871 new_info->back = cpu_to_be32(old_blk->blkno);
1872 if (old_info->forw) {
1873 error = xfs_da3_node_read(args->trans, dp,
1874 be32_to_cpu(old_info->forw),
1875 &bp, args->whichfork);
1876 if (error)
1877 return error;
1878 ASSERT(bp != NULL);
1879 tmp_info = bp->b_addr;
1880 ASSERT(tmp_info->magic == old_info->magic);
1881 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1882 tmp_info->back = cpu_to_be32(new_blk->blkno);
1883 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1884 }
1885 old_info->forw = cpu_to_be32(new_blk->blkno);
1886 }
1887
1888 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1889 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1890 return 0;
1891}
1892
1893/*
1894 * Unlink a block from a doubly linked list of blocks.
1895 */
1896STATIC int /* error */
1897xfs_da3_blk_unlink(
1898 struct xfs_da_state *state,
1899 struct xfs_da_state_blk *drop_blk,
1900 struct xfs_da_state_blk *save_blk)
1901{
1902 struct xfs_da_blkinfo *drop_info;
1903 struct xfs_da_blkinfo *save_info;
1904 struct xfs_da_blkinfo *tmp_info;
1905 struct xfs_da_args *args;
1906 struct xfs_buf *bp;
1907 int error;
1908
1909 /*
1910 * Set up environment.
1911 */
1912 args = state->args;
1913 ASSERT(args != NULL);
1914 save_info = save_blk->bp->b_addr;
1915 drop_info = drop_blk->bp->b_addr;
1916 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1917 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1918 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1919 ASSERT(save_blk->magic == drop_blk->magic);
1920 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1921 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1922 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1923 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1924
1925 /*
1926 * Unlink the leaf block from the doubly linked chain of leaves.
1927 */
1928 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1929 trace_xfs_da_unlink_back(args);
1930 save_info->back = drop_info->back;
1931 if (drop_info->back) {
1932 error = xfs_da3_node_read(args->trans, args->dp,
1933 be32_to_cpu(drop_info->back),
1934 &bp, args->whichfork);
1935 if (error)
1936 return error;
1937 ASSERT(bp != NULL);
1938 tmp_info = bp->b_addr;
1939 ASSERT(tmp_info->magic == save_info->magic);
1940 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1941 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1942 xfs_trans_log_buf(args->trans, bp, 0,
1943 sizeof(*tmp_info) - 1);
1944 }
1945 } else {
1946 trace_xfs_da_unlink_forward(args);
1947 save_info->forw = drop_info->forw;
1948 if (drop_info->forw) {
1949 error = xfs_da3_node_read(args->trans, args->dp,
1950 be32_to_cpu(drop_info->forw),
1951 &bp, args->whichfork);
1952 if (error)
1953 return error;
1954 ASSERT(bp != NULL);
1955 tmp_info = bp->b_addr;
1956 ASSERT(tmp_info->magic == save_info->magic);
1957 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1958 tmp_info->back = cpu_to_be32(save_blk->blkno);
1959 xfs_trans_log_buf(args->trans, bp, 0,
1960 sizeof(*tmp_info) - 1);
1961 }
1962 }
1963
1964 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1965 return 0;
1966}
1967
1968/*
1969 * Move a path "forward" or "!forward" one block at the current level.
1970 *
1971 * This routine will adjust a "path" to point to the next block
1972 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1973 * Btree, including updating pointers to the intermediate nodes between
1974 * the new bottom and the root.
1975 */
1976int /* error */
1977xfs_da3_path_shift(
1978 struct xfs_da_state *state,
1979 struct xfs_da_state_path *path,
1980 int forward,
1981 int release,
1982 int *result)
1983{
1984 struct xfs_da_state_blk *blk;
1985 struct xfs_da_blkinfo *info;
1986 struct xfs_da_args *args;
1987 struct xfs_da_node_entry *btree;
1988 struct xfs_da3_icnode_hdr nodehdr;
1989 struct xfs_buf *bp;
1990 xfs_dablk_t blkno = 0;
1991 int level;
1992 int error;
1993 struct xfs_inode *dp = state->args->dp;
1994
1995 trace_xfs_da_path_shift(state->args);
1996
1997 /*
1998 * Roll up the Btree looking for the first block where our
1999 * current index is not at the edge of the block. Note that
2000 * we skip the bottom layer because we want the sibling block.
2001 */
2002 args = state->args;
2003 ASSERT(args != NULL);
2004 ASSERT(path != NULL);
2005 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
2006 level = (path->active-1) - 1; /* skip bottom layer in path */
2007 for (; level >= 0; level--) {
2008 blk = &path->blk[level];
2009 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2010 blk->bp->b_addr);
2011
2012 if (forward && (blk->index < nodehdr.count - 1)) {
2013 blk->index++;
2014 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2015 break;
2016 } else if (!forward && (blk->index > 0)) {
2017 blk->index--;
2018 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2019 break;
2020 }
2021 }
2022 if (level < 0) {
2023 *result = -ENOENT; /* we're out of our tree */
2024 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
2025 return 0;
2026 }
2027
2028 /*
2029 * Roll down the edge of the subtree until we reach the
2030 * same depth we were at originally.
2031 */
2032 for (blk++, level++; level < path->active; blk++, level++) {
2033 /*
2034 * Read the next child block into a local buffer.
2035 */
2036 error = xfs_da3_node_read(args->trans, dp, blkno, &bp,
2037 args->whichfork);
2038 if (error)
2039 return error;
2040
2041 /*
2042 * Release the old block (if it's dirty, the trans doesn't
2043 * actually let go) and swap the local buffer into the path
2044 * structure. This ensures failure of the above read doesn't set
2045 * a NULL buffer in an active slot in the path.
2046 */
2047 if (release)
2048 xfs_trans_brelse(args->trans, blk->bp);
2049 blk->blkno = blkno;
2050 blk->bp = bp;
2051
2052 info = blk->bp->b_addr;
2053 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
2054 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
2055 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2056 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
2057 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
2058 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
2059
2060
2061 /*
2062 * Note: we flatten the magic number to a single type so we
2063 * don't have to compare against crc/non-crc types elsewhere.
2064 */
2065 switch (be16_to_cpu(info->magic)) {
2066 case XFS_DA_NODE_MAGIC:
2067 case XFS_DA3_NODE_MAGIC:
2068 blk->magic = XFS_DA_NODE_MAGIC;
2069 xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr,
2070 bp->b_addr);
2071 btree = nodehdr.btree;
2072 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2073 if (forward)
2074 blk->index = 0;
2075 else
2076 blk->index = nodehdr.count - 1;
2077 blkno = be32_to_cpu(btree[blk->index].before);
2078 break;
2079 case XFS_ATTR_LEAF_MAGIC:
2080 case XFS_ATTR3_LEAF_MAGIC:
2081 blk->magic = XFS_ATTR_LEAF_MAGIC;
2082 ASSERT(level == path->active-1);
2083 blk->index = 0;
2084 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
2085 break;
2086 case XFS_DIR2_LEAFN_MAGIC:
2087 case XFS_DIR3_LEAFN_MAGIC:
2088 blk->magic = XFS_DIR2_LEAFN_MAGIC;
2089 ASSERT(level == path->active-1);
2090 blk->index = 0;
2091 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
2092 blk->bp, NULL);
2093 break;
2094 default:
2095 ASSERT(0);
2096 break;
2097 }
2098 }
2099 *result = 0;
2100 return 0;
2101}
2102
2103
2104/*========================================================================
2105 * Utility routines.
2106 *========================================================================*/
2107
2108/*
2109 * Implement a simple hash on a character string.
2110 * Rotate the hash value by 7 bits, then XOR each character in.
2111 * This is implemented with some source-level loop unrolling.
2112 */
2113xfs_dahash_t
2114xfs_da_hashname(const uint8_t *name, int namelen)
2115{
2116 xfs_dahash_t hash;
2117
2118 /*
2119 * Do four characters at a time as long as we can.
2120 */
2121 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2122 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2123 (name[3] << 0) ^ rol32(hash, 7 * 4);
2124
2125 /*
2126 * Now do the rest of the characters.
2127 */
2128 switch (namelen) {
2129 case 3:
2130 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2131 rol32(hash, 7 * 3);
2132 case 2:
2133 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2134 case 1:
2135 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2136 default: /* case 0: */
2137 return hash;
2138 }
2139}
2140
2141enum xfs_dacmp
2142xfs_da_compname(
2143 struct xfs_da_args *args,
2144 const unsigned char *name,
2145 int len)
2146{
2147 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2148 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2149}
2150
2151int
2152xfs_da_grow_inode_int(
2153 struct xfs_da_args *args,
2154 xfs_fileoff_t *bno,
2155 int count)
2156{
2157 struct xfs_trans *tp = args->trans;
2158 struct xfs_inode *dp = args->dp;
2159 int w = args->whichfork;
2160 xfs_rfsblock_t nblks = dp->i_nblocks;
2161 struct xfs_bmbt_irec map, *mapp;
2162 int nmap, error, got, i, mapi;
2163
2164 /*
2165 * Find a spot in the file space to put the new block.
2166 */
2167 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2168 if (error)
2169 return error;
2170
2171 /*
2172 * Try mapping it in one filesystem block.
2173 */
2174 nmap = 1;
2175 error = xfs_bmapi_write(tp, dp, *bno, count,
2176 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2177 args->total, &map, &nmap);
2178 if (error)
2179 return error;
2180
2181 ASSERT(nmap <= 1);
2182 if (nmap == 1) {
2183 mapp = ↦
2184 mapi = 1;
2185 } else if (nmap == 0 && count > 1) {
2186 xfs_fileoff_t b;
2187 int c;
2188
2189 /*
2190 * If we didn't get it and the block might work if fragmented,
2191 * try without the CONTIG flag. Loop until we get it all.
2192 */
2193 mapp = kmem_alloc(sizeof(*mapp) * count, 0);
2194 for (b = *bno, mapi = 0; b < *bno + count; ) {
2195 c = (int)(*bno + count - b);
2196 nmap = min(XFS_BMAP_MAX_NMAP, c);
2197 error = xfs_bmapi_write(tp, dp, b, c,
2198 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2199 args->total, &mapp[mapi], &nmap);
2200 if (error)
2201 goto out_free_map;
2202 if (nmap < 1)
2203 break;
2204 mapi += nmap;
2205 b = mapp[mapi - 1].br_startoff +
2206 mapp[mapi - 1].br_blockcount;
2207 }
2208 } else {
2209 mapi = 0;
2210 mapp = NULL;
2211 }
2212
2213 /*
2214 * Count the blocks we got, make sure it matches the total.
2215 */
2216 for (i = 0, got = 0; i < mapi; i++)
2217 got += mapp[i].br_blockcount;
2218 if (got != count || mapp[0].br_startoff != *bno ||
2219 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2220 *bno + count) {
2221 error = -ENOSPC;
2222 goto out_free_map;
2223 }
2224
2225 /* account for newly allocated blocks in reserved blocks total */
2226 args->total -= dp->i_nblocks - nblks;
2227
2228out_free_map:
2229 if (mapp != &map)
2230 kmem_free(mapp);
2231 return error;
2232}
2233
2234/*
2235 * Add a block to the btree ahead of the file.
2236 * Return the new block number to the caller.
2237 */
2238int
2239xfs_da_grow_inode(
2240 struct xfs_da_args *args,
2241 xfs_dablk_t *new_blkno)
2242{
2243 xfs_fileoff_t bno;
2244 int error;
2245
2246 trace_xfs_da_grow_inode(args);
2247
2248 bno = args->geo->leafblk;
2249 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2250 if (!error)
2251 *new_blkno = (xfs_dablk_t)bno;
2252 return error;
2253}
2254
2255/*
2256 * Ick. We need to always be able to remove a btree block, even
2257 * if there's no space reservation because the filesystem is full.
2258 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2259 * It swaps the target block with the last block in the file. The
2260 * last block in the file can always be removed since it can't cause
2261 * a bmap btree split to do that.
2262 */
2263STATIC int
2264xfs_da3_swap_lastblock(
2265 struct xfs_da_args *args,
2266 xfs_dablk_t *dead_blknop,
2267 struct xfs_buf **dead_bufp)
2268{
2269 struct xfs_da_blkinfo *dead_info;
2270 struct xfs_da_blkinfo *sib_info;
2271 struct xfs_da_intnode *par_node;
2272 struct xfs_da_intnode *dead_node;
2273 struct xfs_dir2_leaf *dead_leaf2;
2274 struct xfs_da_node_entry *btree;
2275 struct xfs_da3_icnode_hdr par_hdr;
2276 struct xfs_inode *dp;
2277 struct xfs_trans *tp;
2278 struct xfs_mount *mp;
2279 struct xfs_buf *dead_buf;
2280 struct xfs_buf *last_buf;
2281 struct xfs_buf *sib_buf;
2282 struct xfs_buf *par_buf;
2283 xfs_dahash_t dead_hash;
2284 xfs_fileoff_t lastoff;
2285 xfs_dablk_t dead_blkno;
2286 xfs_dablk_t last_blkno;
2287 xfs_dablk_t sib_blkno;
2288 xfs_dablk_t par_blkno;
2289 int error;
2290 int w;
2291 int entno;
2292 int level;
2293 int dead_level;
2294
2295 trace_xfs_da_swap_lastblock(args);
2296
2297 dead_buf = *dead_bufp;
2298 dead_blkno = *dead_blknop;
2299 tp = args->trans;
2300 dp = args->dp;
2301 w = args->whichfork;
2302 ASSERT(w == XFS_DATA_FORK);
2303 mp = dp->i_mount;
2304 lastoff = args->geo->freeblk;
2305 error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2306 if (error)
2307 return error;
2308 if (XFS_IS_CORRUPT(mp, lastoff == 0))
2309 return -EFSCORRUPTED;
2310 /*
2311 * Read the last block in the btree space.
2312 */
2313 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2314 error = xfs_da3_node_read(tp, dp, last_blkno, &last_buf, w);
2315 if (error)
2316 return error;
2317 /*
2318 * Copy the last block into the dead buffer and log it.
2319 */
2320 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2321 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2322 dead_info = dead_buf->b_addr;
2323 /*
2324 * Get values from the moved block.
2325 */
2326 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2327 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2328 struct xfs_dir3_icleaf_hdr leafhdr;
2329 struct xfs_dir2_leaf_entry *ents;
2330
2331 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2332 xfs_dir2_leaf_hdr_from_disk(dp->i_mount, &leafhdr,
2333 dead_leaf2);
2334 ents = leafhdr.ents;
2335 dead_level = 0;
2336 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2337 } else {
2338 struct xfs_da3_icnode_hdr deadhdr;
2339
2340 dead_node = (xfs_da_intnode_t *)dead_info;
2341 xfs_da3_node_hdr_from_disk(dp->i_mount, &deadhdr, dead_node);
2342 btree = deadhdr.btree;
2343 dead_level = deadhdr.level;
2344 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2345 }
2346 sib_buf = par_buf = NULL;
2347 /*
2348 * If the moved block has a left sibling, fix up the pointers.
2349 */
2350 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2351 error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
2352 if (error)
2353 goto done;
2354 sib_info = sib_buf->b_addr;
2355 if (XFS_IS_CORRUPT(mp,
2356 be32_to_cpu(sib_info->forw) != last_blkno ||
2357 sib_info->magic != dead_info->magic)) {
2358 error = -EFSCORRUPTED;
2359 goto done;
2360 }
2361 sib_info->forw = cpu_to_be32(dead_blkno);
2362 xfs_trans_log_buf(tp, sib_buf,
2363 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2364 sizeof(sib_info->forw)));
2365 sib_buf = NULL;
2366 }
2367 /*
2368 * If the moved block has a right sibling, fix up the pointers.
2369 */
2370 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
2371 error = xfs_da3_node_read(tp, dp, sib_blkno, &sib_buf, w);
2372 if (error)
2373 goto done;
2374 sib_info = sib_buf->b_addr;
2375 if (XFS_IS_CORRUPT(mp,
2376 be32_to_cpu(sib_info->back) != last_blkno ||
2377 sib_info->magic != dead_info->magic)) {
2378 error = -EFSCORRUPTED;
2379 goto done;
2380 }
2381 sib_info->back = cpu_to_be32(dead_blkno);
2382 xfs_trans_log_buf(tp, sib_buf,
2383 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
2384 sizeof(sib_info->back)));
2385 sib_buf = NULL;
2386 }
2387 par_blkno = args->geo->leafblk;
2388 level = -1;
2389 /*
2390 * Walk down the tree looking for the parent of the moved block.
2391 */
2392 for (;;) {
2393 error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
2394 if (error)
2395 goto done;
2396 par_node = par_buf->b_addr;
2397 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
2398 if (XFS_IS_CORRUPT(mp,
2399 level >= 0 && level != par_hdr.level + 1)) {
2400 error = -EFSCORRUPTED;
2401 goto done;
2402 }
2403 level = par_hdr.level;
2404 btree = par_hdr.btree;
2405 for (entno = 0;
2406 entno < par_hdr.count &&
2407 be32_to_cpu(btree[entno].hashval) < dead_hash;
2408 entno++)
2409 continue;
2410 if (XFS_IS_CORRUPT(mp, entno == par_hdr.count)) {
2411 error = -EFSCORRUPTED;
2412 goto done;
2413 }
2414 par_blkno = be32_to_cpu(btree[entno].before);
2415 if (level == dead_level + 1)
2416 break;
2417 xfs_trans_brelse(tp, par_buf);
2418 par_buf = NULL;
2419 }
2420 /*
2421 * We're in the right parent block.
2422 * Look for the right entry.
2423 */
2424 for (;;) {
2425 for (;
2426 entno < par_hdr.count &&
2427 be32_to_cpu(btree[entno].before) != last_blkno;
2428 entno++)
2429 continue;
2430 if (entno < par_hdr.count)
2431 break;
2432 par_blkno = par_hdr.forw;
2433 xfs_trans_brelse(tp, par_buf);
2434 par_buf = NULL;
2435 if (XFS_IS_CORRUPT(mp, par_blkno == 0)) {
2436 error = -EFSCORRUPTED;
2437 goto done;
2438 }
2439 error = xfs_da3_node_read(tp, dp, par_blkno, &par_buf, w);
2440 if (error)
2441 goto done;
2442 par_node = par_buf->b_addr;
2443 xfs_da3_node_hdr_from_disk(dp->i_mount, &par_hdr, par_node);
2444 if (XFS_IS_CORRUPT(mp, par_hdr.level != level)) {
2445 error = -EFSCORRUPTED;
2446 goto done;
2447 }
2448 btree = par_hdr.btree;
2449 entno = 0;
2450 }
2451 /*
2452 * Update the parent entry pointing to the moved block.
2453 */
2454 btree[entno].before = cpu_to_be32(dead_blkno);
2455 xfs_trans_log_buf(tp, par_buf,
2456 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2457 sizeof(btree[entno].before)));
2458 *dead_blknop = last_blkno;
2459 *dead_bufp = last_buf;
2460 return 0;
2461done:
2462 if (par_buf)
2463 xfs_trans_brelse(tp, par_buf);
2464 if (sib_buf)
2465 xfs_trans_brelse(tp, sib_buf);
2466 xfs_trans_brelse(tp, last_buf);
2467 return error;
2468}
2469
2470/*
2471 * Remove a btree block from a directory or attribute.
2472 */
2473int
2474xfs_da_shrink_inode(
2475 struct xfs_da_args *args,
2476 xfs_dablk_t dead_blkno,
2477 struct xfs_buf *dead_buf)
2478{
2479 struct xfs_inode *dp;
2480 int done, error, w, count;
2481 struct xfs_trans *tp;
2482
2483 trace_xfs_da_shrink_inode(args);
2484
2485 dp = args->dp;
2486 w = args->whichfork;
2487 tp = args->trans;
2488 count = args->geo->fsbcount;
2489 for (;;) {
2490 /*
2491 * Remove extents. If we get ENOSPC for a dir we have to move
2492 * the last block to the place we want to kill.
2493 */
2494 error = xfs_bunmapi(tp, dp, dead_blkno, count,
2495 xfs_bmapi_aflag(w), 0, &done);
2496 if (error == -ENOSPC) {
2497 if (w != XFS_DATA_FORK)
2498 break;
2499 error = xfs_da3_swap_lastblock(args, &dead_blkno,
2500 &dead_buf);
2501 if (error)
2502 break;
2503 } else {
2504 break;
2505 }
2506 }
2507 xfs_trans_binval(tp, dead_buf);
2508 return error;
2509}
2510
2511static int
2512xfs_dabuf_map(
2513 struct xfs_inode *dp,
2514 xfs_dablk_t bno,
2515 unsigned int flags,
2516 int whichfork,
2517 struct xfs_buf_map **mapp,
2518 int *nmaps)
2519{
2520 struct xfs_mount *mp = dp->i_mount;
2521 int nfsb = xfs_dabuf_nfsb(mp, whichfork);
2522 struct xfs_bmbt_irec irec, *irecs = &irec;
2523 struct xfs_buf_map *map = *mapp;
2524 xfs_fileoff_t off = bno;
2525 int error = 0, nirecs, i;
2526
2527 if (nfsb > 1)
2528 irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_NOFS);
2529
2530 nirecs = nfsb;
2531 error = xfs_bmapi_read(dp, bno, nfsb, irecs, &nirecs,
2532 xfs_bmapi_aflag(whichfork));
2533 if (error)
2534 goto out_free_irecs;
2535
2536 /*
2537 * Use the caller provided map for the single map case, else allocate a
2538 * larger one that needs to be free by the caller.
2539 */
2540 if (nirecs > 1) {
2541 map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_NOFS);
2542 if (!map) {
2543 error = -ENOMEM;
2544 goto out_free_irecs;
2545 }
2546 *mapp = map;
2547 }
2548
2549 for (i = 0; i < nirecs; i++) {
2550 if (irecs[i].br_startblock == HOLESTARTBLOCK ||
2551 irecs[i].br_startblock == DELAYSTARTBLOCK)
2552 goto invalid_mapping;
2553 if (off != irecs[i].br_startoff)
2554 goto invalid_mapping;
2555
2556 map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
2557 map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
2558 off += irecs[i].br_blockcount;
2559 }
2560
2561 if (off != bno + nfsb)
2562 goto invalid_mapping;
2563
2564 *nmaps = nirecs;
2565out_free_irecs:
2566 if (irecs != &irec)
2567 kmem_free(irecs);
2568 return error;
2569
2570invalid_mapping:
2571 /* Caller ok with no mapping. */
2572 if (XFS_IS_CORRUPT(mp, !(flags & XFS_DABUF_MAP_HOLE_OK))) {
2573 error = -EFSCORRUPTED;
2574 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2575 xfs_alert(mp, "%s: bno %u inode %llu",
2576 __func__, bno, dp->i_ino);
2577
2578 for (i = 0; i < nirecs; i++) {
2579 xfs_alert(mp,
2580"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2581 i, irecs[i].br_startoff,
2582 irecs[i].br_startblock,
2583 irecs[i].br_blockcount,
2584 irecs[i].br_state);
2585 }
2586 }
2587 } else {
2588 *nmaps = 0;
2589 }
2590 goto out_free_irecs;
2591}
2592
2593/*
2594 * Get a buffer for the dir/attr block.
2595 */
2596int
2597xfs_da_get_buf(
2598 struct xfs_trans *tp,
2599 struct xfs_inode *dp,
2600 xfs_dablk_t bno,
2601 struct xfs_buf **bpp,
2602 int whichfork)
2603{
2604 struct xfs_mount *mp = dp->i_mount;
2605 struct xfs_buf *bp;
2606 struct xfs_buf_map map, *mapp = ↦
2607 int nmap = 1;
2608 int error;
2609
2610 *bpp = NULL;
2611 error = xfs_dabuf_map(dp, bno, 0, whichfork, &mapp, &nmap);
2612 if (error || nmap == 0)
2613 goto out_free;
2614
2615 error = xfs_trans_get_buf_map(tp, mp->m_ddev_targp, mapp, nmap, 0, &bp);
2616 if (error)
2617 goto out_free;
2618
2619 *bpp = bp;
2620
2621out_free:
2622 if (mapp != &map)
2623 kmem_free(mapp);
2624
2625 return error;
2626}
2627
2628/*
2629 * Get a buffer for the dir/attr block, fill in the contents.
2630 */
2631int
2632xfs_da_read_buf(
2633 struct xfs_trans *tp,
2634 struct xfs_inode *dp,
2635 xfs_dablk_t bno,
2636 unsigned int flags,
2637 struct xfs_buf **bpp,
2638 int whichfork,
2639 const struct xfs_buf_ops *ops)
2640{
2641 struct xfs_mount *mp = dp->i_mount;
2642 struct xfs_buf *bp;
2643 struct xfs_buf_map map, *mapp = ↦
2644 int nmap = 1;
2645 int error;
2646
2647 *bpp = NULL;
2648 error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
2649 if (error || !nmap)
2650 goto out_free;
2651
2652 error = xfs_trans_read_buf_map(mp, tp, mp->m_ddev_targp, mapp, nmap, 0,
2653 &bp, ops);
2654 if (error)
2655 goto out_free;
2656
2657 if (whichfork == XFS_ATTR_FORK)
2658 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2659 else
2660 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2661 *bpp = bp;
2662out_free:
2663 if (mapp != &map)
2664 kmem_free(mapp);
2665
2666 return error;
2667}
2668
2669/*
2670 * Readahead the dir/attr block.
2671 */
2672int
2673xfs_da_reada_buf(
2674 struct xfs_inode *dp,
2675 xfs_dablk_t bno,
2676 unsigned int flags,
2677 int whichfork,
2678 const struct xfs_buf_ops *ops)
2679{
2680 struct xfs_buf_map map;
2681 struct xfs_buf_map *mapp;
2682 int nmap;
2683 int error;
2684
2685 mapp = ↦
2686 nmap = 1;
2687 error = xfs_dabuf_map(dp, bno, flags, whichfork, &mapp, &nmap);
2688 if (error || !nmap)
2689 goto out_free;
2690
2691 xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
2692
2693out_free:
2694 if (mapp != &map)
2695 kmem_free(mapp);
2696
2697 return error;
2698}