1/*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_trans.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h"
28#include "xfs_da_btree.h"
29#include "xfs_bmap_btree.h"
30#include "xfs_dir2.h"
31#include "xfs_dir2_format.h"
32#include "xfs_dir2_priv.h"
33#include "xfs_dinode.h"
34#include "xfs_inode.h"
35#include "xfs_inode_item.h"
36#include "xfs_alloc.h"
37#include "xfs_bmap.h"
38#include "xfs_attr.h"
39#include "xfs_attr_leaf.h"
40#include "xfs_error.h"
41#include "xfs_trace.h"
42
43/*
44 * xfs_da_btree.c
45 *
46 * Routines to implement directories as Btrees of hashed names.
47 */
48
49/*========================================================================
50 * Function prototypes for the kernel.
51 *========================================================================*/
52
53/*
54 * Routines used for growing the Btree.
55 */
56STATIC int xfs_da_root_split(xfs_da_state_t *state,
57 xfs_da_state_blk_t *existing_root,
58 xfs_da_state_blk_t *new_child);
59STATIC int xfs_da_node_split(xfs_da_state_t *state,
60 xfs_da_state_blk_t *existing_blk,
61 xfs_da_state_blk_t *split_blk,
62 xfs_da_state_blk_t *blk_to_add,
63 int treelevel,
64 int *result);
65STATIC void xfs_da_node_rebalance(xfs_da_state_t *state,
66 xfs_da_state_blk_t *node_blk_1,
67 xfs_da_state_blk_t *node_blk_2);
68STATIC void xfs_da_node_add(xfs_da_state_t *state,
69 xfs_da_state_blk_t *old_node_blk,
70 xfs_da_state_blk_t *new_node_blk);
71
72/*
73 * Routines used for shrinking the Btree.
74 */
75STATIC int xfs_da_root_join(xfs_da_state_t *state,
76 xfs_da_state_blk_t *root_blk);
77STATIC int xfs_da_node_toosmall(xfs_da_state_t *state, int *retval);
78STATIC void xfs_da_node_remove(xfs_da_state_t *state,
79 xfs_da_state_blk_t *drop_blk);
80STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
81 xfs_da_state_blk_t *src_node_blk,
82 xfs_da_state_blk_t *dst_node_blk);
83
84/*
85 * Utility routines.
86 */
87STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
88STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
89STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
90STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
91 xfs_da_state_blk_t *drop_blk,
92 xfs_da_state_blk_t *save_blk);
93STATIC void xfs_da_state_kill_altpath(xfs_da_state_t *state);
94
95/*========================================================================
96 * Routines used for growing the Btree.
97 *========================================================================*/
98
99/*
100 * Create the initial contents of an intermediate node.
101 */
102int
103xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
104 xfs_dabuf_t **bpp, int whichfork)
105{
106 xfs_da_intnode_t *node;
107 xfs_dabuf_t *bp;
108 int error;
109 xfs_trans_t *tp;
110
111 tp = args->trans;
112 error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
113 if (error)
114 return(error);
115 ASSERT(bp != NULL);
116 node = bp->data;
117 node->hdr.info.forw = 0;
118 node->hdr.info.back = 0;
119 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
120 node->hdr.info.pad = 0;
121 node->hdr.count = 0;
122 node->hdr.level = cpu_to_be16(level);
123
124 xfs_da_log_buf(tp, bp,
125 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
126
127 *bpp = bp;
128 return(0);
129}
130
131/*
132 * Split a leaf node, rebalance, then possibly split
133 * intermediate nodes, rebalance, etc.
134 */
135int /* error */
136xfs_da_split(xfs_da_state_t *state)
137{
138 xfs_da_state_blk_t *oldblk, *newblk, *addblk;
139 xfs_da_intnode_t *node;
140 xfs_dabuf_t *bp;
141 int max, action, error, i;
142
143 /*
144 * Walk back up the tree splitting/inserting/adjusting as necessary.
145 * If we need to insert and there isn't room, split the node, then
146 * decide which fragment to insert the new block from below into.
147 * Note that we may split the root this way, but we need more fixup.
148 */
149 max = state->path.active - 1;
150 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
151 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
152 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
153
154 addblk = &state->path.blk[max]; /* initial dummy value */
155 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
156 oldblk = &state->path.blk[i];
157 newblk = &state->altpath.blk[i];
158
159 /*
160 * If a leaf node then
161 * Allocate a new leaf node, then rebalance across them.
162 * else if an intermediate node then
163 * We split on the last layer, must we split the node?
164 */
165 switch (oldblk->magic) {
166 case XFS_ATTR_LEAF_MAGIC:
167 error = xfs_attr_leaf_split(state, oldblk, newblk);
168 if ((error != 0) && (error != ENOSPC)) {
169 return(error); /* GROT: attr is inconsistent */
170 }
171 if (!error) {
172 addblk = newblk;
173 break;
174 }
175 /*
176 * Entry wouldn't fit, split the leaf again.
177 */
178 state->extravalid = 1;
179 if (state->inleaf) {
180 state->extraafter = 0; /* before newblk */
181 error = xfs_attr_leaf_split(state, oldblk,
182 &state->extrablk);
183 } else {
184 state->extraafter = 1; /* after newblk */
185 error = xfs_attr_leaf_split(state, newblk,
186 &state->extrablk);
187 }
188 if (error)
189 return(error); /* GROT: attr inconsistent */
190 addblk = newblk;
191 break;
192 case XFS_DIR2_LEAFN_MAGIC:
193 error = xfs_dir2_leafn_split(state, oldblk, newblk);
194 if (error)
195 return error;
196 addblk = newblk;
197 break;
198 case XFS_DA_NODE_MAGIC:
199 error = xfs_da_node_split(state, oldblk, newblk, addblk,
200 max - i, &action);
201 xfs_da_buf_done(addblk->bp);
202 addblk->bp = NULL;
203 if (error)
204 return(error); /* GROT: dir is inconsistent */
205 /*
206 * Record the newly split block for the next time thru?
207 */
208 if (action)
209 addblk = newblk;
210 else
211 addblk = NULL;
212 break;
213 }
214
215 /*
216 * Update the btree to show the new hashval for this child.
217 */
218 xfs_da_fixhashpath(state, &state->path);
219 /*
220 * If we won't need this block again, it's getting dropped
221 * from the active path by the loop control, so we need
222 * to mark it done now.
223 */
224 if (i > 0 || !addblk)
225 xfs_da_buf_done(oldblk->bp);
226 }
227 if (!addblk)
228 return(0);
229
230 /*
231 * Split the root node.
232 */
233 ASSERT(state->path.active == 0);
234 oldblk = &state->path.blk[0];
235 error = xfs_da_root_split(state, oldblk, addblk);
236 if (error) {
237 xfs_da_buf_done(oldblk->bp);
238 xfs_da_buf_done(addblk->bp);
239 addblk->bp = NULL;
240 return(error); /* GROT: dir is inconsistent */
241 }
242
243 /*
244 * Update pointers to the node which used to be block 0 and
245 * just got bumped because of the addition of a new root node.
246 * There might be three blocks involved if a double split occurred,
247 * and the original block 0 could be at any position in the list.
248 */
249
250 node = oldblk->bp->data;
251 if (node->hdr.info.forw) {
252 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
253 bp = addblk->bp;
254 } else {
255 ASSERT(state->extravalid);
256 bp = state->extrablk.bp;
257 }
258 node = bp->data;
259 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
260 xfs_da_log_buf(state->args->trans, bp,
261 XFS_DA_LOGRANGE(node, &node->hdr.info,
262 sizeof(node->hdr.info)));
263 }
264 node = oldblk->bp->data;
265 if (node->hdr.info.back) {
266 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
267 bp = addblk->bp;
268 } else {
269 ASSERT(state->extravalid);
270 bp = state->extrablk.bp;
271 }
272 node = bp->data;
273 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
274 xfs_da_log_buf(state->args->trans, bp,
275 XFS_DA_LOGRANGE(node, &node->hdr.info,
276 sizeof(node->hdr.info)));
277 }
278 xfs_da_buf_done(oldblk->bp);
279 xfs_da_buf_done(addblk->bp);
280 addblk->bp = NULL;
281 return(0);
282}
283
284/*
285 * Split the root. We have to create a new root and point to the two
286 * parts (the split old root) that we just created. Copy block zero to
287 * the EOF, extending the inode in the process.
288 */
289STATIC int /* error */
290xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
291 xfs_da_state_blk_t *blk2)
292{
293 xfs_da_intnode_t *node, *oldroot;
294 xfs_da_args_t *args;
295 xfs_dablk_t blkno;
296 xfs_dabuf_t *bp;
297 int error, size;
298 xfs_inode_t *dp;
299 xfs_trans_t *tp;
300 xfs_mount_t *mp;
301 xfs_dir2_leaf_t *leaf;
302
303 /*
304 * Copy the existing (incorrect) block from the root node position
305 * to a free space somewhere.
306 */
307 args = state->args;
308 ASSERT(args != NULL);
309 error = xfs_da_grow_inode(args, &blkno);
310 if (error)
311 return(error);
312 dp = args->dp;
313 tp = args->trans;
314 mp = state->mp;
315 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
316 if (error)
317 return(error);
318 ASSERT(bp != NULL);
319 node = bp->data;
320 oldroot = blk1->bp->data;
321 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
322 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
323 (char *)oldroot);
324 } else {
325 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
326 leaf = (xfs_dir2_leaf_t *)oldroot;
327 size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
328 (char *)leaf);
329 }
330 memcpy(node, oldroot, size);
331 xfs_da_log_buf(tp, bp, 0, size - 1);
332 xfs_da_buf_done(blk1->bp);
333 blk1->bp = bp;
334 blk1->blkno = blkno;
335
336 /*
337 * Set up the new root node.
338 */
339 error = xfs_da_node_create(args,
340 (args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
341 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
342 if (error)
343 return(error);
344 node = bp->data;
345 node->btree[0].hashval = cpu_to_be32(blk1->hashval);
346 node->btree[0].before = cpu_to_be32(blk1->blkno);
347 node->btree[1].hashval = cpu_to_be32(blk2->hashval);
348 node->btree[1].before = cpu_to_be32(blk2->blkno);
349 node->hdr.count = cpu_to_be16(2);
350
351#ifdef DEBUG
352 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
353 ASSERT(blk1->blkno >= mp->m_dirleafblk &&
354 blk1->blkno < mp->m_dirfreeblk);
355 ASSERT(blk2->blkno >= mp->m_dirleafblk &&
356 blk2->blkno < mp->m_dirfreeblk);
357 }
358#endif
359
360 /* Header is already logged by xfs_da_node_create */
361 xfs_da_log_buf(tp, bp,
362 XFS_DA_LOGRANGE(node, node->btree,
363 sizeof(xfs_da_node_entry_t) * 2));
364 xfs_da_buf_done(bp);
365
366 return(0);
367}
368
369/*
370 * Split the node, rebalance, then add the new entry.
371 */
372STATIC int /* error */
373xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
374 xfs_da_state_blk_t *newblk,
375 xfs_da_state_blk_t *addblk,
376 int treelevel, int *result)
377{
378 xfs_da_intnode_t *node;
379 xfs_dablk_t blkno;
380 int newcount, error;
381 int useextra;
382
383 node = oldblk->bp->data;
384 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
385
386 /*
387 * With V2 dirs the extra block is data or freespace.
388 */
389 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
390 newcount = 1 + useextra;
391 /*
392 * Do we have to split the node?
393 */
394 if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
395 /*
396 * Allocate a new node, add to the doubly linked chain of
397 * nodes, then move some of our excess entries into it.
398 */
399 error = xfs_da_grow_inode(state->args, &blkno);
400 if (error)
401 return(error); /* GROT: dir is inconsistent */
402
403 error = xfs_da_node_create(state->args, blkno, treelevel,
404 &newblk->bp, state->args->whichfork);
405 if (error)
406 return(error); /* GROT: dir is inconsistent */
407 newblk->blkno = blkno;
408 newblk->magic = XFS_DA_NODE_MAGIC;
409 xfs_da_node_rebalance(state, oldblk, newblk);
410 error = xfs_da_blk_link(state, oldblk, newblk);
411 if (error)
412 return(error);
413 *result = 1;
414 } else {
415 *result = 0;
416 }
417
418 /*
419 * Insert the new entry(s) into the correct block
420 * (updating last hashval in the process).
421 *
422 * xfs_da_node_add() inserts BEFORE the given index,
423 * and as a result of using node_lookup_int() we always
424 * point to a valid entry (not after one), but a split
425 * operation always results in a new block whose hashvals
426 * FOLLOW the current block.
427 *
428 * If we had a double-split op below us, then add the extra block too.
429 */
430 node = oldblk->bp->data;
431 if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
432 oldblk->index++;
433 xfs_da_node_add(state, oldblk, addblk);
434 if (useextra) {
435 if (state->extraafter)
436 oldblk->index++;
437 xfs_da_node_add(state, oldblk, &state->extrablk);
438 state->extravalid = 0;
439 }
440 } else {
441 newblk->index++;
442 xfs_da_node_add(state, newblk, addblk);
443 if (useextra) {
444 if (state->extraafter)
445 newblk->index++;
446 xfs_da_node_add(state, newblk, &state->extrablk);
447 state->extravalid = 0;
448 }
449 }
450
451 return(0);
452}
453
454/*
455 * Balance the btree elements between two intermediate nodes,
456 * usually one full and one empty.
457 *
458 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
459 */
460STATIC void
461xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
462 xfs_da_state_blk_t *blk2)
463{
464 xfs_da_intnode_t *node1, *node2, *tmpnode;
465 xfs_da_node_entry_t *btree_s, *btree_d;
466 int count, tmp;
467 xfs_trans_t *tp;
468
469 node1 = blk1->bp->data;
470 node2 = blk2->bp->data;
471 /*
472 * Figure out how many entries need to move, and in which direction.
473 * Swap the nodes around if that makes it simpler.
474 */
475 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
476 ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
477 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
478 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
479 tmpnode = node1;
480 node1 = node2;
481 node2 = tmpnode;
482 }
483 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
484 ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
485 count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
486 if (count == 0)
487 return;
488 tp = state->args->trans;
489 /*
490 * Two cases: high-to-low and low-to-high.
491 */
492 if (count > 0) {
493 /*
494 * Move elements in node2 up to make a hole.
495 */
496 if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
497 tmp *= (uint)sizeof(xfs_da_node_entry_t);
498 btree_s = &node2->btree[0];
499 btree_d = &node2->btree[count];
500 memmove(btree_d, btree_s, tmp);
501 }
502
503 /*
504 * Move the req'd B-tree elements from high in node1 to
505 * low in node2.
506 */
507 be16_add_cpu(&node2->hdr.count, count);
508 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
509 btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
510 btree_d = &node2->btree[0];
511 memcpy(btree_d, btree_s, tmp);
512 be16_add_cpu(&node1->hdr.count, -count);
513 } else {
514 /*
515 * Move the req'd B-tree elements from low in node2 to
516 * high in node1.
517 */
518 count = -count;
519 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
520 btree_s = &node2->btree[0];
521 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
522 memcpy(btree_d, btree_s, tmp);
523 be16_add_cpu(&node1->hdr.count, count);
524 xfs_da_log_buf(tp, blk1->bp,
525 XFS_DA_LOGRANGE(node1, btree_d, tmp));
526
527 /*
528 * Move elements in node2 down to fill the hole.
529 */
530 tmp = be16_to_cpu(node2->hdr.count) - count;
531 tmp *= (uint)sizeof(xfs_da_node_entry_t);
532 btree_s = &node2->btree[count];
533 btree_d = &node2->btree[0];
534 memmove(btree_d, btree_s, tmp);
535 be16_add_cpu(&node2->hdr.count, -count);
536 }
537
538 /*
539 * Log header of node 1 and all current bits of node 2.
540 */
541 xfs_da_log_buf(tp, blk1->bp,
542 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
543 xfs_da_log_buf(tp, blk2->bp,
544 XFS_DA_LOGRANGE(node2, &node2->hdr,
545 sizeof(node2->hdr) +
546 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
547
548 /*
549 * Record the last hashval from each block for upward propagation.
550 * (note: don't use the swapped node pointers)
551 */
552 node1 = blk1->bp->data;
553 node2 = blk2->bp->data;
554 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
555 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
556
557 /*
558 * Adjust the expected index for insertion.
559 */
560 if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
561 blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
562 blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */
563 }
564}
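/*
 * Worked example of the rebalance arithmetic above: if node1 holds 10
 * entries and node2 holds 2 (and no swap is needed), count = (10 - 2) / 2
 * = 4, so the 4 highest-hashval entries of node1 are moved to the front
 * of node2, leaving 6 entries in each node.
 */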
565
566/*
567 * Add a new entry to an intermediate node.
568 */
569STATIC void
570xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
571 xfs_da_state_blk_t *newblk)
572{
573 xfs_da_intnode_t *node;
574 xfs_da_node_entry_t *btree;
575 int tmp;
576
577 node = oldblk->bp->data;
578 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
579 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
580 ASSERT(newblk->blkno != 0);
581 if (state->args->whichfork == XFS_DATA_FORK)
582 ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
583 newblk->blkno < state->mp->m_dirfreeblk);
584
585 /*
586 * We may need to make some room before we insert the new node.
587 */
588 tmp = 0;
589 btree = &node->btree[ oldblk->index ];
590 if (oldblk->index < be16_to_cpu(node->hdr.count)) {
591 tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
592 memmove(btree + 1, btree, tmp);
593 }
594 btree->hashval = cpu_to_be32(newblk->hashval);
595 btree->before = cpu_to_be32(newblk->blkno);
596 xfs_da_log_buf(state->args->trans, oldblk->bp,
597 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
598 be16_add_cpu(&node->hdr.count, 1);
599 xfs_da_log_buf(state->args->trans, oldblk->bp,
600 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
601
602 /*
603 * Copy the last hash value from the oldblk to propagate upwards.
604 */
605 oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
606}
607
608/*========================================================================
609 * Routines used for shrinking the Btree.
610 *========================================================================*/
611
612/*
613 * Deallocate an empty leaf node, remove it from its parent,
614 * possibly deallocating that block, etc...
615 */
616int
617xfs_da_join(xfs_da_state_t *state)
618{
619 xfs_da_state_blk_t *drop_blk, *save_blk;
620 int action, error;
621
622 action = 0;
623 drop_blk = &state->path.blk[ state->path.active-1 ];
624 save_blk = &state->altpath.blk[ state->path.active-1 ];
625 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
626 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
627 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
628
629 /*
630 * Walk back up the tree joining/deallocating as necessary.
631 * When we stop dropping blocks, break out.
632 */
633 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
634 state->path.active--) {
635 /*
636 * See if we can combine the block with a neighbor.
637 * (action == 0) => no options, just leave
638 * (action == 1) => coalesce, then unlink
639 * (action == 2) => block empty, unlink it
640 */
641 switch (drop_blk->magic) {
642 case XFS_ATTR_LEAF_MAGIC:
643 error = xfs_attr_leaf_toosmall(state, &action);
644 if (error)
645 return(error);
646 if (action == 0)
647 return(0);
648 xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
649 break;
650 case XFS_DIR2_LEAFN_MAGIC:
651 error = xfs_dir2_leafn_toosmall(state, &action);
652 if (error)
653 return error;
654 if (action == 0)
655 return 0;
656 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
657 break;
658 case XFS_DA_NODE_MAGIC:
659 /*
660 * Remove the offending node, fixup hashvals,
661 * check for a toosmall neighbor.
662 */
663 xfs_da_node_remove(state, drop_blk);
664 xfs_da_fixhashpath(state, &state->path);
665 error = xfs_da_node_toosmall(state, &action);
666 if (error)
667 return(error);
668 if (action == 0)
669 return 0;
670 xfs_da_node_unbalance(state, drop_blk, save_blk);
671 break;
672 }
673 xfs_da_fixhashpath(state, &state->altpath);
674 error = xfs_da_blk_unlink(state, drop_blk, save_blk);
675 xfs_da_state_kill_altpath(state);
676 if (error)
677 return(error);
678 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
679 drop_blk->bp);
680 drop_blk->bp = NULL;
681 if (error)
682 return(error);
683 }
684 /*
685 * We joined all the way to the top. If it turns out that
686 * we only have one entry in the root, make the child block
687 * the new root.
688 */
689 xfs_da_node_remove(state, drop_blk);
690 xfs_da_fixhashpath(state, &state->path);
691 error = xfs_da_root_join(state, &state->path.blk[0]);
692 return(error);
693}
694
695#ifdef DEBUG
696static void
697xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
698{
699 __be16 magic = blkinfo->magic;
700
701 if (level == 1) {
702 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
703 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
704 } else
705 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
706 ASSERT(!blkinfo->forw);
707 ASSERT(!blkinfo->back);
708}
709#else /* !DEBUG */
710#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
711#endif /* !DEBUG */
712
713/*
714 * We have only one entry in the root. Copy the only remaining child of
715 * the old root to block 0 as the new root node.
716 */
717STATIC int
718xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
719{
720 xfs_da_intnode_t *oldroot;
721 xfs_da_args_t *args;
722 xfs_dablk_t child;
723 xfs_dabuf_t *bp;
724 int error;
725
726 args = state->args;
727 ASSERT(args != NULL);
728 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
729 oldroot = root_blk->bp->data;
730 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
731 ASSERT(!oldroot->hdr.info.forw);
732 ASSERT(!oldroot->hdr.info.back);
733
734 /*
735 * If the root has more than one child, then don't do anything.
736 */
737 if (be16_to_cpu(oldroot->hdr.count) > 1)
738 return(0);
739
740 /*
741 * Read in the (only) child block, then copy those bytes into
742 * the root block's buffer and free the original child block.
743 */
744 child = be32_to_cpu(oldroot->btree[0].before);
745 ASSERT(child != 0);
746 error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
747 args->whichfork);
748 if (error)
749 return(error);
750 ASSERT(bp != NULL);
751 xfs_da_blkinfo_onlychild_validate(bp->data,
752 be16_to_cpu(oldroot->hdr.level));
753
754 memcpy(root_blk->bp->data, bp->data, state->blocksize);
755 xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
756 error = xfs_da_shrink_inode(args, child, bp);
757 return(error);
758}
759
760/*
761 * Check a node block and its neighbors to see if the block should be
762 * collapsed into one or the other neighbor. Always keep the block
763 * with the smaller block number.
764 * If the current block is over 50% full, don't try to join it, return 0.
765 * If the block is empty, fill in the state structure and return 2.
766 * If it can be collapsed, fill in the state structure and return 1.
767 * If nothing can be done, return 0.
768 */
769STATIC int
770xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
771{
772 xfs_da_intnode_t *node;
773 xfs_da_state_blk_t *blk;
774 xfs_da_blkinfo_t *info;
775 int count, forward, error, retval, i;
776 xfs_dablk_t blkno;
777 xfs_dabuf_t *bp;
778
779 /*
780 * Check for the degenerate case of the block being over 50% full.
781 * If so, it's not worth even looking to see if we might be able
782 * to coalesce with a sibling.
783 */
784 blk = &state->path.blk[ state->path.active-1 ];
785 info = blk->bp->data;
786 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
787 node = (xfs_da_intnode_t *)info;
788 count = be16_to_cpu(node->hdr.count);
789 if (count > (state->node_ents >> 1)) {
790 *action = 0; /* blk over 50%, don't try to join */
791 return(0); /* blk over 50%, don't try to join */
792 }
793
794 /*
795 * Check for the degenerate case of the block being empty.
796 * If the block is empty, we'll simply delete it, no need to
797 * coalesce it with a sibling block. We choose (arbitrarily)
798 * to merge with the forward block unless it is NULL.
799 */
800 if (count == 0) {
801 /*
802 * Make altpath point to the block we want to keep and
803 * path point to the block we want to drop (this one).
804 */
805 forward = (info->forw != 0);
806 memcpy(&state->altpath, &state->path, sizeof(state->path));
807 error = xfs_da_path_shift(state, &state->altpath, forward,
808 0, &retval);
809 if (error)
810 return(error);
811 if (retval) {
812 *action = 0;
813 } else {
814 *action = 2;
815 }
816 return(0);
817 }
818
819 /*
820 * Examine each sibling block to see if we can coalesce with
821 * at least 25% free space to spare. We need to figure out
822 * whether to merge with the forward or the backward block.
823 * We prefer coalescing with the lower numbered sibling so as
824 * to shrink a directory over time.
825 */
826 /* start with smaller blk num */
827 forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
828 for (i = 0; i < 2; forward = !forward, i++) {
829 if (forward)
830 blkno = be32_to_cpu(info->forw);
831 else
832 blkno = be32_to_cpu(info->back);
833 if (blkno == 0)
834 continue;
835 error = xfs_da_read_buf(state->args->trans, state->args->dp,
836 blkno, -1, &bp, state->args->whichfork);
837 if (error)
838 return(error);
839 ASSERT(bp != NULL);
840
841 node = (xfs_da_intnode_t *)info;
842 count = state->node_ents;
843 count -= state->node_ents >> 2;
844 count -= be16_to_cpu(node->hdr.count);
845 node = bp->data;
846 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
847 count -= be16_to_cpu(node->hdr.count);
848 xfs_da_brelse(state->args->trans, bp);
849 if (count >= 0)
850 break; /* fits with at least 25% to spare */
851 }
852 if (i >= 2) {
853 *action = 0;
854 return(0);
855 }
856
857 /*
858 * Make altpath point to the block we want to keep (the lower
859 * numbered block) and path point to the block we want to drop.
860 */
861 memcpy(&state->altpath, &state->path, sizeof(state->path));
862 if (blkno < blk->blkno) {
863 error = xfs_da_path_shift(state, &state->altpath, forward,
864 0, &retval);
865 if (error) {
866 return(error);
867 }
868 if (retval) {
869 *action = 0;
870 return(0);
871 }
872 } else {
873 error = xfs_da_path_shift(state, &state->path, forward,
874 0, &retval);
875 if (error) {
876 return(error);
877 }
878 if (retval) {
879 *action = 0;
880 return(0);
881 }
882 }
883 *action = 1;
884 return(0);
885}
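/*
 * Example of the coalesce test above: with node_ents == 64 the merge
 * target is 64 - 16 = 48 entries (75% of capacity), so a 20-entry block
 * and a 25-entry sibling leave 48 - 20 - 25 = 3 >= 0 and the two nodes
 * may be joined; a 30-entry sibling would leave -2 and the join is skipped.
 */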
886
887/*
888 * Walk back up the tree adjusting hash values as necessary;
889 * when we stop making changes, return.
890 */
891void
892xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
893{
894 xfs_da_state_blk_t *blk;
895 xfs_da_intnode_t *node;
896 xfs_da_node_entry_t *btree;
897 xfs_dahash_t lasthash=0;
898 int level, count;
899
900 level = path->active-1;
901 blk = &path->blk[ level ];
902 switch (blk->magic) {
903 case XFS_ATTR_LEAF_MAGIC:
904 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
905 if (count == 0)
906 return;
907 break;
908 case XFS_DIR2_LEAFN_MAGIC:
909 lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
910 if (count == 0)
911 return;
912 break;
913 case XFS_DA_NODE_MAGIC:
914 lasthash = xfs_da_node_lasthash(blk->bp, &count);
915 if (count == 0)
916 return;
917 break;
918 }
919 for (blk--, level--; level >= 0; blk--, level--) {
920 node = blk->bp->data;
921 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
922 btree = &node->btree[ blk->index ];
923 if (be32_to_cpu(btree->hashval) == lasthash)
924 break;
925 blk->hashval = lasthash;
926 btree->hashval = cpu_to_be32(lasthash);
927 xfs_da_log_buf(state->args->trans, blk->bp,
928 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
929
930 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
931 }
932}
933
934/*
935 * Remove an entry from an intermediate node.
936 */
937STATIC void
938xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
939{
940 xfs_da_intnode_t *node;
941 xfs_da_node_entry_t *btree;
942 int tmp;
943
944 node = drop_blk->bp->data;
945 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
946 ASSERT(drop_blk->index >= 0);
947
948 /*
949 * Copy over the offending entry, or just zero it out.
950 */
951 btree = &node->btree[drop_blk->index];
952 if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
953 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
954 tmp *= (uint)sizeof(xfs_da_node_entry_t);
955 memmove(btree, btree + 1, tmp);
956 xfs_da_log_buf(state->args->trans, drop_blk->bp,
957 XFS_DA_LOGRANGE(node, btree, tmp));
958 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
959 }
960 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
961 xfs_da_log_buf(state->args->trans, drop_blk->bp,
962 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
963 be16_add_cpu(&node->hdr.count, -1);
964 xfs_da_log_buf(state->args->trans, drop_blk->bp,
965 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
966
967 /*
968 * Copy the last hash value from the block to propagate upwards.
969 */
970 btree--;
971 drop_blk->hashval = be32_to_cpu(btree->hashval);
972}
973
974/*
975 * Unbalance the btree elements between two intermediate nodes,
976 * move all Btree elements from one node into another.
977 */
978STATIC void
979xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
980 xfs_da_state_blk_t *save_blk)
981{
982 xfs_da_intnode_t *drop_node, *save_node;
983 xfs_da_node_entry_t *btree;
984 int tmp;
985 xfs_trans_t *tp;
986
987 drop_node = drop_blk->bp->data;
988 save_node = save_blk->bp->data;
989 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
990 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
991 tp = state->args->trans;
992
993 /*
994 * If the dying block has lower hashvals, then move all the
995 * elements in the remaining block up to make a hole.
996 */
997 if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
998 (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
999 be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
1000 {
1001 btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
1002 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1003 memmove(btree, &save_node->btree[0], tmp);
1004 btree = &save_node->btree[0];
1005 xfs_da_log_buf(tp, save_blk->bp,
1006 XFS_DA_LOGRANGE(save_node, btree,
1007 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1008 sizeof(xfs_da_node_entry_t)));
1009 } else {
1010 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1011 xfs_da_log_buf(tp, save_blk->bp,
1012 XFS_DA_LOGRANGE(save_node, btree,
1013 be16_to_cpu(drop_node->hdr.count) *
1014 sizeof(xfs_da_node_entry_t)));
1015 }
1016
1017 /*
1018 * Move all the B-tree elements from drop_blk to save_blk.
1019 */
1020 tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1021 memcpy(btree, &drop_node->btree[0], tmp);
1022 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1023
1024 xfs_da_log_buf(tp, save_blk->bp,
1025 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1026 sizeof(save_node->hdr)));
1027
1028 /*
1029 * Save the last hashval in the remaining block for upward propagation.
1030 */
1031 save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1032}
1033
1034/*========================================================================
1035 * Routines used for finding things in the Btree.
1036 *========================================================================*/
1037
1038/*
1039 * Walk down the Btree looking for a particular filename, filling
1040 * in the state structure as we go.
1041 *
1042 * We will set the state structure to point to each of the elements
1043 * in each of the nodes where either the hashval is or should be.
1044 *
1045 * We support duplicate hashval's so for each entry in the current
1046 * node that could contain the desired hashval, descend. This is a
1047 * pruned depth-first tree search.
1048 */
1049int /* error */
1050xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1051{
1052 xfs_da_state_blk_t *blk;
1053 xfs_da_blkinfo_t *curr;
1054 xfs_da_intnode_t *node;
1055 xfs_da_node_entry_t *btree;
1056 xfs_dablk_t blkno;
1057 int probe, span, max, error, retval;
1058 xfs_dahash_t hashval, btreehashval;
1059 xfs_da_args_t *args;
1060
1061 args = state->args;
1062
1063 /*
1064 * Descend thru the B-tree searching each level for the right
1065 * node to use, until the right hashval is found.
1066 */
1067 blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1068 for (blk = &state->path.blk[0], state->path.active = 1;
1069 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1070 blk++, state->path.active++) {
1071 /*
1072 * Read the next node down in the tree.
1073 */
1074 blk->blkno = blkno;
1075 error = xfs_da_read_buf(args->trans, args->dp, blkno,
1076 -1, &blk->bp, args->whichfork);
1077 if (error) {
1078 blk->blkno = 0;
1079 state->path.active--;
1080 return(error);
1081 }
1082 curr = blk->bp->data;
1083 blk->magic = be16_to_cpu(curr->magic);
1084 ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1085 blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1086 blk->magic == XFS_ATTR_LEAF_MAGIC);
1087
1088 /*
1089 * Search an intermediate node for a match.
1090 */
1091 if (blk->magic == XFS_DA_NODE_MAGIC) {
1092 node = blk->bp->data;
1093 max = be16_to_cpu(node->hdr.count);
1094 blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1095
1096 /*
1097 * Binary search. (note: small blocks will skip loop)
1098 */
1099 probe = span = max / 2;
1100 hashval = args->hashval;
1101 for (btree = &node->btree[probe]; span > 4;
1102 btree = &node->btree[probe]) {
1103 span /= 2;
1104 btreehashval = be32_to_cpu(btree->hashval);
1105 if (btreehashval < hashval)
1106 probe += span;
1107 else if (btreehashval > hashval)
1108 probe -= span;
1109 else
1110 break;
1111 }
1112 ASSERT((probe >= 0) && (probe < max));
1113 ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1114
1115 /*
1116 * Since we may have duplicate hashval's, find the first
1117 * matching hashval in the node.
1118 */
1119 while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1120 btree--;
1121 probe--;
1122 }
1123 while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1124 btree++;
1125 probe++;
1126 }
1127
1128 /*
1129 * Pick the right block to descend on.
1130 */
1131 if (probe == max) {
1132 blk->index = max-1;
1133 blkno = be32_to_cpu(node->btree[max-1].before);
1134 } else {
1135 blk->index = probe;
1136 blkno = be32_to_cpu(btree->before);
1137 }
1138 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1139 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1140 break;
1141 } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1142 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1143 break;
1144 }
1145 }
1146
1147 /*
1148 * A leaf block that ends in the hashval that we are interested in
1149 * (final hashval == search hashval) means that the next block may
1150 * contain more entries with the same hashval, shift upward to the
1151 * next leaf and keep searching.
1152 */
1153 for (;;) {
1154 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1155 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1156 &blk->index, state);
1157 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1158 retval = xfs_attr_leaf_lookup_int(blk->bp, args);
1159 blk->index = args->index;
1160 args->blkno = blk->blkno;
1161 } else {
1162 ASSERT(0);
1163 return XFS_ERROR(EFSCORRUPTED);
1164 }
1165 if (((retval == ENOENT) || (retval == ENOATTR)) &&
1166 (blk->hashval == args->hashval)) {
1167 error = xfs_da_path_shift(state, &state->path, 1, 1,
1168 &retval);
1169 if (error)
1170 return(error);
1171 if (retval == 0) {
1172 continue;
1173 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1174 /* path_shift() gives ENOENT */
1175 retval = XFS_ERROR(ENOATTR);
1176 }
1177 }
1178 break;
1179 }
1180 *result = retval;
1181 return(0);
1182}
1183
1184/*========================================================================
1185 * Utility routines.
1186 *========================================================================*/
1187
1188/*
1189 * Link a new block into a doubly linked list of blocks (of whatever type).
1190 */
1191int /* error */
1192xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1193 xfs_da_state_blk_t *new_blk)
1194{
1195 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1196 xfs_da_args_t *args;
1197 int before=0, error;
1198 xfs_dabuf_t *bp;
1199
1200 /*
1201 * Set up environment.
1202 */
1203 args = state->args;
1204 ASSERT(args != NULL);
1205 old_info = old_blk->bp->data;
1206 new_info = new_blk->bp->data;
1207 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1208 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1209 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1210 ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
1211 ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
1212 ASSERT(old_blk->magic == new_blk->magic);
1213
1214 switch (old_blk->magic) {
1215 case XFS_ATTR_LEAF_MAGIC:
1216 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1217 break;
1218 case XFS_DIR2_LEAFN_MAGIC:
1219 before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1220 break;
1221 case XFS_DA_NODE_MAGIC:
1222 before = xfs_da_node_order(old_blk->bp, new_blk->bp);
1223 break;
1224 }
1225
1226 /*
1227 * Link blocks in appropriate order.
1228 */
1229 if (before) {
1230 /*
1231 * Link new block in before existing block.
1232 */
1233 new_info->forw = cpu_to_be32(old_blk->blkno);
1234 new_info->back = old_info->back;
1235 if (old_info->back) {
1236 error = xfs_da_read_buf(args->trans, args->dp,
1237 be32_to_cpu(old_info->back),
1238 -1, &bp, args->whichfork);
1239 if (error)
1240 return(error);
1241 ASSERT(bp != NULL);
1242 tmp_info = bp->data;
1243 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1244 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1245 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1246 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1247 xfs_da_buf_done(bp);
1248 }
1249 old_info->back = cpu_to_be32(new_blk->blkno);
1250 } else {
1251 /*
1252 * Link new block in after existing block.
1253 */
1254 new_info->forw = old_info->forw;
1255 new_info->back = cpu_to_be32(old_blk->blkno);
1256 if (old_info->forw) {
1257 error = xfs_da_read_buf(args->trans, args->dp,
1258 be32_to_cpu(old_info->forw),
1259 -1, &bp, args->whichfork);
1260 if (error)
1261 return(error);
1262 ASSERT(bp != NULL);
1263 tmp_info = bp->data;
1264 ASSERT(tmp_info->magic == old_info->magic);
1265 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1266 tmp_info->back = cpu_to_be32(new_blk->blkno);
1267 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1268 xfs_da_buf_done(bp);
1269 }
1270 old_info->forw = cpu_to_be32(new_blk->blkno);
1271 }
1272
1273 xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1274 xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1275 return(0);
1276}
1277
1278/*
1279 * Compare two intermediate nodes for "order".
1280 */
1281STATIC int
1282xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1283{
1284 xfs_da_intnode_t *node1, *node2;
1285
1286 node1 = node1_bp->data;
1287 node2 = node2_bp->data;
1288 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
1289 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1290 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
1291 ((be32_to_cpu(node2->btree[0].hashval) <
1292 be32_to_cpu(node1->btree[0].hashval)) ||
1293 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1294 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1295 return(1);
1296 }
1297 return(0);
1298}
1299
1300/*
1301 * Pick up the last hashvalue from an intermediate node.
1302 */
1303STATIC uint
1304xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
1305{
1306 xfs_da_intnode_t *node;
1307
1308 node = bp->data;
1309 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1310 if (count)
1311 *count = be16_to_cpu(node->hdr.count);
1312 if (!node->hdr.count)
1313 return(0);
1314 return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1315}
1316
1317/*
1318 * Unlink a block from a doubly linked list of blocks.
1319 */
1320STATIC int /* error */
1321xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1322 xfs_da_state_blk_t *save_blk)
1323{
1324 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1325 xfs_da_args_t *args;
1326 xfs_dabuf_t *bp;
1327 int error;
1328
1329 /*
1330 * Set up environment.
1331 */
1332 args = state->args;
1333 ASSERT(args != NULL);
1334 save_info = save_blk->bp->data;
1335 drop_info = drop_blk->bp->data;
1336 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1337 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1338 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1339 ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
1340 ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
1341 ASSERT(save_blk->magic == drop_blk->magic);
1342 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1343 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1344 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1345 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1346
1347 /*
1348 * Unlink the leaf block from the doubly linked chain of leaves.
1349 */
1350 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1351 save_info->back = drop_info->back;
1352 if (drop_info->back) {
1353 error = xfs_da_read_buf(args->trans, args->dp,
1354 be32_to_cpu(drop_info->back),
1355 -1, &bp, args->whichfork);
1356 if (error)
1357 return(error);
1358 ASSERT(bp != NULL);
1359 tmp_info = bp->data;
1360 ASSERT(tmp_info->magic == save_info->magic);
1361 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1362 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1363 xfs_da_log_buf(args->trans, bp, 0,
1364 sizeof(*tmp_info) - 1);
1365 xfs_da_buf_done(bp);
1366 }
1367 } else {
1368 save_info->forw = drop_info->forw;
1369 if (drop_info->forw) {
1370 error = xfs_da_read_buf(args->trans, args->dp,
1371 be32_to_cpu(drop_info->forw),
1372 -1, &bp, args->whichfork);
1373 if (error)
1374 return(error);
1375 ASSERT(bp != NULL);
1376 tmp_info = bp->data;
1377 ASSERT(tmp_info->magic == save_info->magic);
1378 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1379 tmp_info->back = cpu_to_be32(save_blk->blkno);
1380 xfs_da_log_buf(args->trans, bp, 0,
1381 sizeof(*tmp_info) - 1);
1382 xfs_da_buf_done(bp);
1383 }
1384 }
1385
1386 xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1387 return(0);
1388}
1389
1390/*
1391 * Move a path "forward" or "!forward" one block at the current level.
1392 *
1393 * This routine will adjust a "path" to point to the next block
1394 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1395 * Btree, including updating pointers to the intermediate nodes between
1396 * the new bottom and the root.
1397 */
1398int /* error */
1399xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1400 int forward, int release, int *result)
1401{
1402 xfs_da_state_blk_t *blk;
1403 xfs_da_blkinfo_t *info;
1404 xfs_da_intnode_t *node;
1405 xfs_da_args_t *args;
1406 xfs_dablk_t blkno=0;
1407 int level, error;
1408
1409 /*
1410 * Roll up the Btree looking for the first block where our
1411 * current index is not at the edge of the block. Note that
1412 * we skip the bottom layer because we want the sibling block.
1413 */
1414 args = state->args;
1415 ASSERT(args != NULL);
1416 ASSERT(path != NULL);
1417 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1418 level = (path->active-1) - 1; /* skip bottom layer in path */
1419 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1420 ASSERT(blk->bp != NULL);
1421 node = blk->bp->data;
1422 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1423 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1424 blk->index++;
1425 blkno = be32_to_cpu(node->btree[blk->index].before);
1426 break;
1427 } else if (!forward && (blk->index > 0)) {
1428 blk->index--;
1429 blkno = be32_to_cpu(node->btree[blk->index].before);
1430 break;
1431 }
1432 }
1433 if (level < 0) {
1434 *result = XFS_ERROR(ENOENT); /* we're out of our tree */
1435 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1436 return(0);
1437 }
1438
1439 /*
1440 * Roll down the edge of the subtree until we reach the
1441 * same depth we were at originally.
1442 */
1443 for (blk++, level++; level < path->active; blk++, level++) {
1444 /*
1445 * Release the old block.
1446 * (if it's dirty, trans won't actually let go)
1447 */
1448 if (release)
1449 xfs_da_brelse(args->trans, blk->bp);
1450
1451 /*
1452 * Read the next child block.
1453 */
1454 blk->blkno = blkno;
1455 error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
1456 &blk->bp, args->whichfork);
1457 if (error)
1458 return(error);
1459 ASSERT(blk->bp != NULL);
1460 info = blk->bp->data;
1461 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1462 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1463 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1464 blk->magic = be16_to_cpu(info->magic);
1465 if (blk->magic == XFS_DA_NODE_MAGIC) {
1466 node = (xfs_da_intnode_t *)info;
1467 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1468 if (forward)
1469 blk->index = 0;
1470 else
1471 blk->index = be16_to_cpu(node->hdr.count)-1;
1472 blkno = be32_to_cpu(node->btree[blk->index].before);
1473 } else {
1474 ASSERT(level == path->active-1);
1475 blk->index = 0;
1476 switch(blk->magic) {
1477 case XFS_ATTR_LEAF_MAGIC:
1478 blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1479 NULL);
1480 break;
1481 case XFS_DIR2_LEAFN_MAGIC:
1482 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
1483 NULL);
1484 break;
1485 default:
1486 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
1487 blk->magic == XFS_DIR2_LEAFN_MAGIC);
1488 break;
1489 }
1490 }
1491 }
1492 *result = 0;
1493 return(0);
1494}
1495
1496
1497/*========================================================================
1498 * Utility routines.
1499 *========================================================================*/
1500
1501/*
1502 * Implement a simple hash on a character string.
1503 * Rotate the hash value by 7 bits, then XOR each character in.
1504 * This is implemented with some source-level loop unrolling.
1505 */
1506xfs_dahash_t
1507xfs_da_hashname(const __uint8_t *name, int namelen)
1508{
1509 xfs_dahash_t hash;
1510
1511 /*
1512 * Do four characters at a time as long as we can.
1513 */
1514 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1515 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1516 (name[3] << 0) ^ rol32(hash, 7 * 4);
1517
1518 /*
1519 * Now do the rest of the characters.
1520 */
1521 switch (namelen) {
1522 case 3:
1523 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1524 rol32(hash, 7 * 3);
1525 case 2:
1526 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1527 case 1:
1528 return (name[0] << 0) ^ rol32(hash, 7 * 1);
1529 default: /* case 0: */
1530 return hash;
1531 }
1532}
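/*
 * For example, a 3-byte name never enters the 4-byte loop (hash stays 0,
 * and rol32(0, 21) == 0), so the result reduces to
 * (name[0] << 14) ^ (name[1] << 7) ^ name[2].
 */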
1533
1534enum xfs_dacmp
1535xfs_da_compname(
1536 struct xfs_da_args *args,
1537 const unsigned char *name,
1538 int len)
1539{
1540 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1541 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1542}
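/*
 * Since xfs_da_hashname() folds arbitrary names into 32 bits, distinct
 * names can share a hashval; this default comparison is what finally
 * tells such entries apart, and it demands an exact length and byte match.
 */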
1543
1544static xfs_dahash_t
1545xfs_default_hashname(
1546 struct xfs_name *name)
1547{
1548 return xfs_da_hashname(name->name, name->len);
1549}
1550
1551const struct xfs_nameops xfs_default_nameops = {
1552 .hashname = xfs_default_hashname,
1553 .compname = xfs_da_compname
1554};
1555
1556int
1557xfs_da_grow_inode_int(
1558 struct xfs_da_args *args,
1559 xfs_fileoff_t *bno,
1560 int count)
1561{
1562 struct xfs_trans *tp = args->trans;
1563 struct xfs_inode *dp = args->dp;
1564 int w = args->whichfork;
1565 xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
1566 struct xfs_bmbt_irec map, *mapp;
1567 int nmap, error, got, i, mapi;
1568
1569 /*
1570 * Find a spot in the file space to put the new block.
1571 */
1572 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
1573 if (error)
1574 return error;
1575
1576 /*
1577 * Try mapping it in one filesystem block.
1578 */
1579 nmap = 1;
1580 ASSERT(args->firstblock != NULL);
1581 error = xfs_bmapi(tp, dp, *bno, count,
1582 xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
1583 XFS_BMAPI_CONTIG,
1584 args->firstblock, args->total, &map, &nmap,
1585 args->flist);
1586 if (error)
1587 return error;
1588
1589 ASSERT(nmap <= 1);
1590 if (nmap == 1) {
1591 mapp = &map;
1592 mapi = 1;
1593 } else if (nmap == 0 && count > 1) {
1594 xfs_fileoff_t b;
1595 int c;
1596
1597 /*
1598 * If we didn't get it and the block might work if fragmented,
1599 * try without the CONTIG flag. Loop until we get it all.
1600 */
1601 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
1602 for (b = *bno, mapi = 0; b < *bno + count; ) {
1603 nmap = MIN(XFS_BMAP_MAX_NMAP, count);
1604 c = (int)(*bno + count - b);
1605 error = xfs_bmapi(tp, dp, b, c,
1606 xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
1607 XFS_BMAPI_METADATA,
1608 args->firstblock, args->total,
1609 &mapp[mapi], &nmap, args->flist);
1610 if (error)
1611 goto out_free_map;
1612 if (nmap < 1)
1613 break;
1614 mapi += nmap;
1615 b = mapp[mapi - 1].br_startoff +
1616 mapp[mapi - 1].br_blockcount;
1617 }
1618 } else {
1619 mapi = 0;
1620 mapp = NULL;
1621 }
1622
1623 /*
1624 * Count the blocks we got, make sure it matches the total.
1625 */
1626 for (i = 0, got = 0; i < mapi; i++)
1627 got += mapp[i].br_blockcount;
1628 if (got != count || mapp[0].br_startoff != *bno ||
1629 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
1630 *bno + count) {
1631 error = XFS_ERROR(ENOSPC);
1632 goto out_free_map;
1633 }
1634
1635 /* account for newly allocated blocks in reserved blocks total */
1636 args->total -= dp->i_d.di_nblocks - nblks;
1637
1638out_free_map:
1639 if (mapp != &map)
1640 kmem_free(mapp);
1641 return error;
1642}
1643
1644/*
1645 * Add a block to the btree ahead of the file.
1646 * Return the new block number to the caller.
1647 */
1648int
1649xfs_da_grow_inode(
1650 struct xfs_da_args *args,
1651 xfs_dablk_t *new_blkno)
1652{
1653 xfs_fileoff_t bno;
1654 int count;
1655 int error;
1656
1657 if (args->whichfork == XFS_DATA_FORK) {
1658 bno = args->dp->i_mount->m_dirleafblk;
1659 count = args->dp->i_mount->m_dirblkfsbs;
1660 } else {
1661 bno = 0;
1662 count = 1;
1663 }
1664
1665 error = xfs_da_grow_inode_int(args, &bno, count);
1666 if (!error)
1667 *new_blkno = (xfs_dablk_t)bno;
1668 return error;
1669}
1670
1671/*
1672 * Ick. We need to always be able to remove a btree block, even
1673 * if there's no space reservation because the filesystem is full.
1674 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
1675 * It swaps the target block with the last block in the file. The
1676 * last block in the file can always be removed since removing it
1677 * can't cause a bmap btree split.
1678 */
1679STATIC int
1680xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1681 xfs_dabuf_t **dead_bufp)
1682{
1683 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1684 xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
1685 xfs_fileoff_t lastoff;
1686 xfs_inode_t *ip;
1687 xfs_trans_t *tp;
1688 xfs_mount_t *mp;
1689 int error, w, entno, level, dead_level;
1690 xfs_da_blkinfo_t *dead_info, *sib_info;
1691 xfs_da_intnode_t *par_node, *dead_node;
1692 xfs_dir2_leaf_t *dead_leaf2;
1693 xfs_dahash_t dead_hash;
1694
1695 dead_buf = *dead_bufp;
1696 dead_blkno = *dead_blknop;
1697 tp = args->trans;
1698 ip = args->dp;
1699 w = args->whichfork;
1700 ASSERT(w == XFS_DATA_FORK);
1701 mp = ip->i_mount;
1702 lastoff = mp->m_dirfreeblk;
1703 error = xfs_bmap_last_before(tp, ip, &lastoff, w);
1704 if (error)
1705 return error;
1706 if (unlikely(lastoff == 0)) {
1707 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
1708 mp);
1709 return XFS_ERROR(EFSCORRUPTED);
1710 }
1711 /*
1712 * Read the last block in the btree space.
1713 */
1714 last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
1715 if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
1716 return error;
1717 /*
1718 * Copy the last block into the dead buffer and log it.
1719 */
1720 memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
1721 xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1722 dead_info = dead_buf->data;
1723 /*
1724 * Get values from the moved block.
1725 */
1726 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
1727 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1728 dead_level = 0;
1729 dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1730 } else {
1731 ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1732 dead_node = (xfs_da_intnode_t *)dead_info;
1733 dead_level = be16_to_cpu(dead_node->hdr.level);
1734 dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1735 }
1736 sib_buf = par_buf = NULL;
1737 /*
1738 * If the moved block has a left sibling, fix up the pointers.
1739 */
1740 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1741 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1742 goto done;
1743 sib_info = sib_buf->data;
1744 if (unlikely(
1745 be32_to_cpu(sib_info->forw) != last_blkno ||
1746 sib_info->magic != dead_info->magic)) {
1747 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1748 XFS_ERRLEVEL_LOW, mp);
1749 error = XFS_ERROR(EFSCORRUPTED);
1750 goto done;
1751 }
1752 sib_info->forw = cpu_to_be32(dead_blkno);
1753 xfs_da_log_buf(tp, sib_buf,
1754 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1755 sizeof(sib_info->forw)));
1756 xfs_da_buf_done(sib_buf);
1757 sib_buf = NULL;
1758 }
1759 /*
1760 * If the moved block has a right sibling, fix up the pointers.
1761 */
1762 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1763 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1764 goto done;
1765 sib_info = sib_buf->data;
1766 if (unlikely(
1767 be32_to_cpu(sib_info->back) != last_blkno ||
1768 sib_info->magic != dead_info->magic)) {
1769 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1770 XFS_ERRLEVEL_LOW, mp);
1771 error = XFS_ERROR(EFSCORRUPTED);
1772 goto done;
1773 }
1774 sib_info->back = cpu_to_be32(dead_blkno);
1775 xfs_da_log_buf(tp, sib_buf,
1776 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1777 sizeof(sib_info->back)));
1778 xfs_da_buf_done(sib_buf);
1779 sib_buf = NULL;
1780 }
1781 par_blkno = mp->m_dirleafblk;
1782 level = -1;
1783 /*
1784 * Walk down the tree looking for the parent of the moved block.
1785 */
1786 for (;;) {
1787 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1788 goto done;
1789 par_node = par_buf->data;
1790 if (unlikely(par_node->hdr.info.magic !=
1791 cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1792 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1793 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1794 XFS_ERRLEVEL_LOW, mp);
1795 error = XFS_ERROR(EFSCORRUPTED);
1796 goto done;
1797 }
1798 level = be16_to_cpu(par_node->hdr.level);
1799 for (entno = 0;
1800 entno < be16_to_cpu(par_node->hdr.count) &&
1801 be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1802 entno++)
1803 continue;
1804 if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1805 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1806 XFS_ERRLEVEL_LOW, mp);
1807 error = XFS_ERROR(EFSCORRUPTED);
1808 goto done;
1809 }
1810 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1811 if (level == dead_level + 1)
1812 break;
1813 xfs_da_brelse(tp, par_buf);
1814 par_buf = NULL;
1815 }
1816 /*
1817 * We're in the right parent block.
1818 * Look for the right entry.
1819 */
1820 for (;;) {
1821 for (;
1822 entno < be16_to_cpu(par_node->hdr.count) &&
1823 be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1824 entno++)
1825 continue;
1826 if (entno < be16_to_cpu(par_node->hdr.count))
1827 break;
1828 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1829 xfs_da_brelse(tp, par_buf);
1830 par_buf = NULL;
1831 if (unlikely(par_blkno == 0)) {
1832 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
1833 XFS_ERRLEVEL_LOW, mp);
1834 error = XFS_ERROR(EFSCORRUPTED);
1835 goto done;
1836 }
1837 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1838 goto done;
1839 par_node = par_buf->data;
1840 if (unlikely(
1841 be16_to_cpu(par_node->hdr.level) != level ||
1842 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
1843 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1844 XFS_ERRLEVEL_LOW, mp);
1845 error = XFS_ERROR(EFSCORRUPTED);
1846 goto done;
1847 }
1848 entno = 0;
1849 }
1850 /*
1851 * Update the parent entry pointing to the moved block.
1852 */
1853 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1854 xfs_da_log_buf(tp, par_buf,
1855 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1856 sizeof(par_node->btree[entno].before)));
1857 xfs_da_buf_done(par_buf);
1858 xfs_da_buf_done(dead_buf);
1859 *dead_blknop = last_blkno;
1860 *dead_bufp = last_buf;
1861 return 0;
1862done:
1863 if (par_buf)
1864 xfs_da_brelse(tp, par_buf);
1865 if (sib_buf)
1866 xfs_da_brelse(tp, sib_buf);
1867 xfs_da_brelse(tp, last_buf);
1868 return error;
1869}
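
/*
 * On success xfs_da_swap_lastblock() hands the old last block back to the
 * caller through *dead_blknop and *dead_bufp: its contents have already
 * been copied into the original dead buffer (and logged) and the sibling
 * and parent pointers fixed up, so the caller frees the old last block
 * instead of the one it first asked to kill.
 */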
1870
1871/*
1872 * Remove a btree block from a directory or attribute.
1873 */
1874int
1875xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1876 xfs_dabuf_t *dead_buf)
1877{
1878 xfs_inode_t *dp;
1879 int done, error, w, count;
1880 xfs_trans_t *tp;
1881 xfs_mount_t *mp;
1882
1883 dp = args->dp;
1884 w = args->whichfork;
1885 tp = args->trans;
1886 mp = dp->i_mount;
1887 if (w == XFS_DATA_FORK)
1888 count = mp->m_dirblkfsbs;
1889 else
1890 count = 1;
1891 for (;;) {
1892 /*
1893 * Remove extents. If we get ENOSPC for a dir we have to move
1894 * the last block to the place we want to kill.
1895 */
1896 if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
1897 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1898 0, args->firstblock, args->flist,
1899 &done)) == ENOSPC) {
1900 if (w != XFS_DATA_FORK)
1901 break;
1902 if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
1903 &dead_buf)))
1904 break;
1905 } else {
1906 break;
1907 }
1908 }
1909 xfs_da_binval(tp, dead_buf);
1910 return error;
1911}
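
/*
 * Illustrative call pattern (state-based removal path): the join code
 * drops an emptied block like this, handing the buffer to
 * xfs_da_shrink_inode():
 *
 *	error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
 *				    drop_blk->bp);
 *	drop_blk->bp = NULL;
 */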
1912
1913/*
1914 * See if the mapping(s) for this btree block are valid, i.e.
1915 * don't contain holes, are logically contiguous, and cover the whole range.
1916 */
1917STATIC int
1918xfs_da_map_covers_blocks(
1919 int nmap,
1920 xfs_bmbt_irec_t *mapp,
1921 xfs_dablk_t bno,
1922 int count)
1923{
1924 int i;
1925 xfs_fileoff_t off;
1926
1927 for (i = 0, off = bno; i < nmap; i++) {
1928 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
1929 mapp[i].br_startblock == DELAYSTARTBLOCK) {
1930 return 0;
1931 }
1932 if (off != mapp[i].br_startoff) {
1933 return 0;
1934 }
1935 off += mapp[i].br_blockcount;
1936 }
1937 return off == bno + count;
1938}
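
/*
 * xfs_da_map_covers_blocks() returns nonzero only when the extents map
 * [bno, bno + count) exactly; a hole or delayed-allocation extent, a gap
 * in the logical offsets, or a short mapping all yield 0 and are treated
 * by the caller as corruption (or as a permitted hole for mappedbno -2).
 */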
1939
1940/*
1941 * Make a dabuf.
1942 * Used for get_buf, read_buf, read_bufr, and reada_buf.
1943 */
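/*
 * The "caller" argument selects the behavior: 0 gets the buffer(s) with
 * xfs_trans_get_buf(), 1 and 2 read them with xfs_trans_read_buf() (only
 * caller 1 verifies the block magic numbers), and 3 just issues readahead
 * and builds no dabuf.  The wrappers below pass 0, 1 and 3 respectively.
 */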
1944STATIC int
1945xfs_da_do_buf(
1946 xfs_trans_t *trans,
1947 xfs_inode_t *dp,
1948 xfs_dablk_t bno,
1949 xfs_daddr_t *mappedbnop,
1950 xfs_dabuf_t **bpp,
1951 int whichfork,
1952 int caller)
1953{
1954 xfs_buf_t *bp = NULL;
1955 xfs_buf_t **bplist;
1956 int error=0;
1957 int i;
1958 xfs_bmbt_irec_t map;
1959 xfs_bmbt_irec_t *mapp;
1960 xfs_daddr_t mappedbno;
1961 xfs_mount_t *mp;
1962 int nbplist=0;
1963 int nfsb;
1964 int nmap;
1965 xfs_dabuf_t *rbp;
1966
1967 mp = dp->i_mount;
1968 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
1969 mappedbno = *mappedbnop;
1970 /*
1971 * Caller doesn't have a mapping. -2 means don't complain
1972 * if we land in a hole.
1973 */
1974 if (mappedbno == -1 || mappedbno == -2) {
1975 /*
1976 * Optimize the one-block case.
1977 */
1978 if (nfsb == 1) {
1979 xfs_fsblock_t fsb;
1980
1981 if ((error =
1982 xfs_bmapi_single(trans, dp, whichfork, &fsb,
1983 (xfs_fileoff_t)bno))) {
1984 return error;
1985 }
1986 mapp = ↦
1987 if (fsb == NULLFSBLOCK) {
1988 nmap = 0;
1989 } else {
1990 map.br_startblock = fsb;
1991 map.br_startoff = (xfs_fileoff_t)bno;
1992 map.br_blockcount = 1;
1993 nmap = 1;
1994 }
1995 } else {
1996 mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
1997 nmap = nfsb;
1998 if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
1999 nfsb,
2000 XFS_BMAPI_METADATA |
2001 xfs_bmapi_aflag(whichfork),
2002 NULL, 0, mapp, &nmap, NULL)))
2003 goto exit0;
2004 }
2005 } else {
2006 map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2007 map.br_startoff = (xfs_fileoff_t)bno;
2008 map.br_blockcount = nfsb;
2009 mapp = ↦
2010 nmap = 1;
2011 }
2012 if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
2013 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
2014 if (unlikely(error == EFSCORRUPTED)) {
2015 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2016 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2017 __func__, (long long)bno,
2018 (long long)dp->i_ino);
2019 for (i = 0; i < nmap; i++) {
2020 xfs_alert(mp,
2021"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2022 i,
2023 (long long)mapp[i].br_startoff,
2024 (long long)mapp[i].br_startblock,
2025 (long long)mapp[i].br_blockcount,
2026 mapp[i].br_state);
2027 }
2028 }
2029 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2030 XFS_ERRLEVEL_LOW, mp);
2031 }
2032 goto exit0;
2033 }
2034 if (caller != 3 && nmap > 1) {
2035 bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
2036 nbplist = 0;
2037 } else
2038 bplist = NULL;
2039 /*
2040 * Turn the mapping(s) into buffer(s).
2041 */
2042 for (i = 0; i < nmap; i++) {
2043 int nmapped;
2044
2045 mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
2046 if (i == 0)
2047 *mappedbnop = mappedbno;
2048 nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
2049 switch (caller) {
2050 case 0:
2051 bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
2052 mappedbno, nmapped, 0);
2053 error = bp ? bp->b_error : XFS_ERROR(EIO);
2054 break;
2055 case 1:
2056 case 2:
2057 bp = NULL;
2058 error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
2059 mappedbno, nmapped, 0, &bp);
2060 break;
2061 case 3:
2062 xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
2063 error = 0;
2064 bp = NULL;
2065 break;
2066 }
2067 if (error) {
2068 if (bp)
2069 xfs_trans_brelse(trans, bp);
2070 goto exit1;
2071 }
2072 if (!bp)
2073 continue;
2074 if (caller == 1) {
2075 if (whichfork == XFS_ATTR_FORK) {
2076 XFS_BUF_SET_VTYPE_REF(bp, B_FS_ATTR_BTREE,
2077 XFS_ATTR_BTREE_REF);
2078 } else {
2079 XFS_BUF_SET_VTYPE_REF(bp, B_FS_DIR_BTREE,
2080 XFS_DIR_BTREE_REF);
2081 }
2082 }
2083 if (bplist) {
2084 bplist[nbplist++] = bp;
2085 }
2086 }
2087 /*
2088 * Build a dabuf structure.
2089 */
2090 if (bplist) {
2091 rbp = xfs_da_buf_make(nbplist, bplist);
2092 } else if (bp)
2093 rbp = xfs_da_buf_make(1, &bp);
2094 else
2095 rbp = NULL;
2096 /*
2097 * For read_buf, check the magic number.
2098 */
2099 if (caller == 1) {
2100 xfs_dir2_data_hdr_t *hdr = rbp->data;
2101 xfs_dir2_free_t *free = rbp->data;
2102 xfs_da_blkinfo_t *info = rbp->data;
2103 uint magic, magic1;
2104
2105 magic = be16_to_cpu(info->magic);
2106 magic1 = be32_to_cpu(hdr->magic);
2107 if (unlikely(
2108 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2109 (magic != XFS_ATTR_LEAF_MAGIC) &&
2110 (magic != XFS_DIR2_LEAF1_MAGIC) &&
2111 (magic != XFS_DIR2_LEAFN_MAGIC) &&
2112 (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2113 (magic1 != XFS_DIR2_DATA_MAGIC) &&
2114 (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
2115 mp, XFS_ERRTAG_DA_READ_BUF,
2116 XFS_RANDOM_DA_READ_BUF))) {
2117 trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
2118 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2119 XFS_ERRLEVEL_LOW, mp, info);
2120 error = XFS_ERROR(EFSCORRUPTED);
2121 xfs_da_brelse(trans, rbp);
2122 nbplist = 0;
2123 goto exit1;
2124 }
2125 }
2126 if (bplist) {
2127 kmem_free(bplist);
2128 }
2129 if (mapp != &map) {
2130 kmem_free(mapp);
2131 }
2132 if (bpp)
2133 *bpp = rbp;
2134 return 0;
2135exit1:
2136 if (bplist) {
2137 for (i = 0; i < nbplist; i++)
2138 xfs_trans_brelse(trans, bplist[i]);
2139 kmem_free(bplist);
2140 }
2141exit0:
2142 if (mapp != &map)
2143 kmem_free(mapp);
2144 if (bpp)
2145 *bpp = NULL;
2146 return error;
2147}
2148
2149/*
2150 * Get a buffer for the dir/attr block.
2151 */
2152int
2153xfs_da_get_buf(
2154 xfs_trans_t *trans,
2155 xfs_inode_t *dp,
2156 xfs_dablk_t bno,
2157 xfs_daddr_t mappedbno,
2158 xfs_dabuf_t **bpp,
2159 int whichfork)
2160{
2161 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
2162}
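
/*
 * Typical use is for a block that was just allocated and is about to be
 * initialized rather than read, e.g. from node creation:
 *
 *	error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
 */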
2163
2164/*
2165 * Get a buffer for the dir/attr block, fill in the contents.
2166 */
2167int
2168xfs_da_read_buf(
2169 xfs_trans_t *trans,
2170 xfs_inode_t *dp,
2171 xfs_dablk_t bno,
2172 xfs_daddr_t mappedbno,
2173 xfs_dabuf_t **bpp,
2174 int whichfork)
2175{
2176 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
2177}
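
/*
 * Typical use: callers that don't already know the disk address pass a
 * mappedbno of -1 and let the bmap lookup find it, e.g.
 *
 *	error = xfs_da_read_buf(tp, dp, blkno, -1, &bp, XFS_DATA_FORK);
 *
 * Passing -2 instead suppresses the corruption report if the block turns
 * out to be a hole; *bpp then comes back NULL with a return value of 0.
 */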
2178
2179/*
2180 * Readahead the dir/attr block.
2181 */
2182xfs_daddr_t
2183xfs_da_reada_buf(
2184 xfs_trans_t *trans,
2185 xfs_inode_t *dp,
2186 xfs_dablk_t bno,
2187 int whichfork)
2188{
2189 xfs_daddr_t rval;
2190
2191 rval = -1;
2192 if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
2193 return -1;
2194 else
2195 return rval;
2196}
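
/*
 * The readahead variant returns the disk address of the first mapped
 * block (so the caller can remember where the block lives) or -1 if the
 * mapping failed or fell in a hole; no dabuf is constructed.
 */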
2197
2198kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
2199kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
2200
2201/*
2202 * Allocate a dir-state structure.
2203 * We don't put them on the stack since they're large.
2204 */
2205xfs_da_state_t *
2206xfs_da_state_alloc(void)
2207{
2208 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
2209}
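
/*
 * A state structure is always paired with xfs_da_state_free().  Sketch of
 * a typical caller (fields abbreviated, assuming the usual setup done by
 * the directory and attribute code):
 *
 *	state = xfs_da_state_alloc();
 *	state->args = args;
 *	state->mp = args->dp->i_mount;
 *	...
 *	xfs_da_state_free(state);
 */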
2210
2211/*
2212 * Kill the altpath contents of a da-state structure.
2213 */
2214STATIC void
2215xfs_da_state_kill_altpath(xfs_da_state_t *state)
2216{
2217 int i;
2218
2219 for (i = 0; i < state->altpath.active; i++) {
2220 if (state->altpath.blk[i].bp) {
2221 if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2222 xfs_da_buf_done(state->altpath.blk[i].bp);
2223 state->altpath.blk[i].bp = NULL;
2224 }
2225 }
2226 state->altpath.active = 0;
2227}
2228
2229/*
2230 * Free a da-state structure.
2231 */
2232void
2233xfs_da_state_free(xfs_da_state_t *state)
2234{
2235 int i;
2236
2237 xfs_da_state_kill_altpath(state);
2238 for (i = 0; i < state->path.active; i++) {
2239 if (state->path.blk[i].bp)
2240 xfs_da_buf_done(state->path.blk[i].bp);
2241 }
2242 if (state->extravalid && state->extrablk.bp)
2243 xfs_da_buf_done(state->extrablk.bp);
2244#ifdef DEBUG
2245 memset((char *)state, 0, sizeof(*state));
2246#endif /* DEBUG */
2247 kmem_zone_free(xfs_da_state_zone, state);
2248}
2249
2250/*
2251 * Create a dabuf.
2252 */
2253/* ARGSUSED */
2254STATIC xfs_dabuf_t *
2255xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
2256{
2257 xfs_buf_t *bp;
2258 xfs_dabuf_t *dabuf;
2259 int i;
2260 int off;
2261
2262 if (nbuf == 1)
2263 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2264 else
2265 dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
2266 dabuf->dirty = 0;
2267 if (nbuf == 1) {
2268 dabuf->nbuf = 1;
2269 bp = bps[0];
2270 dabuf->bbcount = (short)BTOBB(XFS_BUF_COUNT(bp));
2271 dabuf->data = bp->b_addr;
2272 dabuf->bps[0] = bp;
2273 } else {
2274 dabuf->nbuf = nbuf;
2275 for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
2276 dabuf->bps[i] = bp = bps[i];
2277 dabuf->bbcount += BTOBB(XFS_BUF_COUNT(bp));
2278 }
2279 dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
2280 for (i = off = 0; i < nbuf; i++, off += XFS_BUF_COUNT(bp)) {
2281 bp = bps[i];
2282 memcpy((char *)dabuf->data + off, bp->b_addr,
2283 XFS_BUF_COUNT(bp));
2284 }
2285 }
2286 return dabuf;
2287}
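
/*
 * In the multi-buffer case the dabuf carries its own flat copy of the
 * data; modifications go through xfs_da_log_buf(), which marks the dabuf
 * dirty so that xfs_da_buf_clean() can copy the bytes back into the
 * underlying xfs_buf_t pieces before they are released.
 */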
2288
2289/*
2290 * Un-dirty a dabuf.
2291 */
2292STATIC void
2293xfs_da_buf_clean(xfs_dabuf_t *dabuf)
2294{
2295 xfs_buf_t *bp;
2296 int i;
2297 int off;
2298
2299 if (dabuf->dirty) {
2300 ASSERT(dabuf->nbuf > 1);
2301 dabuf->dirty = 0;
2302 for (i = off = 0; i < dabuf->nbuf;
2303 i++, off += XFS_BUF_COUNT(bp)) {
2304 bp = dabuf->bps[i];
2305 memcpy(bp->b_addr, dabuf->data + off,
2306 XFS_BUF_COUNT(bp));
2307 }
2308 }
2309}
2310
2311/*
2312 * Release a dabuf.
2313 */
2314void
2315xfs_da_buf_done(xfs_dabuf_t *dabuf)
2316{
2317 ASSERT(dabuf);
2318 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2319 if (dabuf->dirty)
2320 xfs_da_buf_clean(dabuf);
2321 if (dabuf->nbuf > 1) {
2322 kmem_free(dabuf->data);
2323 kmem_free(dabuf);
2324 } else {
2325 kmem_zone_free(xfs_dabuf_zone, dabuf);
2326 }
2327}
2328
2329/*
2330 * Log a byte range of a dabuf in a transaction.
2331 */
2332void
2333xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2334{
2335 xfs_buf_t *bp;
2336 uint f;
2337 int i;
2338 uint l;
2339 int off;
2340
2341 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2342 if (dabuf->nbuf == 1) {
2343 ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
2344 xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
2345 return;
2346 }
2347 dabuf->dirty = 1;
2348 ASSERT(first <= last);
2349 for (i = off = 0; i < dabuf->nbuf; i++, off += XFS_BUF_COUNT(bp)) {
2350 bp = dabuf->bps[i];
2351 f = off;
2352 l = f + XFS_BUF_COUNT(bp) - 1;
2353 if (f < first)
2354 f = first;
2355 if (l > last)
2356 l = last;
2357 if (f <= l)
2358 xfs_trans_log_buf(tp, bp, f - off, l - off);
2359 /*
2360 * B_DONE is set by xfs_trans_log_buf.
2361 * If we don't set it on a new buffer (get, not read)
2362 * and nothing is ever written into the buffer, it won't
2363 * be set; at commit it is released into the cache,
2364 * and then a read will fail.
2365 */
2366 else if (!(XFS_BUF_ISDONE(bp)))
2367 XFS_BUF_DONE(bp);
2368 }
2369 ASSERT(last < off);
2370}
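
/*
 * Callers normally log a sub-range of a block with XFS_DA_LOGRANGE(),
 * which turns a structure member into (first, last) byte offsets, e.g.
 *
 *	xfs_da_log_buf(tp, bp,
 *		       XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
 */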
2371
2372/*
2373 * Release dabuf from a transaction.
2374 * Have to free up the dabuf before the buffers are released,
2375 * since the synchronization on the dabuf is really the lock on the buffer.
2376 */
2377void
2378xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2379{
2380 xfs_buf_t *bp;
2381 xfs_buf_t **bplist;
2382 int i;
2383 int nbuf;
2384
2385 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2386 if ((nbuf = dabuf->nbuf) == 1) {
2387 bplist = &bp;
2388 bp = dabuf->bps[0];
2389 } else {
2390 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2391 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2392 }
2393 xfs_da_buf_done(dabuf);
2394 for (i = 0; i < nbuf; i++)
2395 xfs_trans_brelse(tp, bplist[i]);
2396 if (bplist != &bp)
2397 kmem_free(bplist);
2398}
2399
2400/*
2401 * Invalidate dabuf from a transaction.
2402 */
2403void
2404xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2405{
2406 xfs_buf_t *bp;
2407 xfs_buf_t **bplist;
2408 int i;
2409 int nbuf;
2410
2411 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2412 if ((nbuf = dabuf->nbuf) == 1) {
2413 bplist = &bp;
2414 bp = dabuf->bps[0];
2415 } else {
2416 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2417 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2418 }
2419 xfs_da_buf_done(dabuf);
2420 for (i = 0; i < nbuf; i++)
2421 xfs_trans_binval(tp, bplist[i]);
2422 if (bplist != &bp)
2423 kmem_free(bplist);
2424}
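
/*
 * Lifecycle summary: xfs_da_buf_done() only tears down the dabuf wrapper
 * (flushing a dirty multi-buffer copy first), xfs_da_brelse() additionally
 * hands the underlying buffers back to the transaction, and xfs_da_binval()
 * is used instead when the block is being freed and its contents must not
 * be written back.
 */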
2425
2426/*
2427 * Get the first daddr from a dabuf.
2428 */
2429xfs_daddr_t
2430xfs_da_blkno(xfs_dabuf_t *dabuf)
2431{
2432 ASSERT(dabuf->nbuf);
2433 ASSERT(dabuf->data);
2434 return XFS_BUF_ADDR(dabuf->bps[0]);
2435}
104{
105 xfs_da_intnode_t *node;
106 xfs_dabuf_t *bp;
107 int error;
108 xfs_trans_t *tp;
109
110 trace_xfs_da_node_create(args);
111
112 tp = args->trans;
113 error = xfs_da_get_buf(tp, args->dp, blkno, -1, &bp, whichfork);
114 if (error)
115 return(error);
116 ASSERT(bp != NULL);
117 node = bp->data;
118 node->hdr.info.forw = 0;
119 node->hdr.info.back = 0;
120 node->hdr.info.magic = cpu_to_be16(XFS_DA_NODE_MAGIC);
121 node->hdr.info.pad = 0;
122 node->hdr.count = 0;
123 node->hdr.level = cpu_to_be16(level);
124
125 xfs_da_log_buf(tp, bp,
126 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
127
128 *bpp = bp;
129 return(0);
130}
131
132/*
133 * Split a leaf node, rebalance, then possibly split
134 * intermediate nodes, rebalance, etc.
135 */
136int /* error */
137xfs_da_split(xfs_da_state_t *state)
138{
139 xfs_da_state_blk_t *oldblk, *newblk, *addblk;
140 xfs_da_intnode_t *node;
141 xfs_dabuf_t *bp;
142 int max, action, error, i;
143
144 trace_xfs_da_split(state->args);
145
146 /*
147 * Walk back up the tree splitting/inserting/adjusting as necessary.
148 * If we need to insert and there isn't room, split the node, then
149 * decide which fragment to insert the new block from below into.
150 * Note that we may split the root this way, but we need more fixup.
151 */
152 max = state->path.active - 1;
153 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
154 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
155 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
156
157 addblk = &state->path.blk[max]; /* initial dummy value */
158 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
159 oldblk = &state->path.blk[i];
160 newblk = &state->altpath.blk[i];
161
162 /*
163 * If a leaf node then
164 * Allocate a new leaf node, then rebalance across them.
165 * else if an intermediate node then
166 * We split on the last layer, must we split the node?
167 */
168 switch (oldblk->magic) {
169 case XFS_ATTR_LEAF_MAGIC:
170 error = xfs_attr_leaf_split(state, oldblk, newblk);
171 if ((error != 0) && (error != ENOSPC)) {
172 return(error); /* GROT: attr is inconsistent */
173 }
174 if (!error) {
175 addblk = newblk;
176 break;
177 }
178 /*
179 * Entry wouldn't fit, split the leaf again.
180 */
181 state->extravalid = 1;
182 if (state->inleaf) {
183 state->extraafter = 0; /* before newblk */
184 trace_xfs_attr_leaf_split_before(state->args);
185 error = xfs_attr_leaf_split(state, oldblk,
186 &state->extrablk);
187 } else {
188 state->extraafter = 1; /* after newblk */
189 trace_xfs_attr_leaf_split_after(state->args);
190 error = xfs_attr_leaf_split(state, newblk,
191 &state->extrablk);
192 }
193 if (error)
194 return(error); /* GROT: attr inconsistent */
195 addblk = newblk;
196 break;
197 case XFS_DIR2_LEAFN_MAGIC:
198 error = xfs_dir2_leafn_split(state, oldblk, newblk);
199 if (error)
200 return error;
201 addblk = newblk;
202 break;
203 case XFS_DA_NODE_MAGIC:
204 error = xfs_da_node_split(state, oldblk, newblk, addblk,
205 max - i, &action);
206 xfs_da_buf_done(addblk->bp);
207 addblk->bp = NULL;
208 if (error)
209 return(error); /* GROT: dir is inconsistent */
210 /*
211 * Record the newly split block for the next time thru?
212 */
213 if (action)
214 addblk = newblk;
215 else
216 addblk = NULL;
217 break;
218 }
219
220 /*
221 * Update the btree to show the new hashval for this child.
222 */
223 xfs_da_fixhashpath(state, &state->path);
224 /*
225 * If we won't need this block again, it's getting dropped
226 * from the active path by the loop control, so we need
227 * to mark it done now.
228 */
229 if (i > 0 || !addblk)
230 xfs_da_buf_done(oldblk->bp);
231 }
232 if (!addblk)
233 return(0);
234
235 /*
236 * Split the root node.
237 */
238 ASSERT(state->path.active == 0);
239 oldblk = &state->path.blk[0];
240 error = xfs_da_root_split(state, oldblk, addblk);
241 if (error) {
242 xfs_da_buf_done(oldblk->bp);
243 xfs_da_buf_done(addblk->bp);
244 addblk->bp = NULL;
245 return(error); /* GROT: dir is inconsistent */
246 }
247
248 /*
249 * Update pointers to the node which used to be block 0 and
250 * just got bumped because of the addition of a new root node.
251 * There might be three blocks involved if a double split occurred,
252 * and the original block 0 could be at any position in the list.
253 */
254
255 node = oldblk->bp->data;
256 if (node->hdr.info.forw) {
257 if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
258 bp = addblk->bp;
259 } else {
260 ASSERT(state->extravalid);
261 bp = state->extrablk.bp;
262 }
263 node = bp->data;
264 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
265 xfs_da_log_buf(state->args->trans, bp,
266 XFS_DA_LOGRANGE(node, &node->hdr.info,
267 sizeof(node->hdr.info)));
268 }
269 node = oldblk->bp->data;
270 if (node->hdr.info.back) {
271 if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
272 bp = addblk->bp;
273 } else {
274 ASSERT(state->extravalid);
275 bp = state->extrablk.bp;
276 }
277 node = bp->data;
278 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
279 xfs_da_log_buf(state->args->trans, bp,
280 XFS_DA_LOGRANGE(node, &node->hdr.info,
281 sizeof(node->hdr.info)));
282 }
283 xfs_da_buf_done(oldblk->bp);
284 xfs_da_buf_done(addblk->bp);
285 addblk->bp = NULL;
286 return(0);
287}
288
289/*
290 * Split the root. We have to create a new root and point to the two
291 * parts (the split old root) that we just created. Copy block zero to
292 * the EOF, extending the inode in process.
293 */
294STATIC int /* error */
295xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
296 xfs_da_state_blk_t *blk2)
297{
298 xfs_da_intnode_t *node, *oldroot;
299 xfs_da_args_t *args;
300 xfs_dablk_t blkno;
301 xfs_dabuf_t *bp;
302 int error, size;
303 xfs_inode_t *dp;
304 xfs_trans_t *tp;
305 xfs_mount_t *mp;
306 xfs_dir2_leaf_t *leaf;
307
308 trace_xfs_da_root_split(state->args);
309
310 /*
311 * Copy the existing (incorrect) block from the root node position
312 * to a free space somewhere.
313 */
314 args = state->args;
315 ASSERT(args != NULL);
316 error = xfs_da_grow_inode(args, &blkno);
317 if (error)
318 return(error);
319 dp = args->dp;
320 tp = args->trans;
321 mp = state->mp;
322 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
323 if (error)
324 return(error);
325 ASSERT(bp != NULL);
326 node = bp->data;
327 oldroot = blk1->bp->data;
328 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
329 size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
330 (char *)oldroot);
331 } else {
332 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
333 leaf = (xfs_dir2_leaf_t *)oldroot;
334 size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
335 (char *)leaf);
336 }
337 memcpy(node, oldroot, size);
338 xfs_da_log_buf(tp, bp, 0, size - 1);
339 xfs_da_buf_done(blk1->bp);
340 blk1->bp = bp;
341 blk1->blkno = blkno;
342
343 /*
344 * Set up the new root node.
345 */
346 error = xfs_da_node_create(args,
347 (args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
348 be16_to_cpu(node->hdr.level) + 1, &bp, args->whichfork);
349 if (error)
350 return(error);
351 node = bp->data;
352 node->btree[0].hashval = cpu_to_be32(blk1->hashval);
353 node->btree[0].before = cpu_to_be32(blk1->blkno);
354 node->btree[1].hashval = cpu_to_be32(blk2->hashval);
355 node->btree[1].before = cpu_to_be32(blk2->blkno);
356 node->hdr.count = cpu_to_be16(2);
357
358#ifdef DEBUG
359 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
360 ASSERT(blk1->blkno >= mp->m_dirleafblk &&
361 blk1->blkno < mp->m_dirfreeblk);
362 ASSERT(blk2->blkno >= mp->m_dirleafblk &&
363 blk2->blkno < mp->m_dirfreeblk);
364 }
365#endif
366
367 /* Header is already logged by xfs_da_node_create */
368 xfs_da_log_buf(tp, bp,
369 XFS_DA_LOGRANGE(node, node->btree,
370 sizeof(xfs_da_node_entry_t) * 2));
371 xfs_da_buf_done(bp);
372
373 return(0);
374}
375
376/*
377 * Split the node, rebalance, then add the new entry.
378 */
379STATIC int /* error */
380xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
381 xfs_da_state_blk_t *newblk,
382 xfs_da_state_blk_t *addblk,
383 int treelevel, int *result)
384{
385 xfs_da_intnode_t *node;
386 xfs_dablk_t blkno;
387 int newcount, error;
388 int useextra;
389
390 trace_xfs_da_node_split(state->args);
391
392 node = oldblk->bp->data;
393 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
394
395 /*
396 * With V2 dirs the extra block is data or freespace.
397 */
398 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
399 newcount = 1 + useextra;
400 /*
401 * Do we have to split the node?
402 */
403 if ((be16_to_cpu(node->hdr.count) + newcount) > state->node_ents) {
404 /*
405 * Allocate a new node, add to the doubly linked chain of
406 * nodes, then move some of our excess entries into it.
407 */
408 error = xfs_da_grow_inode(state->args, &blkno);
409 if (error)
410 return(error); /* GROT: dir is inconsistent */
411
412 error = xfs_da_node_create(state->args, blkno, treelevel,
413 &newblk->bp, state->args->whichfork);
414 if (error)
415 return(error); /* GROT: dir is inconsistent */
416 newblk->blkno = blkno;
417 newblk->magic = XFS_DA_NODE_MAGIC;
418 xfs_da_node_rebalance(state, oldblk, newblk);
419 error = xfs_da_blk_link(state, oldblk, newblk);
420 if (error)
421 return(error);
422 *result = 1;
423 } else {
424 *result = 0;
425 }
426
427 /*
428 * Insert the new entry(s) into the correct block
429 * (updating last hashval in the process).
430 *
431 * xfs_da_node_add() inserts BEFORE the given index,
432 * and as a result of using node_lookup_int() we always
433 * point to a valid entry (not after one), but a split
434 * operation always results in a new block whose hashvals
435 * FOLLOW the current block.
436 *
437 * If we had double-split op below us, then add the extra block too.
438 */
439 node = oldblk->bp->data;
440 if (oldblk->index <= be16_to_cpu(node->hdr.count)) {
441 oldblk->index++;
442 xfs_da_node_add(state, oldblk, addblk);
443 if (useextra) {
444 if (state->extraafter)
445 oldblk->index++;
446 xfs_da_node_add(state, oldblk, &state->extrablk);
447 state->extravalid = 0;
448 }
449 } else {
450 newblk->index++;
451 xfs_da_node_add(state, newblk, addblk);
452 if (useextra) {
453 if (state->extraafter)
454 newblk->index++;
455 xfs_da_node_add(state, newblk, &state->extrablk);
456 state->extravalid = 0;
457 }
458 }
459
460 return(0);
461}
462
463/*
464 * Balance the btree elements between two intermediate nodes,
465 * usually one full and one empty.
466 *
467 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
468 */
469STATIC void
470xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
471 xfs_da_state_blk_t *blk2)
472{
473 xfs_da_intnode_t *node1, *node2, *tmpnode;
474 xfs_da_node_entry_t *btree_s, *btree_d;
475 int count, tmp;
476 xfs_trans_t *tp;
477
478 trace_xfs_da_node_rebalance(state->args);
479
480 node1 = blk1->bp->data;
481 node2 = blk2->bp->data;
482 /*
483 * Figure out how many entries need to move, and in which direction.
484 * Swap the nodes around if that makes it simpler.
485 */
486 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
487 ((be32_to_cpu(node2->btree[0].hashval) < be32_to_cpu(node1->btree[0].hashval)) ||
488 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
489 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
490 tmpnode = node1;
491 node1 = node2;
492 node2 = tmpnode;
493 }
494 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
495 ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
496 count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
497 if (count == 0)
498 return;
499 tp = state->args->trans;
500 /*
501 * Two cases: high-to-low and low-to-high.
502 */
503 if (count > 0) {
504 /*
505 * Move elements in node2 up to make a hole.
506 */
507 if ((tmp = be16_to_cpu(node2->hdr.count)) > 0) {
508 tmp *= (uint)sizeof(xfs_da_node_entry_t);
509 btree_s = &node2->btree[0];
510 btree_d = &node2->btree[count];
511 memmove(btree_d, btree_s, tmp);
512 }
513
514 /*
515 * Move the req'd B-tree elements from high in node1 to
516 * low in node2.
517 */
518 be16_add_cpu(&node2->hdr.count, count);
519 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
520 btree_s = &node1->btree[be16_to_cpu(node1->hdr.count) - count];
521 btree_d = &node2->btree[0];
522 memcpy(btree_d, btree_s, tmp);
523 be16_add_cpu(&node1->hdr.count, -count);
524 } else {
525 /*
526 * Move the req'd B-tree elements from low in node2 to
527 * high in node1.
528 */
529 count = -count;
530 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
531 btree_s = &node2->btree[0];
532 btree_d = &node1->btree[be16_to_cpu(node1->hdr.count)];
533 memcpy(btree_d, btree_s, tmp);
534 be16_add_cpu(&node1->hdr.count, count);
535 xfs_da_log_buf(tp, blk1->bp,
536 XFS_DA_LOGRANGE(node1, btree_d, tmp));
537
538 /*
539 * Move elements in node2 down to fill the hole.
540 */
541 tmp = be16_to_cpu(node2->hdr.count) - count;
542 tmp *= (uint)sizeof(xfs_da_node_entry_t);
543 btree_s = &node2->btree[count];
544 btree_d = &node2->btree[0];
545 memmove(btree_d, btree_s, tmp);
546 be16_add_cpu(&node2->hdr.count, -count);
547 }
548
549 /*
550 * Log header of node 1 and all current bits of node 2.
551 */
552 xfs_da_log_buf(tp, blk1->bp,
553 XFS_DA_LOGRANGE(node1, &node1->hdr, sizeof(node1->hdr)));
554 xfs_da_log_buf(tp, blk2->bp,
555 XFS_DA_LOGRANGE(node2, &node2->hdr,
556 sizeof(node2->hdr) +
557 sizeof(node2->btree[0]) * be16_to_cpu(node2->hdr.count)));
558
559 /*
560 * Record the last hashval from each block for upward propagation.
561 * (note: don't use the swapped node pointers)
562 */
563 node1 = blk1->bp->data;
564 node2 = blk2->bp->data;
565 blk1->hashval = be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval);
566 blk2->hashval = be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval);
567
568 /*
569 * Adjust the expected index for insertion.
570 */
571 if (blk1->index >= be16_to_cpu(node1->hdr.count)) {
572 blk2->index = blk1->index - be16_to_cpu(node1->hdr.count);
573 blk1->index = be16_to_cpu(node1->hdr.count) + 1; /* make it invalid */
574 }
575}
576
577/*
578 * Add a new entry to an intermediate node.
579 */
580STATIC void
581xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
582 xfs_da_state_blk_t *newblk)
583{
584 xfs_da_intnode_t *node;
585 xfs_da_node_entry_t *btree;
586 int tmp;
587
588 trace_xfs_da_node_add(state->args);
589
590 node = oldblk->bp->data;
591 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
592 ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
593 ASSERT(newblk->blkno != 0);
594 if (state->args->whichfork == XFS_DATA_FORK)
595 ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
596 newblk->blkno < state->mp->m_dirfreeblk);
597
598 /*
599 * We may need to make some room before we insert the new node.
600 */
601 tmp = 0;
602 btree = &node->btree[ oldblk->index ];
603 if (oldblk->index < be16_to_cpu(node->hdr.count)) {
604 tmp = (be16_to_cpu(node->hdr.count) - oldblk->index) * (uint)sizeof(*btree);
605 memmove(btree + 1, btree, tmp);
606 }
607 btree->hashval = cpu_to_be32(newblk->hashval);
608 btree->before = cpu_to_be32(newblk->blkno);
609 xfs_da_log_buf(state->args->trans, oldblk->bp,
610 XFS_DA_LOGRANGE(node, btree, tmp + sizeof(*btree)));
611 be16_add_cpu(&node->hdr.count, 1);
612 xfs_da_log_buf(state->args->trans, oldblk->bp,
613 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
614
615 /*
616 * Copy the last hash value from the oldblk to propagate upwards.
617 */
618 oldblk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1 ].hashval);
619}
620
621/*========================================================================
622 * Routines used for shrinking the Btree.
623 *========================================================================*/
624
625/*
626 * Deallocate an empty leaf node, remove it from its parent,
627 * possibly deallocating that block, etc...
628 */
629int
630xfs_da_join(xfs_da_state_t *state)
631{
632 xfs_da_state_blk_t *drop_blk, *save_blk;
633 int action, error;
634
635 trace_xfs_da_join(state->args);
636
637 action = 0;
638 drop_blk = &state->path.blk[ state->path.active-1 ];
639 save_blk = &state->altpath.blk[ state->path.active-1 ];
640 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
641 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
642 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
643
644 /*
645 * Walk back up the tree joining/deallocating as necessary.
646 * When we stop dropping blocks, break out.
647 */
648 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
649 state->path.active--) {
650 /*
651 * See if we can combine the block with a neighbor.
652 * (action == 0) => no options, just leave
653 * (action == 1) => coalesce, then unlink
654 * (action == 2) => block empty, unlink it
655 */
656 switch (drop_blk->magic) {
657 case XFS_ATTR_LEAF_MAGIC:
658 error = xfs_attr_leaf_toosmall(state, &action);
659 if (error)
660 return(error);
661 if (action == 0)
662 return(0);
663 xfs_attr_leaf_unbalance(state, drop_blk, save_blk);
664 break;
665 case XFS_DIR2_LEAFN_MAGIC:
666 error = xfs_dir2_leafn_toosmall(state, &action);
667 if (error)
668 return error;
669 if (action == 0)
670 return 0;
671 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
672 break;
673 case XFS_DA_NODE_MAGIC:
674 /*
675 * Remove the offending node, fixup hashvals,
676 * check for a toosmall neighbor.
677 */
678 xfs_da_node_remove(state, drop_blk);
679 xfs_da_fixhashpath(state, &state->path);
680 error = xfs_da_node_toosmall(state, &action);
681 if (error)
682 return(error);
683 if (action == 0)
684 return 0;
685 xfs_da_node_unbalance(state, drop_blk, save_blk);
686 break;
687 }
688 xfs_da_fixhashpath(state, &state->altpath);
689 error = xfs_da_blk_unlink(state, drop_blk, save_blk);
690 xfs_da_state_kill_altpath(state);
691 if (error)
692 return(error);
693 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
694 drop_blk->bp);
695 drop_blk->bp = NULL;
696 if (error)
697 return(error);
698 }
699 /*
700 * We joined all the way to the top. If it turns out that
701 * we only have one entry in the root, make the child block
702 * the new root.
703 */
704 xfs_da_node_remove(state, drop_blk);
705 xfs_da_fixhashpath(state, &state->path);
706 error = xfs_da_root_join(state, &state->path.blk[0]);
707 return(error);
708}
709
710#ifdef DEBUG
711static void
712xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
713{
714 __be16 magic = blkinfo->magic;
715
716 if (level == 1) {
717 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
718 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
719 } else
720 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
721 ASSERT(!blkinfo->forw);
722 ASSERT(!blkinfo->back);
723}
724#else /* !DEBUG */
725#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
726#endif /* !DEBUG */
727
728/*
729 * We have only one entry in the root. Copy the only remaining child of
730 * the old root to block 0 as the new root node.
731 */
732STATIC int
733xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
734{
735 xfs_da_intnode_t *oldroot;
736 xfs_da_args_t *args;
737 xfs_dablk_t child;
738 xfs_dabuf_t *bp;
739 int error;
740
741 trace_xfs_da_root_join(state->args);
742
743 args = state->args;
744 ASSERT(args != NULL);
745 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
746 oldroot = root_blk->bp->data;
747 ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
748 ASSERT(!oldroot->hdr.info.forw);
749 ASSERT(!oldroot->hdr.info.back);
750
751 /*
752 * If the root has more than one child, then don't do anything.
753 */
754 if (be16_to_cpu(oldroot->hdr.count) > 1)
755 return(0);
756
757 /*
758 * Read in the (only) child block, then copy those bytes into
759 * the root block's buffer and free the original child block.
760 */
761 child = be32_to_cpu(oldroot->btree[0].before);
762 ASSERT(child != 0);
763 error = xfs_da_read_buf(args->trans, args->dp, child, -1, &bp,
764 args->whichfork);
765 if (error)
766 return(error);
767 ASSERT(bp != NULL);
768 xfs_da_blkinfo_onlychild_validate(bp->data,
769 be16_to_cpu(oldroot->hdr.level));
770
771 memcpy(root_blk->bp->data, bp->data, state->blocksize);
772 xfs_da_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
773 error = xfs_da_shrink_inode(args, child, bp);
774 return(error);
775}
776
777/*
778 * Check a node block and its neighbors to see if the block should be
779 * collapsed into one or the other neighbor. Always keep the block
780 * with the smaller block number.
781 * If the current block is over 50% full, don't try to join it, return 0.
782 * If the block is empty, fill in the state structure and return 2.
783 * If it can be collapsed, fill in the state structure and return 1.
784 * If nothing can be done, return 0.
785 */
786STATIC int
787xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
788{
789 xfs_da_intnode_t *node;
790 xfs_da_state_blk_t *blk;
791 xfs_da_blkinfo_t *info;
792 int count, forward, error, retval, i;
793 xfs_dablk_t blkno;
794 xfs_dabuf_t *bp;
795
796 /*
797 * Check for the degenerate case of the block being over 50% full.
798 * If so, it's not worth even looking to see if we might be able
799 * to coalesce with a sibling.
800 */
801 blk = &state->path.blk[ state->path.active-1 ];
802 info = blk->bp->data;
803 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
804 node = (xfs_da_intnode_t *)info;
805 count = be16_to_cpu(node->hdr.count);
806 if (count > (state->node_ents >> 1)) {
807 *action = 0; /* blk over 50%, don't try to join */
808 return(0); /* blk over 50%, don't try to join */
809 }
810
811 /*
812 * Check for the degenerate case of the block being empty.
813 * If the block is empty, we'll simply delete it, no need to
814 * coalesce it with a sibling block. We choose (arbitrarily)
815 * to merge with the forward block unless it is NULL.
816 */
817 if (count == 0) {
818 /*
819 * Make altpath point to the block we want to keep and
820 * path point to the block we want to drop (this one).
821 */
822 forward = (info->forw != 0);
823 memcpy(&state->altpath, &state->path, sizeof(state->path));
824 error = xfs_da_path_shift(state, &state->altpath, forward,
825 0, &retval);
826 if (error)
827 return(error);
828 if (retval) {
829 *action = 0;
830 } else {
831 *action = 2;
832 }
833 return(0);
834 }
835
836 /*
837 * Examine each sibling block to see if we can coalesce with
838 * at least 25% free space to spare. We need to figure out
839 * whether to merge with the forward or the backward block.
840 * We prefer coalescing with the lower numbered sibling so as
841 * to shrink a directory over time.
842 */
843 /* start with smaller blk num */
844 forward = (be32_to_cpu(info->forw) < be32_to_cpu(info->back));
845 for (i = 0; i < 2; forward = !forward, i++) {
846 if (forward)
847 blkno = be32_to_cpu(info->forw);
848 else
849 blkno = be32_to_cpu(info->back);
850 if (blkno == 0)
851 continue;
852 error = xfs_da_read_buf(state->args->trans, state->args->dp,
853 blkno, -1, &bp, state->args->whichfork);
854 if (error)
855 return(error);
856 ASSERT(bp != NULL);
857
858 node = (xfs_da_intnode_t *)info;
859 count = state->node_ents;
860 count -= state->node_ents >> 2;
861 count -= be16_to_cpu(node->hdr.count);
862 node = bp->data;
863 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
864 count -= be16_to_cpu(node->hdr.count);
865 xfs_da_brelse(state->args->trans, bp);
866 if (count >= 0)
867 break; /* fits with at least 25% to spare */
868 }
869 if (i >= 2) {
870 *action = 0;
871 return(0);
872 }
873
874 /*
875 * Make altpath point to the block we want to keep (the lower
876 * numbered block) and path point to the block we want to drop.
877 */
878 memcpy(&state->altpath, &state->path, sizeof(state->path));
879 if (blkno < blk->blkno) {
880 error = xfs_da_path_shift(state, &state->altpath, forward,
881 0, &retval);
882 if (error) {
883 return(error);
884 }
885 if (retval) {
886 *action = 0;
887 return(0);
888 }
889 } else {
890 error = xfs_da_path_shift(state, &state->path, forward,
891 0, &retval);
892 if (error) {
893 return(error);
894 }
895 if (retval) {
896 *action = 0;
897 return(0);
898 }
899 }
900 *action = 1;
901 return(0);
902}
903
904/*
905 * Walk back up the tree adjusting hash values as necessary,
906 * when we stop making changes, return.
907 */
908void
909xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
910{
911 xfs_da_state_blk_t *blk;
912 xfs_da_intnode_t *node;
913 xfs_da_node_entry_t *btree;
914 xfs_dahash_t lasthash=0;
915 int level, count;
916
917 level = path->active-1;
918 blk = &path->blk[ level ];
919 switch (blk->magic) {
920 case XFS_ATTR_LEAF_MAGIC:
921 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
922 if (count == 0)
923 return;
924 break;
925 case XFS_DIR2_LEAFN_MAGIC:
926 lasthash = xfs_dir2_leafn_lasthash(blk->bp, &count);
927 if (count == 0)
928 return;
929 break;
930 case XFS_DA_NODE_MAGIC:
931 lasthash = xfs_da_node_lasthash(blk->bp, &count);
932 if (count == 0)
933 return;
934 break;
935 }
936 for (blk--, level--; level >= 0; blk--, level--) {
937 node = blk->bp->data;
938 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
939 btree = &node->btree[ blk->index ];
940 if (be32_to_cpu(btree->hashval) == lasthash)
941 break;
942 blk->hashval = lasthash;
943 btree->hashval = cpu_to_be32(lasthash);
944 xfs_da_log_buf(state->args->trans, blk->bp,
945 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
946
947 lasthash = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
948 }
949}
950
951/*
952 * Remove an entry from an intermediate node.
953 */
954STATIC void
955xfs_da_node_remove(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk)
956{
957 xfs_da_intnode_t *node;
958 xfs_da_node_entry_t *btree;
959 int tmp;
960
961 trace_xfs_da_node_remove(state->args);
962
963 node = drop_blk->bp->data;
964 ASSERT(drop_blk->index < be16_to_cpu(node->hdr.count));
965 ASSERT(drop_blk->index >= 0);
966
967 /*
968 * Copy over the offending entry, or just zero it out.
969 */
970 btree = &node->btree[drop_blk->index];
971 if (drop_blk->index < (be16_to_cpu(node->hdr.count)-1)) {
972 tmp = be16_to_cpu(node->hdr.count) - drop_blk->index - 1;
973 tmp *= (uint)sizeof(xfs_da_node_entry_t);
974 memmove(btree, btree + 1, tmp);
975 xfs_da_log_buf(state->args->trans, drop_blk->bp,
976 XFS_DA_LOGRANGE(node, btree, tmp));
977 btree = &node->btree[be16_to_cpu(node->hdr.count)-1];
978 }
979 memset((char *)btree, 0, sizeof(xfs_da_node_entry_t));
980 xfs_da_log_buf(state->args->trans, drop_blk->bp,
981 XFS_DA_LOGRANGE(node, btree, sizeof(*btree)));
982 be16_add_cpu(&node->hdr.count, -1);
983 xfs_da_log_buf(state->args->trans, drop_blk->bp,
984 XFS_DA_LOGRANGE(node, &node->hdr, sizeof(node->hdr)));
985
986 /*
987 * Copy the last hash value from the block to propagate upwards.
988 */
989 btree--;
990 drop_blk->hashval = be32_to_cpu(btree->hashval);
991}
992
993/*
994 * Unbalance the btree elements between two intermediate nodes,
995 * move all Btree elements from one node into another.
996 */
997STATIC void
998xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
999 xfs_da_state_blk_t *save_blk)
1000{
1001 xfs_da_intnode_t *drop_node, *save_node;
1002 xfs_da_node_entry_t *btree;
1003 int tmp;
1004 xfs_trans_t *tp;
1005
1006 trace_xfs_da_node_unbalance(state->args);
1007
1008 drop_node = drop_blk->bp->data;
1009 save_node = save_blk->bp->data;
1010 ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1011 ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1012 tp = state->args->trans;
1013
1014 /*
1015 * If the dying block has lower hashvals, then move all the
1016 * elements in the remaining block up to make a hole.
1017 */
1018 if ((be32_to_cpu(drop_node->btree[0].hashval) < be32_to_cpu(save_node->btree[ 0 ].hashval)) ||
1019 (be32_to_cpu(drop_node->btree[be16_to_cpu(drop_node->hdr.count)-1].hashval) <
1020 be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval)))
1021 {
1022 btree = &save_node->btree[be16_to_cpu(drop_node->hdr.count)];
1023 tmp = be16_to_cpu(save_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1024 memmove(btree, &save_node->btree[0], tmp);
1025 btree = &save_node->btree[0];
1026 xfs_da_log_buf(tp, save_blk->bp,
1027 XFS_DA_LOGRANGE(save_node, btree,
1028 (be16_to_cpu(save_node->hdr.count) + be16_to_cpu(drop_node->hdr.count)) *
1029 sizeof(xfs_da_node_entry_t)));
1030 } else {
1031 btree = &save_node->btree[be16_to_cpu(save_node->hdr.count)];
1032 xfs_da_log_buf(tp, save_blk->bp,
1033 XFS_DA_LOGRANGE(save_node, btree,
1034 be16_to_cpu(drop_node->hdr.count) *
1035 sizeof(xfs_da_node_entry_t)));
1036 }
1037
1038 /*
1039 * Move all the B-tree elements from drop_blk to save_blk.
1040 */
1041 tmp = be16_to_cpu(drop_node->hdr.count) * (uint)sizeof(xfs_da_node_entry_t);
1042 memcpy(btree, &drop_node->btree[0], tmp);
1043 be16_add_cpu(&save_node->hdr.count, be16_to_cpu(drop_node->hdr.count));
1044
1045 xfs_da_log_buf(tp, save_blk->bp,
1046 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1047 sizeof(save_node->hdr)));
1048
1049 /*
1050 * Save the last hashval in the remaining block for upward propagation.
1051 */
1052 save_blk->hashval = be32_to_cpu(save_node->btree[be16_to_cpu(save_node->hdr.count)-1].hashval);
1053}
1054
1055/*========================================================================
1056 * Routines used for finding things in the Btree.
1057 *========================================================================*/
1058
1059/*
1060 * Walk down the Btree looking for a particular filename, filling
1061 * in the state structure as we go.
1062 *
1063 * We will set the state structure to point to each of the elements
1064 * in each of the nodes where either the hashval is or should be.
1065 *
1066 * We support duplicate hashval's so for each entry in the current
1067 * node that could contain the desired hashval, descend. This is a
1068 * pruned depth-first tree search.
1069 */
1070int /* error */
1071xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
1072{
1073 xfs_da_state_blk_t *blk;
1074 xfs_da_blkinfo_t *curr;
1075 xfs_da_intnode_t *node;
1076 xfs_da_node_entry_t *btree;
1077 xfs_dablk_t blkno;
1078 int probe, span, max, error, retval;
1079 xfs_dahash_t hashval, btreehashval;
1080 xfs_da_args_t *args;
1081
1082 args = state->args;
1083
1084 /*
1085 * Descend thru the B-tree searching each level for the right
1086 * node to use, until the right hashval is found.
1087 */
1088 blkno = (args->whichfork == XFS_DATA_FORK)? state->mp->m_dirleafblk : 0;
1089 for (blk = &state->path.blk[0], state->path.active = 1;
1090 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1091 blk++, state->path.active++) {
1092 /*
1093 * Read the next node down in the tree.
1094 */
1095 blk->blkno = blkno;
1096 error = xfs_da_read_buf(args->trans, args->dp, blkno,
1097 -1, &blk->bp, args->whichfork);
1098 if (error) {
1099 blk->blkno = 0;
1100 state->path.active--;
1101 return(error);
1102 }
1103 curr = blk->bp->data;
1104 blk->magic = be16_to_cpu(curr->magic);
1105 ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
1106 blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1107 blk->magic == XFS_ATTR_LEAF_MAGIC);
1108
1109 /*
1110 * Search an intermediate node for a match.
1111 */
1112 if (blk->magic == XFS_DA_NODE_MAGIC) {
1113 node = blk->bp->data;
1114 max = be16_to_cpu(node->hdr.count);
1115 blk->hashval = be32_to_cpu(node->btree[max-1].hashval);
1116
1117 /*
1118 * Binary search. (note: small blocks will skip loop)
1119 */
1120 probe = span = max / 2;
1121 hashval = args->hashval;
1122 for (btree = &node->btree[probe]; span > 4;
1123 btree = &node->btree[probe]) {
1124 span /= 2;
1125 btreehashval = be32_to_cpu(btree->hashval);
1126 if (btreehashval < hashval)
1127 probe += span;
1128 else if (btreehashval > hashval)
1129 probe -= span;
1130 else
1131 break;
1132 }
1133 ASSERT((probe >= 0) && (probe < max));
1134 ASSERT((span <= 4) || (be32_to_cpu(btree->hashval) == hashval));
1135
1136 /*
1137 * Since we may have duplicate hashval's, find the first
1138 * matching hashval in the node.
1139 */
1140 while ((probe > 0) && (be32_to_cpu(btree->hashval) >= hashval)) {
1141 btree--;
1142 probe--;
1143 }
1144 while ((probe < max) && (be32_to_cpu(btree->hashval) < hashval)) {
1145 btree++;
1146 probe++;
1147 }
1148
1149 /*
1150 * Pick the right block to descend on.
1151 */
1152 if (probe == max) {
1153 blk->index = max-1;
1154 blkno = be32_to_cpu(node->btree[max-1].before);
1155 } else {
1156 blk->index = probe;
1157 blkno = be32_to_cpu(btree->before);
1158 }
1159 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1160 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1161 break;
1162 } else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1163 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
1164 break;
1165 }
1166 }
1167
1168 /*
1169 * A leaf block that ends in the hashval that we are interested in
1170 * (final hashval == search hashval) means that the next block may
1171 * contain more entries with the same hashval, shift upward to the
1172 * next leaf and keep searching.
1173 */
1174 for (;;) {
1175 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1176 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1177 &blk->index, state);
1178 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1179 retval = xfs_attr_leaf_lookup_int(blk->bp, args);
1180 blk->index = args->index;
1181 args->blkno = blk->blkno;
1182 } else {
1183 ASSERT(0);
1184 return XFS_ERROR(EFSCORRUPTED);
1185 }
1186 if (((retval == ENOENT) || (retval == ENOATTR)) &&
1187 (blk->hashval == args->hashval)) {
1188 error = xfs_da_path_shift(state, &state->path, 1, 1,
1189 &retval);
1190 if (error)
1191 return(error);
1192 if (retval == 0) {
1193 continue;
1194 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1195 /* path_shift() gives ENOENT */
1196 retval = XFS_ERROR(ENOATTR);
1197 }
1198 }
1199 break;
1200 }
1201 *result = retval;
1202 return(0);
1203}
1204
1205/*========================================================================
1206 * Utility routines.
1207 *========================================================================*/
1208
1209/*
1210 * Link a new block into a doubly linked list of blocks (of whatever type).
1211 */
1212int /* error */
1213xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
1214 xfs_da_state_blk_t *new_blk)
1215{
1216 xfs_da_blkinfo_t *old_info, *new_info, *tmp_info;
1217 xfs_da_args_t *args;
1218 int before=0, error;
1219 xfs_dabuf_t *bp;
1220
1221 /*
1222 * Set up environment.
1223 */
1224 args = state->args;
1225 ASSERT(args != NULL);
1226 old_info = old_blk->bp->data;
1227 new_info = new_blk->bp->data;
1228 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1229 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1230 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1231 ASSERT(old_blk->magic == be16_to_cpu(old_info->magic));
1232 ASSERT(new_blk->magic == be16_to_cpu(new_info->magic));
1233 ASSERT(old_blk->magic == new_blk->magic);
1234
1235 switch (old_blk->magic) {
1236 case XFS_ATTR_LEAF_MAGIC:
1237 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1238 break;
1239 case XFS_DIR2_LEAFN_MAGIC:
1240 before = xfs_dir2_leafn_order(old_blk->bp, new_blk->bp);
1241 break;
1242 case XFS_DA_NODE_MAGIC:
1243 before = xfs_da_node_order(old_blk->bp, new_blk->bp);
1244 break;
1245 }
1246
1247 /*
1248 * Link blocks in appropriate order.
1249 */
1250 if (before) {
1251 /*
1252 * Link new block in before existing block.
1253 */
1254 trace_xfs_da_link_before(args);
1255 new_info->forw = cpu_to_be32(old_blk->blkno);
1256 new_info->back = old_info->back;
1257 if (old_info->back) {
1258 error = xfs_da_read_buf(args->trans, args->dp,
1259 be32_to_cpu(old_info->back),
1260 -1, &bp, args->whichfork);
1261 if (error)
1262 return(error);
1263 ASSERT(bp != NULL);
1264 tmp_info = bp->data;
1265 ASSERT(be16_to_cpu(tmp_info->magic) == be16_to_cpu(old_info->magic));
1266 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1267 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1268 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1269 xfs_da_buf_done(bp);
1270 }
1271 old_info->back = cpu_to_be32(new_blk->blkno);
1272 } else {
1273 /*
1274 * Link new block in after existing block.
1275 */
1276 trace_xfs_da_link_after(args);
1277 new_info->forw = old_info->forw;
1278 new_info->back = cpu_to_be32(old_blk->blkno);
1279 if (old_info->forw) {
1280 error = xfs_da_read_buf(args->trans, args->dp,
1281 be32_to_cpu(old_info->forw),
1282 -1, &bp, args->whichfork);
1283 if (error)
1284 return(error);
1285 ASSERT(bp != NULL);
1286 tmp_info = bp->data;
1287 ASSERT(tmp_info->magic == old_info->magic);
1288 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1289 tmp_info->back = cpu_to_be32(new_blk->blkno);
1290 xfs_da_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1291 xfs_da_buf_done(bp);
1292 }
1293 old_info->forw = cpu_to_be32(new_blk->blkno);
1294 }
1295
1296 xfs_da_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1297 xfs_da_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1298 return(0);
1299}
1300
1301/*
1302 * Compare two intermediate nodes for "order".
1303 */
1304STATIC int
1305xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
1306{
1307 xfs_da_intnode_t *node1, *node2;
1308
1309 node1 = node1_bp->data;
1310 node2 = node2_bp->data;
1311 ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
1312 node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1313 if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
1314 ((be32_to_cpu(node2->btree[0].hashval) <
1315 be32_to_cpu(node1->btree[0].hashval)) ||
1316 (be32_to_cpu(node2->btree[be16_to_cpu(node2->hdr.count)-1].hashval) <
1317 be32_to_cpu(node1->btree[be16_to_cpu(node1->hdr.count)-1].hashval)))) {
1318 return(1);
1319 }
1320 return(0);
1321}
1322
1323/*
1324 * Pick up the last hashvalue from an intermediate node.
1325 */
1326STATIC uint
1327xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
1328{
1329 xfs_da_intnode_t *node;
1330
1331 node = bp->data;
1332 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1333 if (count)
1334 *count = be16_to_cpu(node->hdr.count);
1335 if (!node->hdr.count)
1336 return(0);
1337 return be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1338}
1339
1340/*
1341 * Unlink a block from a doubly linked list of blocks.
1342 */
1343STATIC int /* error */
1344xfs_da_blk_unlink(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
1345 xfs_da_state_blk_t *save_blk)
1346{
1347 xfs_da_blkinfo_t *drop_info, *save_info, *tmp_info;
1348 xfs_da_args_t *args;
1349 xfs_dabuf_t *bp;
1350 int error;
1351
1352 /*
1353 * Set up environment.
1354 */
1355 args = state->args;
1356 ASSERT(args != NULL);
1357 save_info = save_blk->bp->data;
1358 drop_info = drop_blk->bp->data;
1359 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1360 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1361 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1362 ASSERT(save_blk->magic == be16_to_cpu(save_info->magic));
1363 ASSERT(drop_blk->magic == be16_to_cpu(drop_info->magic));
1364 ASSERT(save_blk->magic == drop_blk->magic);
1365 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1366 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1367 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1368 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1369
1370 /*
1371 * Unlink the drop block from the doubly linked chain of blocks.
1372 */
1373 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1374 trace_xfs_da_unlink_back(args);
1375 save_info->back = drop_info->back;
1376 if (drop_info->back) {
1377 error = xfs_da_read_buf(args->trans, args->dp,
1378 be32_to_cpu(drop_info->back),
1379 -1, &bp, args->whichfork);
1380 if (error)
1381 return(error);
1382 ASSERT(bp != NULL);
1383 tmp_info = bp->data;
1384 ASSERT(tmp_info->magic == save_info->magic);
1385 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1386 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1387 xfs_da_log_buf(args->trans, bp, 0,
1388 sizeof(*tmp_info) - 1);
1389 xfs_da_buf_done(bp);
1390 }
1391 } else {
1392 trace_xfs_da_unlink_forward(args);
1393 save_info->forw = drop_info->forw;
1394 if (drop_info->forw) {
1395 error = xfs_da_read_buf(args->trans, args->dp,
1396 be32_to_cpu(drop_info->forw),
1397 -1, &bp, args->whichfork);
1398 if (error)
1399 return(error);
1400 ASSERT(bp != NULL);
1401 tmp_info = bp->data;
1402 ASSERT(tmp_info->magic == save_info->magic);
1403 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1404 tmp_info->back = cpu_to_be32(save_blk->blkno);
1405 xfs_da_log_buf(args->trans, bp, 0,
1406 sizeof(*tmp_info) - 1);
1407 xfs_da_buf_done(bp);
1408 }
1409 }
1410
1411 xfs_da_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1412 return(0);
1413}
1414
1415/*
1416 * Move a path "forward" or "!forward" one block at the current level.
1417 *
1418 * This routine will adjust a "path" to point to the next block
1419 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1420 * Btree, including updating pointers to the intermediate nodes between
1421 * the new bottom and the root.
1422 */
1423int /* error */
1424xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
1425 int forward, int release, int *result)
1426{
1427 xfs_da_state_blk_t *blk;
1428 xfs_da_blkinfo_t *info;
1429 xfs_da_intnode_t *node;
1430 xfs_da_args_t *args;
1431 xfs_dablk_t blkno=0;
1432 int level, error;
1433
1434 /*
1435 * Roll up the Btree looking for the first block where our
1436 * current index is not at the edge of the block. Note that
1437 * we skip the bottom layer because we want the sibling block.
1438 */
1439 args = state->args;
1440 ASSERT(args != NULL);
1441 ASSERT(path != NULL);
1442 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1443 level = (path->active-1) - 1; /* skip bottom layer in path */
1444 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1445 ASSERT(blk->bp != NULL);
1446 node = blk->bp->data;
1447 ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1448 if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
1449 blk->index++;
1450 blkno = be32_to_cpu(node->btree[blk->index].before);
1451 break;
1452 } else if (!forward && (blk->index > 0)) {
1453 blk->index--;
1454 blkno = be32_to_cpu(node->btree[blk->index].before);
1455 break;
1456 }
1457 }
1458 if (level < 0) {
1459 *result = XFS_ERROR(ENOENT); /* we're out of our tree */
1460 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1461 return(0);
1462 }
1463
1464 /*
1465 * Roll down the edge of the subtree until we reach the
1466 * same depth we were at originally.
1467 */
1468 for (blk++, level++; level < path->active; blk++, level++) {
1469 /*
1470 * Release the old block.
1471 * (if it's dirty, trans won't actually let go)
1472 */
1473 if (release)
1474 xfs_da_brelse(args->trans, blk->bp);
1475
1476 /*
1477 * Read the next child block.
1478 */
1479 blk->blkno = blkno;
1480 error = xfs_da_read_buf(args->trans, args->dp, blkno, -1,
1481 &blk->bp, args->whichfork);
1482 if (error)
1483 return(error);
1484 ASSERT(blk->bp != NULL);
1485 info = blk->bp->data;
1486 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1487 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1488 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
1489 blk->magic = be16_to_cpu(info->magic);
1490 if (blk->magic == XFS_DA_NODE_MAGIC) {
1491 node = (xfs_da_intnode_t *)info;
1492 blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
1493 if (forward)
1494 blk->index = 0;
1495 else
1496 blk->index = be16_to_cpu(node->hdr.count)-1;
1497 blkno = be32_to_cpu(node->btree[blk->index].before);
1498 } else {
1499 ASSERT(level == path->active-1);
1500 blk->index = 0;
1501 switch(blk->magic) {
1502 case XFS_ATTR_LEAF_MAGIC:
1503 blk->hashval = xfs_attr_leaf_lasthash(blk->bp,
1504 NULL);
1505 break;
1506 case XFS_DIR2_LEAFN_MAGIC:
1507 blk->hashval = xfs_dir2_leafn_lasthash(blk->bp,
1508 NULL);
1509 break;
1510 default:
1511 ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC ||
1512 blk->magic == XFS_DIR2_LEAFN_MAGIC);
1513 break;
1514 }
1515 }
1516 }
1517 *result = 0;
1518 return(0);
1519}
1520
1521
1522/*========================================================================
1523 * Utility routines.
1524 *========================================================================*/
1525
1526/*
1527 * Implement a simple hash on a character string.
1528 * Rotate the hash value by 7 bits, then XOR each character in.
1529 * This is implemented with some source-level loop unrolling.
1530 */
1531xfs_dahash_t
1532xfs_da_hashname(const __uint8_t *name, int namelen)
1533{
1534 xfs_dahash_t hash;
1535
1536 /*
1537 * Do four characters at a time as long as we can.
1538 */
1539 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
1540 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
1541 (name[3] << 0) ^ rol32(hash, 7 * 4);
1542
1543 /*
1544 * Now do the rest of the characters.
1545 */
1546 switch (namelen) {
1547 case 3:
1548 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
1549 rol32(hash, 7 * 3);
1550 case 2:
1551 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
1552 case 1:
1553 return (name[0] << 0) ^ rol32(hash, 7 * 1);
1554 default: /* case 0: */
1555 return hash;
1556 }
1557}
1558
1559enum xfs_dacmp
1560xfs_da_compname(
1561 struct xfs_da_args *args,
1562 const unsigned char *name,
1563 int len)
1564{
1565 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
1566 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
1567}
1568
1569static xfs_dahash_t
1570xfs_default_hashname(
1571 struct xfs_name *name)
1572{
1573 return xfs_da_hashname(name->name, name->len);
1574}
1575
1576const struct xfs_nameops xfs_default_nameops = {
1577 .hashname = xfs_default_hashname,
1578 .compname = xfs_da_compname
1579};
1580
1581int
1582xfs_da_grow_inode_int(
1583 struct xfs_da_args *args,
1584 xfs_fileoff_t *bno,
1585 int count)
1586{
1587 struct xfs_trans *tp = args->trans;
1588 struct xfs_inode *dp = args->dp;
1589 int w = args->whichfork;
1590 xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
1591 struct xfs_bmbt_irec map, *mapp;
1592 int nmap, error, got, i, mapi;
1593
1594 /*
1595 * Find a spot in the file space to put the new block.
1596 */
1597 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
1598 if (error)
1599 return error;
1600
1601 /*
1602 * Try mapping it in one filesystem block.
1603 */
1604 nmap = 1;
1605 ASSERT(args->firstblock != NULL);
1606 error = xfs_bmapi_write(tp, dp, *bno, count,
1607 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
1608 args->firstblock, args->total, &map, &nmap,
1609 args->flist);
1610 if (error)
1611 return error;
1612
1613 ASSERT(nmap <= 1);
1614 if (nmap == 1) {
1615 mapp = ↦
1616 mapi = 1;
1617 } else if (nmap == 0 && count > 1) {
1618 xfs_fileoff_t b;
1619 int c;
1620
1621 /*
1622 * If we didn't get it and the block might work if fragmented,
1623 * try without the CONTIG flag. Loop until we get it all.
1624 */
1625 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
1626 for (b = *bno, mapi = 0; b < *bno + count; ) {
1627 nmap = MIN(XFS_BMAP_MAX_NMAP, count);
1628 c = (int)(*bno + count - b);
1629 error = xfs_bmapi_write(tp, dp, b, c,
1630 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1631 args->firstblock, args->total,
1632 &mapp[mapi], &nmap, args->flist);
1633 if (error)
1634 goto out_free_map;
1635 if (nmap < 1)
1636 break;
1637 mapi += nmap;
1638 b = mapp[mapi - 1].br_startoff +
1639 mapp[mapi - 1].br_blockcount;
1640 }
1641 } else {
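		/*
		 * A single-block request failed outright; leave mapi at zero
		 * so the length check below returns ENOSPC.
		 */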
1642 mapi = 0;
1643 mapp = NULL;
1644 }
1645
1646 /*
1647 * Count the blocks we got, make sure it matches the total.
1648 */
1649 for (i = 0, got = 0; i < mapi; i++)
1650 got += mapp[i].br_blockcount;
1651 if (got != count || mapp[0].br_startoff != *bno ||
1652 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
1653 *bno + count) {
1654 error = XFS_ERROR(ENOSPC);
1655 goto out_free_map;
1656 }
1657
1658 /* account for newly allocated blocks in reserved blocks total */
1659 args->total -= dp->i_d.di_nblocks - nblks;
1660
1661out_free_map:
1662 if (mapp != &map)
1663 kmem_free(mapp);
1664 return error;
1665}
1666
1667/*
1668 * Add a block to the btree ahead of the file.
1669 * Return the new block number to the caller.
1670 */
1671int
1672xfs_da_grow_inode(
1673 struct xfs_da_args *args,
1674 xfs_dablk_t *new_blkno)
1675{
1676 xfs_fileoff_t bno;
1677 int count;
1678 int error;
1679
1680 trace_xfs_da_grow_inode(args);
1681
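	/*
	 * Directory blocks are allocated at or beyond the dir leaf-block
	 * offset and may span several filesystem blocks; attribute fork
	 * blocks are single filesystem blocks starting at offset zero.
	 */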
1682 if (args->whichfork == XFS_DATA_FORK) {
1683 bno = args->dp->i_mount->m_dirleafblk;
1684 count = args->dp->i_mount->m_dirblkfsbs;
1685 } else {
1686 bno = 0;
1687 count = 1;
1688 }
1689
1690 error = xfs_da_grow_inode_int(args, &bno, count);
1691 if (!error)
1692 *new_blkno = (xfs_dablk_t)bno;
1693 return error;
1694}
1695
1696/*
1697 * Ick. We need to always be able to remove a btree block, even
1698 * if there's no space reservation because the filesystem is full.
1699 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
1700 * It swaps the target block with the last block in the file.  The
1701 * last block in the file can always be removed, since removing it
1702 * cannot cause a bmap btree split.
1703 */
1704STATIC int
1705xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
1706 xfs_dabuf_t **dead_bufp)
1707{
1708 xfs_dablk_t dead_blkno, last_blkno, sib_blkno, par_blkno;
1709 xfs_dabuf_t *dead_buf, *last_buf, *sib_buf, *par_buf;
1710 xfs_fileoff_t lastoff;
1711 xfs_inode_t *ip;
1712 xfs_trans_t *tp;
1713 xfs_mount_t *mp;
1714 int error, w, entno, level, dead_level;
1715 xfs_da_blkinfo_t *dead_info, *sib_info;
1716 xfs_da_intnode_t *par_node, *dead_node;
1717 xfs_dir2_leaf_t *dead_leaf2;
1718 xfs_dahash_t dead_hash;
1719
1720 trace_xfs_da_swap_lastblock(args);
1721
1722 dead_buf = *dead_bufp;
1723 dead_blkno = *dead_blknop;
1724 tp = args->trans;
1725 ip = args->dp;
1726 w = args->whichfork;
1727 ASSERT(w == XFS_DATA_FORK);
1728 mp = ip->i_mount;
1729 lastoff = mp->m_dirfreeblk;
1730 error = xfs_bmap_last_before(tp, ip, &lastoff, w);
1731 if (error)
1732 return error;
1733 if (unlikely(lastoff == 0)) {
1734 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
1735 mp);
1736 return XFS_ERROR(EFSCORRUPTED);
1737 }
1738 /*
1739 * Read the last block in the btree space.
1740 */
1741 last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
1742 if ((error = xfs_da_read_buf(tp, ip, last_blkno, -1, &last_buf, w)))
1743 return error;
1744 /*
1745 * Copy the last block into the dead buffer and log it.
1746 */
1747 memcpy(dead_buf->data, last_buf->data, mp->m_dirblksize);
1748 xfs_da_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
1749 dead_info = dead_buf->data;
1750 /*
1751 * Get values from the moved block.
1752 */
1753 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
1754 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
1755 dead_level = 0;
1756 dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
1757 } else {
1758 ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
1759 dead_node = (xfs_da_intnode_t *)dead_info;
1760 dead_level = be16_to_cpu(dead_node->hdr.level);
1761 dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
1762 }
1763 sib_buf = par_buf = NULL;
1764 /*
1765 * If the moved block has a left sibling, fix up the pointers.
1766 */
1767 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
1768 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1769 goto done;
1770 sib_info = sib_buf->data;
1771 if (unlikely(
1772 be32_to_cpu(sib_info->forw) != last_blkno ||
1773 sib_info->magic != dead_info->magic)) {
1774 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
1775 XFS_ERRLEVEL_LOW, mp);
1776 error = XFS_ERROR(EFSCORRUPTED);
1777 goto done;
1778 }
1779 sib_info->forw = cpu_to_be32(dead_blkno);
1780 xfs_da_log_buf(tp, sib_buf,
1781 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
1782 sizeof(sib_info->forw)));
1783 xfs_da_buf_done(sib_buf);
1784 sib_buf = NULL;
1785 }
1786 /*
1787 * If the moved block has a right sibling, fix up the pointers.
1788 */
1789 if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
1790 if ((error = xfs_da_read_buf(tp, ip, sib_blkno, -1, &sib_buf, w)))
1791 goto done;
1792 sib_info = sib_buf->data;
1793 if (unlikely(
1794 be32_to_cpu(sib_info->back) != last_blkno ||
1795 sib_info->magic != dead_info->magic)) {
1796 XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
1797 XFS_ERRLEVEL_LOW, mp);
1798 error = XFS_ERROR(EFSCORRUPTED);
1799 goto done;
1800 }
1801 sib_info->back = cpu_to_be32(dead_blkno);
1802 xfs_da_log_buf(tp, sib_buf,
1803 XFS_DA_LOGRANGE(sib_info, &sib_info->back,
1804 sizeof(sib_info->back)));
1805 xfs_da_buf_done(sib_buf);
1806 sib_buf = NULL;
1807 }
1808 par_blkno = mp->m_dirleafblk;
1809 level = -1;
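	/* level is unknown until the root block is read in the loop below */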
1810 /*
1811 * Walk down the tree looking for the parent of the moved block.
1812 */
1813 for (;;) {
1814 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1815 goto done;
1816 par_node = par_buf->data;
1817 if (unlikely(par_node->hdr.info.magic !=
1818 cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1819 (level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
1820 XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
1821 XFS_ERRLEVEL_LOW, mp);
1822 error = XFS_ERROR(EFSCORRUPTED);
1823 goto done;
1824 }
1825 level = be16_to_cpu(par_node->hdr.level);
1826 for (entno = 0;
1827 entno < be16_to_cpu(par_node->hdr.count) &&
1828 be32_to_cpu(par_node->btree[entno].hashval) < dead_hash;
1829 entno++)
1830 continue;
1831 if (unlikely(entno == be16_to_cpu(par_node->hdr.count))) {
1832 XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
1833 XFS_ERRLEVEL_LOW, mp);
1834 error = XFS_ERROR(EFSCORRUPTED);
1835 goto done;
1836 }
1837 par_blkno = be32_to_cpu(par_node->btree[entno].before);
1838 if (level == dead_level + 1)
1839 break;
1840 xfs_da_brelse(tp, par_buf);
1841 par_buf = NULL;
1842 }
1843 /*
1844 * We're in the right parent block.
1845 * Look for the right entry.
1846 */
1847 for (;;) {
1848 for (;
1849 entno < be16_to_cpu(par_node->hdr.count) &&
1850 be32_to_cpu(par_node->btree[entno].before) != last_blkno;
1851 entno++)
1852 continue;
1853 if (entno < be16_to_cpu(par_node->hdr.count))
1854 break;
1855 par_blkno = be32_to_cpu(par_node->hdr.info.forw);
1856 xfs_da_brelse(tp, par_buf);
1857 par_buf = NULL;
1858 if (unlikely(par_blkno == 0)) {
1859 XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
1860 XFS_ERRLEVEL_LOW, mp);
1861 error = XFS_ERROR(EFSCORRUPTED);
1862 goto done;
1863 }
1864 if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
1865 goto done;
1866 par_node = par_buf->data;
1867 if (unlikely(
1868 be16_to_cpu(par_node->hdr.level) != level ||
1869 par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
1870 XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
1871 XFS_ERRLEVEL_LOW, mp);
1872 error = XFS_ERROR(EFSCORRUPTED);
1873 goto done;
1874 }
1875 entno = 0;
1876 }
1877 /*
1878 * Update the parent entry pointing to the moved block.
1879 */
1880 par_node->btree[entno].before = cpu_to_be32(dead_blkno);
1881 xfs_da_log_buf(tp, par_buf,
1882 XFS_DA_LOGRANGE(par_node, &par_node->btree[entno].before,
1883 sizeof(par_node->btree[entno].before)));
1884 xfs_da_buf_done(par_buf);
1885 xfs_da_buf_done(dead_buf);
1886 *dead_blknop = last_blkno;
1887 *dead_bufp = last_buf;
1888 return 0;
1889done:
1890 if (par_buf)
1891 xfs_da_brelse(tp, par_buf);
1892 if (sib_buf)
1893 xfs_da_brelse(tp, sib_buf);
1894 xfs_da_brelse(tp, last_buf);
1895 return error;
1896}
1897
1898/*
1899 * Remove a btree block from a directory or attribute.
1900 */
1901int
1902xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
1903 xfs_dabuf_t *dead_buf)
1904{
1905 xfs_inode_t *dp;
1906 int done, error, w, count;
1907 xfs_trans_t *tp;
1908 xfs_mount_t *mp;
1909
1910 trace_xfs_da_shrink_inode(args);
1911
1912 dp = args->dp;
1913 w = args->whichfork;
1914 tp = args->trans;
1915 mp = dp->i_mount;
1916 if (w == XFS_DATA_FORK)
1917 count = mp->m_dirblkfsbs;
1918 else
1919 count = 1;
1920 for (;;) {
1921 /*
1922 * Remove extents. If we get ENOSPC for a dir we have to move
1923 * the last block to the place we want to kill.
1924 */
1925 if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
1926 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
1927 0, args->firstblock, args->flist,
1928 &done)) == ENOSPC) {
1929 if (w != XFS_DATA_FORK)
1930 break;
1931 if ((error = xfs_da_swap_lastblock(args, &dead_blkno,
1932 &dead_buf)))
1933 break;
1934 } else {
1935 break;
1936 }
1937 }
1938 xfs_da_binval(tp, dead_buf);
1939 return error;
1940}
1941
1942/*
1943 * See if the mapping(s) for this btree block are valid, i.e.
1944 * don't contain holes, are logically contiguous, and cover the whole range.
1945 */
1946STATIC int
1947xfs_da_map_covers_blocks(
1948 int nmap,
1949 xfs_bmbt_irec_t *mapp,
1950 xfs_dablk_t bno,
1951 int count)
1952{
1953 int i;
1954 xfs_fileoff_t off;
1955
1956 for (i = 0, off = bno; i < nmap; i++) {
1957 if (mapp[i].br_startblock == HOLESTARTBLOCK ||
1958 mapp[i].br_startblock == DELAYSTARTBLOCK) {
1959 return 0;
1960 }
1961 if (off != mapp[i].br_startoff) {
1962 return 0;
1963 }
1964 off += mapp[i].br_blockcount;
1965 }
1966 return off == bno + count;
1967}
1968
1969/*
1970 * Make a dabuf.
1971 * Used for get_buf, read_buf, read_bufr, and reada_buf.
1972 */
1973STATIC int
1974xfs_da_do_buf(
1975 xfs_trans_t *trans,
1976 xfs_inode_t *dp,
1977 xfs_dablk_t bno,
1978 xfs_daddr_t *mappedbnop,
1979 xfs_dabuf_t **bpp,
1980 int whichfork,
1981 int caller)
1982{
1983 xfs_buf_t *bp = NULL;
1984 xfs_buf_t **bplist;
1985 int error=0;
1986 int i;
1987 xfs_bmbt_irec_t map;
1988 xfs_bmbt_irec_t *mapp;
1989 xfs_daddr_t mappedbno;
1990 xfs_mount_t *mp;
1991 int nbplist=0;
1992 int nfsb;
1993 int nmap;
1994 xfs_dabuf_t *rbp;
1995
1996 mp = dp->i_mount;
1997 nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
1998 mappedbno = *mappedbnop;
1999 /*
2000 * Caller doesn't have a mapping. -2 means don't complain
2001 * if we land in a hole.
2002 */
2003 if (mappedbno == -1 || mappedbno == -2) {
2004 /*
2005 * Optimize the one-block case.
2006 */
2007 if (nfsb == 1)
2008 mapp = ↦
2009 else
2010 mapp = kmem_alloc(sizeof(*mapp) * nfsb, KM_SLEEP);
2011
2012 nmap = nfsb;
2013 error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, mapp,
2014 &nmap, xfs_bmapi_aflag(whichfork));
2015 if (error)
2016 goto exit0;
2017 } else {
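		/*
		 * The caller supplied the disk address, so build a single
		 * trivial mapping instead of looking one up.
		 */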
2018 map.br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
2019 map.br_startoff = (xfs_fileoff_t)bno;
2020 map.br_blockcount = nfsb;
2021 mapp = ↦
2022 nmap = 1;
2023 }
2024 if (!xfs_da_map_covers_blocks(nmap, mapp, bno, nfsb)) {
2025 error = mappedbno == -2 ? 0 : XFS_ERROR(EFSCORRUPTED);
2026 if (unlikely(error == EFSCORRUPTED)) {
2027 if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
2028 xfs_alert(mp, "%s: bno %lld dir: inode %lld",
2029 __func__, (long long)bno,
2030 (long long)dp->i_ino);
2031 for (i = 0; i < nmap; i++) {
2032 xfs_alert(mp,
2033"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
2034 i,
2035 (long long)mapp[i].br_startoff,
2036 (long long)mapp[i].br_startblock,
2037 (long long)mapp[i].br_blockcount,
2038 mapp[i].br_state);
2039 }
2040 }
2041 XFS_ERROR_REPORT("xfs_da_do_buf(1)",
2042 XFS_ERRLEVEL_LOW, mp);
2043 }
2044 goto exit0;
2045 }
2046 if (caller != 3 && nmap > 1) {
2047 bplist = kmem_alloc(sizeof(*bplist) * nmap, KM_SLEEP);
2048 nbplist = 0;
2049 } else
2050 bplist = NULL;
2051 /*
2052 * Turn the mapping(s) into buffer(s).
2053 */
2054 for (i = 0; i < nmap; i++) {
2055 int nmapped;
2056
2057 mappedbno = XFS_FSB_TO_DADDR(mp, mapp[i].br_startblock);
2058 if (i == 0)
2059 *mappedbnop = mappedbno;
2060 nmapped = (int)XFS_FSB_TO_BB(mp, mapp[i].br_blockcount);
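		/*
		 * caller 0 gets the buffer, 1 and 2 read it, and 3 only
		 * issues readahead and returns no buffer.
		 */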
2061 switch (caller) {
2062 case 0:
2063 bp = xfs_trans_get_buf(trans, mp->m_ddev_targp,
2064 mappedbno, nmapped, 0);
2065 error = bp ? bp->b_error : XFS_ERROR(EIO);
2066 break;
2067 case 1:
2068 case 2:
2069 bp = NULL;
2070 error = xfs_trans_read_buf(mp, trans, mp->m_ddev_targp,
2071 mappedbno, nmapped, 0, &bp);
2072 break;
2073 case 3:
2074 xfs_buf_readahead(mp->m_ddev_targp, mappedbno, nmapped);
2075 error = 0;
2076 bp = NULL;
2077 break;
2078 }
2079 if (error) {
2080 if (bp)
2081 xfs_trans_brelse(trans, bp);
2082 goto exit1;
2083 }
2084 if (!bp)
2085 continue;
2086 if (caller == 1) {
2087 if (whichfork == XFS_ATTR_FORK)
2088 xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
2089 else
2090 xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
2091 }
2092 if (bplist) {
2093 bplist[nbplist++] = bp;
2094 }
2095 }
2096 /*
2097 * Build a dabuf structure.
2098 */
2099 if (bplist) {
2100 rbp = xfs_da_buf_make(nbplist, bplist);
2101 } else if (bp)
2102 rbp = xfs_da_buf_make(1, &bp);
2103 else
2104 rbp = NULL;
2105 /*
2106 * For read_buf, check the magic number.
2107 */
2108 if (caller == 1) {
2109 xfs_dir2_data_hdr_t *hdr = rbp->data;
2110 xfs_dir2_free_t *free = rbp->data;
2111 xfs_da_blkinfo_t *info = rbp->data;
2112 uint magic, magic1;
2113
2114 magic = be16_to_cpu(info->magic);
2115 magic1 = be32_to_cpu(hdr->magic);
2116 if (unlikely(
2117 XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
2118 (magic != XFS_ATTR_LEAF_MAGIC) &&
2119 (magic != XFS_DIR2_LEAF1_MAGIC) &&
2120 (magic != XFS_DIR2_LEAFN_MAGIC) &&
2121 (magic1 != XFS_DIR2_BLOCK_MAGIC) &&
2122 (magic1 != XFS_DIR2_DATA_MAGIC) &&
2123 (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
2124 mp, XFS_ERRTAG_DA_READ_BUF,
2125 XFS_RANDOM_DA_READ_BUF))) {
2126 trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
2127 XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
2128 XFS_ERRLEVEL_LOW, mp, info);
2129 error = XFS_ERROR(EFSCORRUPTED);
2130 xfs_da_brelse(trans, rbp);
2131 nbplist = 0;
2132 goto exit1;
2133 }
2134 }
2135 if (bplist) {
2136 kmem_free(bplist);
2137 }
2138 if (mapp != &map) {
2139 kmem_free(mapp);
2140 }
2141 if (bpp)
2142 *bpp = rbp;
2143 return 0;
2144exit1:
2145 if (bplist) {
2146 for (i = 0; i < nbplist; i++)
2147 xfs_trans_brelse(trans, bplist[i]);
2148 kmem_free(bplist);
2149 }
2150exit0:
2151 if (mapp != &map)
2152 kmem_free(mapp);
2153 if (bpp)
2154 *bpp = NULL;
2155 return error;
2156}
2157
2158/*
2159 * Get a buffer for the dir/attr block.
2160 */
2161int
2162xfs_da_get_buf(
2163 xfs_trans_t *trans,
2164 xfs_inode_t *dp,
2165 xfs_dablk_t bno,
2166 xfs_daddr_t mappedbno,
2167 xfs_dabuf_t **bpp,
2168 int whichfork)
2169{
2170 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
2171}
2172
2173/*
2174 * Get a buffer for the dir/attr block, fill in the contents.
2175 */
2176int
2177xfs_da_read_buf(
2178 xfs_trans_t *trans,
2179 xfs_inode_t *dp,
2180 xfs_dablk_t bno,
2181 xfs_daddr_t mappedbno,
2182 xfs_dabuf_t **bpp,
2183 int whichfork)
2184{
2185 return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
2186}
2187
2188/*
2189 * Readahead the dir/attr block.
2190 */
2191xfs_daddr_t
2192xfs_da_reada_buf(
2193 xfs_trans_t *trans,
2194 xfs_inode_t *dp,
2195 xfs_dablk_t bno,
2196 int whichfork)
2197{
2198 xfs_daddr_t rval;
2199
2200 rval = -1;
2201 if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
2202 return -1;
2203 else
2204 return rval;
2205}
2206
2207kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
2208kmem_zone_t *xfs_dabuf_zone; /* dabuf zone */
2209
2210/*
2211 * Allocate a dir-state structure.
2212 * We don't put them on the stack since they're large.
2213 */
2214xfs_da_state_t *
2215xfs_da_state_alloc(void)
2216{
2217 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
2218}
2219
2220/*
2221 * Kill the altpath contents of a da-state structure.
2222 */
2223STATIC void
2224xfs_da_state_kill_altpath(xfs_da_state_t *state)
2225{
2226 int i;
2227
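	/* release altpath buffers, skipping any shared with the main path */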
2228 for (i = 0; i < state->altpath.active; i++) {
2229 if (state->altpath.blk[i].bp) {
2230 if (state->altpath.blk[i].bp != state->path.blk[i].bp)
2231 xfs_da_buf_done(state->altpath.blk[i].bp);
2232 state->altpath.blk[i].bp = NULL;
2233 }
2234 }
2235 state->altpath.active = 0;
2236}
2237
2238/*
2239 * Free a da-state structure.
2240 */
2241void
2242xfs_da_state_free(xfs_da_state_t *state)
2243{
2244 int i;
2245
2246 xfs_da_state_kill_altpath(state);
2247 for (i = 0; i < state->path.active; i++) {
2248 if (state->path.blk[i].bp)
2249 xfs_da_buf_done(state->path.blk[i].bp);
2250 }
2251 if (state->extravalid && state->extrablk.bp)
2252 xfs_da_buf_done(state->extrablk.bp);
2253#ifdef DEBUG
2254 memset((char *)state, 0, sizeof(*state));
2255#endif /* DEBUG */
2256 kmem_zone_free(xfs_da_state_zone, state);
2257}
2258
2259/*
2260 * Create a dabuf.
2261 */
2262/* ARGSUSED */
2263STATIC xfs_dabuf_t *
2264xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
2265{
2266 xfs_buf_t *bp;
2267 xfs_dabuf_t *dabuf;
2268 int i;
2269 int off;
2270
2271 if (nbuf == 1)
2272 dabuf = kmem_zone_alloc(xfs_dabuf_zone, KM_NOFS);
2273 else
2274 dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
2275 dabuf->dirty = 0;
2276 if (nbuf == 1) {
2277 dabuf->nbuf = 1;
2278 bp = bps[0];
2279 dabuf->bbcount = bp->b_length;
2280 dabuf->data = bp->b_addr;
2281 dabuf->bps[0] = bp;
2282 } else {
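		/*
		 * Multiple underlying buffers: allocate one contiguous data
		 * area and copy each buffer into it so callers see a single
		 * logically contiguous block.
		 */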
2283 dabuf->nbuf = nbuf;
2284 for (i = 0, dabuf->bbcount = 0; i < nbuf; i++) {
2285 dabuf->bps[i] = bp = bps[i];
2286 dabuf->bbcount += bp->b_length;
2287 }
2288 dabuf->data = kmem_alloc(BBTOB(dabuf->bbcount), KM_SLEEP);
2289 for (i = off = 0; i < nbuf; i++, off += BBTOB(bp->b_length)) {
2290 bp = bps[i];
2291 memcpy((char *)dabuf->data + off, bp->b_addr,
2292 BBTOB(bp->b_length));
2293 }
2294 }
2295 return dabuf;
2296}
2297
2298/*
2299 * Un-dirty a dabuf.
2300 */
2301STATIC void
2302xfs_da_buf_clean(xfs_dabuf_t *dabuf)
2303{
2304 xfs_buf_t *bp;
2305 int i;
2306 int off;
2307
2308 if (dabuf->dirty) {
2309 ASSERT(dabuf->nbuf > 1);
2310 dabuf->dirty = 0;
2311 for (i = off = 0; i < dabuf->nbuf;
2312 i++, off += BBTOB(bp->b_length)) {
2313 bp = dabuf->bps[i];
2314 memcpy(bp->b_addr, dabuf->data + off,
2315 BBTOB(bp->b_length));
2316 }
2317 }
2318}
2319
2320/*
2321 * Release a dabuf.
2322 */
2323void
2324xfs_da_buf_done(xfs_dabuf_t *dabuf)
2325{
2326 ASSERT(dabuf);
2327 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2328 if (dabuf->dirty)
2329 xfs_da_buf_clean(dabuf);
2330 if (dabuf->nbuf > 1) {
2331 kmem_free(dabuf->data);
2332 kmem_free(dabuf);
2333 } else {
2334 kmem_zone_free(xfs_dabuf_zone, dabuf);
2335 }
2336}
2337
2338/*
2339 * Log transaction from a dabuf.
2340 */
2341void
2342xfs_da_log_buf(xfs_trans_t *tp, xfs_dabuf_t *dabuf, uint first, uint last)
2343{
2344 xfs_buf_t *bp;
2345 uint f;
2346 int i;
2347 uint l;
2348 int off;
2349
2350 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2351 if (dabuf->nbuf == 1) {
2352 ASSERT(dabuf->data == dabuf->bps[0]->b_addr);
2353 xfs_trans_log_buf(tp, dabuf->bps[0], first, last);
2354 return;
2355 }
2356 dabuf->dirty = 1;
2357 ASSERT(first <= last);
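	/*
	 * Map the logical byte range [first, last] onto each underlying
	 * buffer and log only the overlapping part of each one.
	 */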
2358 for (i = off = 0; i < dabuf->nbuf; i++, off += BBTOB(bp->b_length)) {
2359 bp = dabuf->bps[i];
2360 f = off;
2361 l = f + BBTOB(bp->b_length) - 1;
2362 if (f < first)
2363 f = first;
2364 if (l > last)
2365 l = last;
2366 if (f <= l)
2367 xfs_trans_log_buf(tp, bp, f - off, l - off);
2368 /*
2369 * B_DONE is set by xfs_trans_log_buf.
2370 * If we don't set it on a new buffer (get, not read)
2371 * then if we don't put anything in the buffer it won't
2372 * be set, and at commit it is released into the cache,
2373 * and then a read will fail.
2374 */
2375 else if (!(XFS_BUF_ISDONE(bp)))
2376 XFS_BUF_DONE(bp);
2377 }
2378 ASSERT(last < off);
2379}
2380
2381/*
2382 * Release dabuf from a transaction.
2383 * Have to free up the dabuf before the buffers are released,
2384 * since the synchronization on the dabuf is really the lock on the buffer.
2385 */
2386void
2387xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2388{
2389 xfs_buf_t *bp;
2390 xfs_buf_t **bplist;
2391 int i;
2392 int nbuf;
2393
2394 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2395 if ((nbuf = dabuf->nbuf) == 1) {
2396 bplist = &bp;
2397 bp = dabuf->bps[0];
2398 } else {
2399 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2400 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2401 }
2402 xfs_da_buf_done(dabuf);
2403 for (i = 0; i < nbuf; i++)
2404 xfs_trans_brelse(tp, bplist[i]);
2405 if (bplist != &bp)
2406 kmem_free(bplist);
2407}
2408
2409/*
2410 * Invalidate dabuf from a transaction.
2411 */
2412void
2413xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
2414{
2415 xfs_buf_t *bp;
2416 xfs_buf_t **bplist;
2417 int i;
2418 int nbuf;
2419
2420 ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
2421 if ((nbuf = dabuf->nbuf) == 1) {
2422 bplist = &bp;
2423 bp = dabuf->bps[0];
2424 } else {
2425 bplist = kmem_alloc(nbuf * sizeof(*bplist), KM_SLEEP);
2426 memcpy(bplist, dabuf->bps, nbuf * sizeof(*bplist));
2427 }
2428 xfs_da_buf_done(dabuf);
2429 for (i = 0; i < nbuf; i++)
2430 xfs_trans_binval(tp, bplist[i]);
2431 if (bplist != &bp)
2432 kmem_free(bplist);
2433}
2434
2435/*
2436 * Get the first daddr from a dabuf.
2437 */
2438xfs_daddr_t
2439xfs_da_blkno(xfs_dabuf_t *dabuf)
2440{
2441 ASSERT(dabuf->nbuf);
2442 ASSERT(dabuf->data);
2443 return XFS_BUF_ADDR(dabuf->bps[0]);
2444}