// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/hfsplus/bnode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle basic btree node operations
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/swap.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Copy a specified range of bytes from the raw data of a node */
void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_from_page(buf, *pagep, off, l);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_from_page(buf, *++pagep, 0, l);
	}
}

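/* Read a big-endian 16-bit value from a node and return it in CPU order */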
u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
{
	__be16 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 2);
	return be16_to_cpu(data);
}

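/* Read a single byte from a node */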
u8 hfs_bnode_read_u8(struct hfs_bnode *node, int off)
{
	u8 data;
	/* TODO: optimize later... */
	hfs_bnode_read(node, &data, off, 1);
	return data;
}

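/*
 * Read a record key at the given offset; the key length comes from the
 * record itself for leaf nodes and variable-length index keys, otherwise
 * the tree's fixed maximum key length is used.
 */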
void hfs_bnode_read_key(struct hfs_bnode *node, void *key, int off)
{
	struct hfs_btree *tree;
	int key_len;

	tree = node->tree;
	if (node->type == HFS_NODE_LEAF ||
	    tree->attributes & HFS_TREE_VARIDXKEYS ||
	    node->tree->cnid == HFSPLUS_ATTR_CNID)
		key_len = hfs_bnode_read_u16(node, off) + 2;
	else
		key_len = tree->max_key_len + 2;

	hfs_bnode_read(node, key, off, key_len);
}

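/* Copy a range of bytes into a node, dirtying each touched page */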
void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memcpy_to_page(*pagep, off, buf, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		buf += l;
		l = min_t(int, len, PAGE_SIZE);
		memcpy_to_page(*++pagep, 0, buf, l);
		set_page_dirty(*pagep);
	}
}

void hfs_bnode_write_u16(struct hfs_bnode *node, int off, u16 data)
{
	__be16 v = cpu_to_be16(data);
	/* TODO: optimize later... */
	hfs_bnode_write(node, &v, off, 2);
}

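/* Zero a range of bytes in a node, dirtying each touched page */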
void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
{
	struct page **pagep;
	int l;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;

	l = min_t(int, len, PAGE_SIZE - off);
	memzero_page(*pagep, off, l);
	set_page_dirty(*pagep);

	while ((len -= l) != 0) {
		l = min_t(int, len, PAGE_SIZE);
		memzero_page(*++pagep, 0, l);
		set_page_dirty(*pagep);
	}
}

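/*
 * Copy bytes from one node to another.  The fast path handles the case
 * where both offsets share the same alignment within a page; otherwise
 * the copy proceeds in chunks bounded by whichever page ends first.
 */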
void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
		    struct hfs_bnode *src_node, int src, int len)
{
	struct page **src_page, **dst_page;
	int l;

	hfs_dbg(BNODE_MOD, "copybytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += src_node->page_offset;
	dst += dst_node->page_offset;
	src_page = src_node->page + (src >> PAGE_SHIFT);
	src &= ~PAGE_MASK;
	dst_page = dst_node->page + (dst >> PAGE_SHIFT);
	dst &= ~PAGE_MASK;

	if (src == dst) {
		l = min_t(int, len, PAGE_SIZE - src);
		memcpy_page(*dst_page, src, *src_page, src, l);
		set_page_dirty(*dst_page);

		while ((len -= l) != 0) {
			l = min_t(int, len, PAGE_SIZE);
			memcpy_page(*++dst_page, 0, *++src_page, 0, l);
			set_page_dirty(*dst_page);
		}
	} else {
		void *src_ptr, *dst_ptr;

		do {
			dst_ptr = kmap_local_page(*dst_page) + dst;
			src_ptr = kmap_local_page(*src_page) + src;
			if (PAGE_SIZE - src < PAGE_SIZE - dst) {
				l = PAGE_SIZE - src;
				src = 0;
				dst += l;
			} else {
				l = PAGE_SIZE - dst;
				src += l;
				dst = 0;
			}
			l = min(len, l);
			memcpy(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
			if (!dst)
				dst_page++;
			else
				src_page++;
		} while ((len -= l));
	}
}

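/*
 * Move bytes within a node, handling overlap like memmove().  When the
 * destination lies above the source the copy runs backwards from the end
 * of the range so overlapping regions are preserved.
 */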
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
{
	struct page **src_page, **dst_page;
	void *src_ptr, *dst_ptr;
	int l;

	hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
	if (!len)
		return;
	src += node->page_offset;
	dst += node->page_offset;
	if (dst > src) {
		src += len - 1;
		src_page = node->page + (src >> PAGE_SHIFT);
		src = (src & ~PAGE_MASK) + 1;
		dst += len - 1;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst = (dst & ~PAGE_MASK) + 1;

		if (src == dst) {
			while (src < len) {
				dst_ptr = kmap_local_page(*dst_page);
				src_ptr = kmap_local_page(*src_page);
				memmove(dst_ptr, src_ptr, src);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				len -= src;
				src = PAGE_SIZE;
				src_page--;
				dst_page--;
			}
			src -= len;
			dst_ptr = kmap_local_page(*dst_page);
			src_ptr = kmap_local_page(*src_page);
			memmove(dst_ptr + src, src_ptr + src, len);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (src < dst) {
					l = src;
					src = PAGE_SIZE;
					dst -= l;
				} else {
					l = dst;
					src -= l;
					dst = PAGE_SIZE;
				}
				l = min(len, l);
				memmove(dst_ptr - l, src_ptr - l, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (dst == PAGE_SIZE)
					dst_page--;
				else
					src_page--;
			} while ((len -= l));
		}
	} else {
		src_page = node->page + (src >> PAGE_SHIFT);
		src &= ~PAGE_MASK;
		dst_page = node->page + (dst >> PAGE_SHIFT);
		dst &= ~PAGE_MASK;

		if (src == dst) {
			l = min_t(int, len, PAGE_SIZE - src);

			dst_ptr = kmap_local_page(*dst_page) + src;
			src_ptr = kmap_local_page(*src_page) + src;
			memmove(dst_ptr, src_ptr, l);
			kunmap_local(src_ptr);
			set_page_dirty(*dst_page);
			kunmap_local(dst_ptr);

			while ((len -= l) != 0) {
				l = min_t(int, len, PAGE_SIZE);
				dst_ptr = kmap_local_page(*++dst_page);
				src_ptr = kmap_local_page(*++src_page);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
			}
		} else {
			do {
				dst_ptr = kmap_local_page(*dst_page) + dst;
				src_ptr = kmap_local_page(*src_page) + src;
				if (PAGE_SIZE - src <
				    PAGE_SIZE - dst) {
					l = PAGE_SIZE - src;
					src = 0;
					dst += l;
				} else {
					l = PAGE_SIZE - dst;
					src += l;
					dst = 0;
				}
				l = min(len, l);
				memmove(dst_ptr, src_ptr, l);
				kunmap_local(src_ptr);
				set_page_dirty(*dst_page);
				kunmap_local(dst_ptr);
				if (!dst)
					dst_page++;
				else
					src_page++;
			} while ((len -= l));
		}
	}
}

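/* Dump a node's descriptor, record offset table and keys via the debug log */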
void hfs_bnode_dump(struct hfs_bnode *node)
{
	struct hfs_bnode_desc desc;
	__be32 cnid;
	int i, off, key_off;

	hfs_dbg(BNODE_MOD, "bnode: %d\n", node->this);
	hfs_bnode_read(node, &desc, 0, sizeof(desc));
	hfs_dbg(BNODE_MOD, "%d, %d, %d, %d, %d\n",
		be32_to_cpu(desc.next), be32_to_cpu(desc.prev),
		desc.type, desc.height, be16_to_cpu(desc.num_recs));

	off = node->tree->node_size - 2;
	for (i = be16_to_cpu(desc.num_recs); i >= 0; off -= 2, i--) {
		key_off = hfs_bnode_read_u16(node, off);
		hfs_dbg(BNODE_MOD, " %d", key_off);
		if (i && node->type == HFS_NODE_INDEX) {
			int tmp;

			if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
			    node->tree->cnid == HFSPLUS_ATTR_CNID)
				tmp = hfs_bnode_read_u16(node, key_off) + 2;
			else
				tmp = node->tree->max_key_len + 2;
			hfs_dbg_cont(BNODE_MOD, " (%d", tmp);
			hfs_bnode_read(node, &cnid, key_off + tmp, 4);
			hfs_dbg_cont(BNODE_MOD, ",%d)", be32_to_cpu(cnid));
		} else if (i && node->type == HFS_NODE_LEAF) {
			int tmp;

			tmp = hfs_bnode_read_u16(node, key_off);
			hfs_dbg_cont(BNODE_MOD, " (%d)", tmp);
		}
	}
	hfs_dbg_cont(BNODE_MOD, "\n");
}

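/*
 * Unlink a node from its sibling chain, updating the on-disk prev/next
 * pointers of the neighbours and the tree's leaf head/tail as needed,
 * then mark the node deleted.
 */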
void hfs_bnode_unlink(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct hfs_bnode *tmp;
	__be32 cnid;

	tree = node->tree;
	if (node->prev) {
		tmp = hfs_bnode_find(tree, node->prev);
		if (IS_ERR(tmp))
			return;
		tmp->next = node->next;
		cnid = cpu_to_be32(tmp->next);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, next), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_head = node->next;

	if (node->next) {
		tmp = hfs_bnode_find(tree, node->next);
		if (IS_ERR(tmp))
			return;
		tmp->prev = node->prev;
		cnid = cpu_to_be32(tmp->prev);
		hfs_bnode_write(tmp, &cnid,
				offsetof(struct hfs_bnode_desc, prev), 4);
		hfs_bnode_put(tmp);
	} else if (node->type == HFS_NODE_LEAF)
		tree->leaf_tail = node->prev;

	/* move down? */
	if (!node->prev && !node->next)
		hfs_dbg(BNODE_MOD, "hfs_btree_del_level\n");
	if (!node->parent) {
		tree->root = 0;
		tree->depth = 0;
	}
	set_bit(HFS_BNODE_DELETED, &node->flags);
}

static inline int hfs_bnode_hash(u32 num)
{
	num = (num >> 16) + num;
	num += num >> 8;
	return num & (NODE_HASH_SIZE - 1);
}

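/* Look up a cached node in the tree's hash table; caller holds hash_lock */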
struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	for (node = tree->node_hash[hfs_bnode_hash(cnid)];
	     node; node = node->next_hash)
		if (node->this == cnid)
			return node;
	return NULL;
}

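/*
 * Allocate a node, insert it into the hash table and read its backing
 * pages from the tree inode's page cache.  If another task hashes the
 * same cnid first, the new allocation is dropped and the existing node
 * is returned once it is fully set up.
 */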
static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
{
	struct hfs_bnode *node, *node2;
	struct address_space *mapping;
	struct page *page;
	int size, block, i, hash;
	loff_t off;

	if (cnid >= tree->node_count) {
		pr_err("request for non-existent node %d in B*Tree\n",
		       cnid);
		return NULL;
	}

	size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
		sizeof(struct page *);
	node = kzalloc(size, GFP_KERNEL);
	if (!node)
		return NULL;
	node->tree = tree;
	node->this = cnid;
	set_bit(HFS_BNODE_NEW, &node->flags);
	atomic_set(&node->refcnt, 1);
	hfs_dbg(BNODE_REFS, "new_node(%d:%d): 1\n",
		node->tree->cnid, node->this);
	init_waitqueue_head(&node->lock_wq);
	spin_lock(&tree->hash_lock);
	node2 = hfs_bnode_findhash(tree, cnid);
	if (!node2) {
		hash = hfs_bnode_hash(cnid);
		node->next_hash = tree->node_hash[hash];
		tree->node_hash[hash] = node;
		tree->node_hash_cnt++;
	} else {
		spin_unlock(&tree->hash_lock);
		kfree(node);
		wait_event(node2->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node2->flags));
		return node2;
	}
	spin_unlock(&tree->hash_lock);

	mapping = tree->inode->i_mapping;
	off = (loff_t)cnid << tree->node_size_shift;
	block = off >> PAGE_SHIFT;
	node->page_offset = off & ~PAGE_MASK;
	for (i = 0; i < tree->pages_per_bnode; block++, i++) {
		page = read_mapping_page(mapping, block, NULL);
		if (IS_ERR(page))
			goto fail;
		node->page[i] = page;
	}

	return node;
fail:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	return node;
}

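/* Remove a node from the tree's hash table; caller holds hash_lock */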
void hfs_bnode_unhash(struct hfs_bnode *node)
{
	struct hfs_bnode **p;

	hfs_dbg(BNODE_REFS, "remove_node(%d:%d): %d\n",
		node->tree->cnid, node->this, atomic_read(&node->refcnt));
	for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
	     *p && *p != node; p = &(*p)->next_hash)
		;
	BUG_ON(!*p);
	*p = node->next_hash;
	node->tree->node_hash_cnt--;
}

/* Load a particular node out of a tree */
struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct hfs_bnode_desc *desc;
	int i, rec_off, off, next_off;
	int entry_size, key_size;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	if (node) {
		hfs_bnode_get(node);
		spin_unlock(&tree->hash_lock);
		wait_event(node->lock_wq,
			   !test_bit(HFS_BNODE_NEW, &node->flags));
		if (test_bit(HFS_BNODE_ERROR, &node->flags))
			goto node_error;
		return node;
	}
	spin_unlock(&tree->hash_lock);
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags))
		goto node_error;
	if (!test_bit(HFS_BNODE_NEW, &node->flags))
		return node;

	desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
					 node->page_offset);
	node->prev = be32_to_cpu(desc->prev);
	node->next = be32_to_cpu(desc->next);
	node->num_recs = be16_to_cpu(desc->num_recs);
	node->type = desc->type;
	node->height = desc->height;
	kunmap_local(desc);

	switch (node->type) {
	case HFS_NODE_HEADER:
	case HFS_NODE_MAP:
		if (node->height != 0)
			goto node_error;
		break;
	case HFS_NODE_LEAF:
		if (node->height != 1)
			goto node_error;
		break;
	case HFS_NODE_INDEX:
		if (node->height <= 1 || node->height > tree->depth)
			goto node_error;
		break;
	default:
		goto node_error;
	}

	rec_off = tree->node_size - 2;
	off = hfs_bnode_read_u16(node, rec_off);
	if (off != sizeof(struct hfs_bnode_desc))
		goto node_error;
	for (i = 1; i <= node->num_recs; off = next_off, i++) {
		rec_off -= 2;
		next_off = hfs_bnode_read_u16(node, rec_off);
		if (next_off <= off ||
		    next_off > tree->node_size ||
		    next_off & 1)
			goto node_error;
		entry_size = next_off - off;
		if (node->type != HFS_NODE_INDEX &&
		    node->type != HFS_NODE_LEAF)
			continue;
		key_size = hfs_bnode_read_u16(node, off) + 2;
		if (key_size >= entry_size || key_size & 1)
			goto node_error;
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	return node;

node_error:
	set_bit(HFS_BNODE_ERROR, &node->flags);
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);
	hfs_bnode_put(node);
	return ERR_PTR(-EIO);
}

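/* Release a node's page references and free the in-memory structure */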
void hfs_bnode_free(struct hfs_bnode *node)
{
	int i;

	for (i = 0; i < node->tree->pages_per_bnode; i++)
		if (node->page[i])
			put_page(node->page[i]);
	kfree(node);
}

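/* Create a brand-new node, which must not already be cached, and zero it */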
struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
{
	struct hfs_bnode *node;
	struct page **pagep;
	int i;

	spin_lock(&tree->hash_lock);
	node = hfs_bnode_findhash(tree, num);
	spin_unlock(&tree->hash_lock);
	if (node) {
		pr_crit("new node %u already hashed?\n", num);
		WARN_ON(1);
		return node;
	}
	node = __hfs_bnode_create(tree, num);
	if (!node)
		return ERR_PTR(-ENOMEM);
	if (test_bit(HFS_BNODE_ERROR, &node->flags)) {
		hfs_bnode_put(node);
		return ERR_PTR(-EIO);
	}

	pagep = node->page;
	memzero_page(*pagep, node->page_offset,
		     min_t(int, PAGE_SIZE, tree->node_size));
	set_page_dirty(*pagep);
	for (i = 1; i < tree->pages_per_bnode; i++) {
		memzero_page(*++pagep, 0, PAGE_SIZE);
		set_page_dirty(*pagep);
	}
	clear_bit(HFS_BNODE_NEW, &node->flags);
	wake_up(&node->lock_wq);

	return node;
}

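/* Take an additional reference on a node */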
void hfs_bnode_get(struct hfs_bnode *node)
{
	if (node) {
		atomic_inc(&node->refcnt);
		hfs_dbg(BNODE_REFS, "get_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
	}
}

/* Dispose of resources used by a node */
void hfs_bnode_put(struct hfs_bnode *node)
{
	if (node) {
		struct hfs_btree *tree = node->tree;
		int i;

		hfs_dbg(BNODE_REFS, "put_node(%d:%d): %d\n",
			node->tree->cnid, node->this,
			atomic_read(&node->refcnt));
		BUG_ON(!atomic_read(&node->refcnt));
		if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
			return;
		for (i = 0; i < tree->pages_per_bnode; i++) {
			if (!node->page[i])
				continue;
			mark_page_accessed(node->page[i]);
		}

		if (test_bit(HFS_BNODE_DELETED, &node->flags)) {
			hfs_bnode_unhash(node);
			spin_unlock(&tree->hash_lock);
			if (hfs_bnode_need_zeroout(tree))
				hfs_bnode_clear(node, 0, tree->node_size);
			hfs_bmap_free(node);
			hfs_bnode_free(node);
			return;
		}
		spin_unlock(&tree->hash_lock);
	}
}

/*
 * Unused nodes have to be zeroed if this is the catalog tree and
 * a corresponding flag in the volume header is set.
 */
bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
{
	struct super_block *sb = tree->inode->i_sb;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	const u32 volume_attr = be32_to_cpu(sbi->s_vhdr->attributes);

	return tree->cnid == HFSPLUS_CAT_CNID &&
	       volume_attr & HFSPLUS_VOL_UNUSED_NODE_FIX;
}