fs/hfsplus/btree.c (v3.1)

/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		printk(KERN_ERR
		       "hfs: invalid btree extent records (0 size).\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			printk(KERN_ERR "hfs: invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			printk(KERN_ERR "hfs: invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			printk(KERN_ERR "hfs: invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			printk(KERN_ERR "hfs: invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	default:
		printk(KERN_ERR "hfs: unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		printk(KERN_ERR "hfs: invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;

	kunmap(page);
	page_cache_release(page);
	return tree;

 fail_page:
	kunmap(page);
	page_cache_release(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}

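/*
 * Usage sketch (annotation, not part of the kernel file): the mount
 * path in fs/hfsplus/super.c brings up the two mandatory trees roughly
 * like this, with "sbi" the hfsplus superblock info and the error
 * labels abbreviated:
 *
 *	sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
 *	if (!sbi->ext_tree)
 *		goto out;
 *	sbi->cat_tree = hfs_btree_open(sb, HFSPLUS_CAT_CNID);
 *	if (!sbi->cat_tree)
 *		goto out_close_ext_tree;
 */
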
/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				printk(KERN_CRIT "hfs: node %d:%d "
						"still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

void hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap(page) +
		sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap(page);
	set_page_dirty(page);
	hfs_bnode_put(node);
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

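/*
 * Layout note (annotation, not part of the kernel file): the three
 * hfs_bnode_write_u16() calls above lay out a one-record map node.  The
 * map record starts right after the 14-byte node descriptor; its first
 * u16 of 0x8000 sets bit 0 of the new map's range, which is the map
 * node itself, marking it allocated.  The last four bytes of the node
 * hold the record offset table: for a 4096-byte node, offset 4094
 * stores 14 (start of record 0) and offset 4092 stores 4090 (end of
 * record 0, i.e. node_size - 6).
 */
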
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i;

	while (!tree->free_nodes) {
		struct inode *inode = tree->inode;
		struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
		u32 count;
		int res;

		res = hfsplus_file_extend(inode);
		if (res)
			return ERR_PTR(res);
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes = count - tree->node_count;
		tree->node_count = count;
	}

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_CACHE_SHIFT);
	data = kmap(*pagep);
	off &= ~PAGE_CACHE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap(*pagep);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_CACHE_SIZE) {
				kunmap(*pagep);
				data = kmap(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap(*pagep);
		nidx = node->next;
		if (!nidx) {
			dprint(DBG_BNODE_MOD, "hfs: create new bmap node.\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_CACHE_SHIFT);
		data = kmap(*pagep);
		off &= ~PAGE_CACHE_MASK;
	}
}

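/*
 * Scan note (annotation, not part of the kernel file): the map record
 * is scanned one byte at a time, and any byte other than 0xff still
 * contains a clear bit.  Example: for byte 0xf8 (1111 1000) the inner
 * loop fails the mask test at m = 0x04, i = 5, so the node at bitmap
 * position idx + 5 is claimed by setting that bit.
 */
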
void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	dprint(DBG_BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			printk(KERN_CRIT "hfs: unable to free bnode %u. "
					"bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			printk(KERN_CRIT "hfs: invalid bmap found! "
					"(%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_CACHE_SHIFT];
	data = kmap(page);
	off &= ~PAGE_CACHE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		printk(KERN_CRIT "hfs: trying to free free bnode "
				"%u(%d)\n",
			node->this, node->type);
		kunmap(page);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap(page);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}
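
/*
 * Bit arithmetic (annotation, not part of the kernel file): map bits
 * are MSB-first, so node n is tracked in byte n / 8 under the mask
 * 1 << (~n & 7).  Example: n = 10 gives byte 1 and ~10 & 7 = 5, so
 * m = 0x20, which is the same as 0x80 >> (10 % 8).
 */
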
fs/hfsplus/btree.c (v6.13.7)

// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/btree.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Handle opening/closing btree
 */

#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/log2.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"

/*
 * The initial version of this clump size calculation was taken
 * from http://opensource.apple.com/tarballs/diskdev_cmds/
 */
#define CLUMP_ENTRIES	15

static short clumptbl[CLUMP_ENTRIES * 3] = {
/*
 *	    Volume	Attributes	 Catalog	 Extents
 *	     Size	Clump (MB)	Clump (MB)	Clump (MB)
 */
	/*   1GB */	  4,		  4,		 4,
	/*   2GB */	  6,		  6,		 4,
	/*   4GB */	  8,		  8,		 4,
	/*   8GB */	 11,		 11,		 5,
	/*
	 * For volumes 16GB and larger, we want to make sure that a full OS
	 * install won't require fragmentation of the Catalog or Attributes
	 * B-trees.  We do this by making the clump sizes sufficiently large,
	 * and by leaving a gap after the B-trees for them to grow into.
	 *
	 * For SnowLeopard 10A298, a FullNetInstall with all packages selected
	 * results in:
	 * Catalog B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       31616
	 *	freeNodes:         1978
	 * (used = 231.55 MB)
	 * Attributes B-tree Header
	 *	nodeSize:          8192
	 *	totalNodes:       63232
	 *	freeNodes:          958
	 * (used = 486.52 MB)
	 *
	 * We also want Time Machine backup volumes to have a sufficiently
	 * large clump size to reduce fragmentation.
	 *
	 * The series of numbers for Catalog and Attribute form a geometric
	 * series. For Catalog (16GB to 512GB), each term is 8**(1/5) times
	 * the previous term.  For Attributes (16GB to 512GB), each term is
	 * 4**(1/5) times the previous term.  For 1TB to 16TB, each term is
	 * 2**(1/5) times the previous term.
	 */
	/*  16GB */	 64,		 32,		 5,
	/*  32GB */	 84,		 49,		 6,
	/*  64GB */	111,		 74,		 7,
	/* 128GB */	147,		111,		 8,
	/* 256GB */	194,		169,		 9,
	/* 512GB */	256,		256,		11,
	/*   1TB */	294,		294,		14,
	/*   2TB */	338,		338,		16,
	/*   4TB */	388,		388,		20,
	/*   8TB */	446,		446,		25,
	/*  16TB */	512,		512,		32
};

u32 hfsplus_calc_btree_clump_size(u32 block_size, u32 node_size,
					u64 sectors, int file_id)
{
	u32 mod = max(node_size, block_size);
	u32 clump_size;
	int column;
	int i;

	/* Figure out which column of the above table to use for this file. */
	switch (file_id) {
	case HFSPLUS_ATTR_CNID:
		column = 0;
		break;
	case HFSPLUS_CAT_CNID:
		column = 1;
		break;
	default:
		column = 2;
		break;
	}

	/*
	 * The default clump size is 0.8% of the volume size. And
	 * it must also be a multiple of the node and block size.
	 */
	if (sectors < 0x200000) {
		clump_size = sectors << 2;	/*  0.8 %  */
		if (clump_size < (8 * node_size))
			clump_size = 8 * node_size;
	} else {
		/* turn exponent into table index... */
		for (i = 0, sectors = sectors >> 22;
		     sectors && (i < CLUMP_ENTRIES - 1);
		     ++i, sectors = sectors >> 1) {
			/* empty body */
		}

		clump_size = clumptbl[column + (i) * 3] * 1024 * 1024;
	}

	/*
	 * Round the clump size to a multiple of node and block size.
	 * NOTE: This rounds down.
	 */
	clump_size /= mod;
	clump_size *= mod;

	/*
	 * Rounding down could have rounded down to 0 if the block size was
	 * greater than the clump size.  If so, just use one block or node.
	 */
	if (clump_size == 0)
		clump_size = mod;

	return clump_size;
}

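/*
 * Worked example (annotation, not part of the kernel file): a 64 GB
 * volume has 2^27 512-byte sectors, so sectors >> 22 = 32 and the
 * empty loop runs six times (32, 16, 8, 4, 2, 1), leaving i = 6, the
 * "64GB" row.  For the catalog tree (column 1) that selects 74 MB,
 * which is then rounded down to a multiple of max(node_size,
 * block_size).
 */
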
/* Get a reference to a B*Tree and do some initial checks */
struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
{
	struct hfs_btree *tree;
	struct hfs_btree_header_rec *head;
	struct address_space *mapping;
	struct inode *inode;
	struct page *page;
	unsigned int size;

	tree = kzalloc(sizeof(*tree), GFP_KERNEL);
	if (!tree)
		return NULL;

	mutex_init(&tree->tree_lock);
	spin_lock_init(&tree->hash_lock);
	tree->sb = sb;
	tree->cnid = id;
	inode = hfsplus_iget(sb, id);
	if (IS_ERR(inode))
		goto free_tree;
	tree->inode = inode;

	if (!HFSPLUS_I(tree->inode)->first_blocks) {
		pr_err("invalid btree extent records (0 size)\n");
		goto free_inode;
	}

	mapping = tree->inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		goto free_inode;

	/* Load the header */
	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
		sizeof(struct hfs_bnode_desc));
	tree->root = be32_to_cpu(head->root);
	tree->leaf_count = be32_to_cpu(head->leaf_count);
	tree->leaf_head = be32_to_cpu(head->leaf_head);
	tree->leaf_tail = be32_to_cpu(head->leaf_tail);
	tree->node_count = be32_to_cpu(head->node_count);
	tree->free_nodes = be32_to_cpu(head->free_nodes);
	tree->attributes = be32_to_cpu(head->attributes);
	tree->node_size = be16_to_cpu(head->node_size);
	tree->max_key_len = be16_to_cpu(head->max_key_len);
	tree->depth = be16_to_cpu(head->depth);

	/* Verify the tree and set the correct compare function */
	switch (id) {
	case HFSPLUS_EXT_CNID:
		if (tree->max_key_len != HFSPLUS_EXT_KEYLEN - sizeof(u16)) {
			pr_err("invalid extent max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (tree->attributes & HFS_TREE_VARIDXKEYS) {
			pr_err("invalid extent btree flag\n");
			goto fail_page;
		}

		tree->keycmp = hfsplus_ext_cmp_key;
		break;
	case HFSPLUS_CAT_CNID:
		if (tree->max_key_len != HFSPLUS_CAT_KEYLEN - sizeof(u16)) {
			pr_err("invalid catalog max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		if (!(tree->attributes & HFS_TREE_VARIDXKEYS)) {
			pr_err("invalid catalog btree flag\n");
			goto fail_page;
		}

		if (test_bit(HFSPLUS_SB_HFSX, &HFSPLUS_SB(sb)->flags) &&
		    (head->key_type == HFSPLUS_KEY_BINARY))
			tree->keycmp = hfsplus_cat_bin_cmp_key;
		else {
			tree->keycmp = hfsplus_cat_case_cmp_key;
			set_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
		}
		break;
	case HFSPLUS_ATTR_CNID:
		if (tree->max_key_len != HFSPLUS_ATTR_KEYLEN - sizeof(u16)) {
			pr_err("invalid attributes max_key_len %d\n",
				tree->max_key_len);
			goto fail_page;
		}
		tree->keycmp = hfsplus_attr_bin_cmp_key;
		break;
	default:
		pr_err("unknown B*Tree requested\n");
		goto fail_page;
	}

	if (!(tree->attributes & HFS_TREE_BIGKEYS)) {
		pr_err("invalid btree flag\n");
		goto fail_page;
	}

	size = tree->node_size;
	if (!is_power_of_2(size))
		goto fail_page;
	if (!tree->node_count)
		goto fail_page;

	tree->node_size_shift = ffs(size) - 1;

	tree->pages_per_bnode =
		(tree->node_size + PAGE_SIZE - 1) >>
		PAGE_SHIFT;

	kunmap_local(head);
	put_page(page);
	return tree;

 fail_page:
	kunmap_local(head);
	put_page(page);
 free_inode:
	tree->inode->i_mapping->a_ops = &hfsplus_aops;
	iput(tree->inode);
 free_tree:
	kfree(tree);
	return NULL;
}

/* Release resources used by a btree */
void hfs_btree_close(struct hfs_btree *tree)
{
	struct hfs_bnode *node;
	int i;

	if (!tree)
		return;

	for (i = 0; i < NODE_HASH_SIZE; i++) {
		while ((node = tree->node_hash[i])) {
			tree->node_hash[i] = node->next_hash;
			if (atomic_read(&node->refcnt))
				pr_crit("node %d:%d still has %d user(s)!\n",
					node->tree->cnid, node->this,
					atomic_read(&node->refcnt));
			hfs_bnode_free(node);
			tree->node_hash_cnt--;
		}
	}
	iput(tree->inode);
	kfree(tree);
}

int hfs_btree_write(struct hfs_btree *tree)
{
	struct hfs_btree_header_rec *head;
	struct hfs_bnode *node;
	struct page *page;

	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		/* panic? */
		return -EIO;
	/* Load the header */
	page = node->page[0];
	head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
		sizeof(struct hfs_bnode_desc));

	head->root = cpu_to_be32(tree->root);
	head->leaf_count = cpu_to_be32(tree->leaf_count);
	head->leaf_head = cpu_to_be32(tree->leaf_head);
	head->leaf_tail = cpu_to_be32(tree->leaf_tail);
	head->node_count = cpu_to_be32(tree->node_count);
	head->free_nodes = cpu_to_be32(tree->free_nodes);
	head->attributes = cpu_to_be32(tree->attributes);
	head->depth = cpu_to_be16(tree->depth);

	kunmap_local(head);
	set_page_dirty(page);
	hfs_bnode_put(node);
	return 0;
}

static struct hfs_bnode *hfs_bmap_new_bmap(struct hfs_bnode *prev, u32 idx)
{
	struct hfs_btree *tree = prev->tree;
	struct hfs_bnode *node;
	struct hfs_bnode_desc desc;
	__be32 cnid;

	node = hfs_bnode_create(tree, idx);
	if (IS_ERR(node))
		return node;

	tree->free_nodes--;
	prev->next = idx;
	cnid = cpu_to_be32(idx);
	hfs_bnode_write(prev, &cnid, offsetof(struct hfs_bnode_desc, next), 4);

	node->type = HFS_NODE_MAP;
	node->num_recs = 1;
	hfs_bnode_clear(node, 0, tree->node_size);
	desc.next = 0;
	desc.prev = 0;
	desc.type = HFS_NODE_MAP;
	desc.height = 0;
	desc.num_recs = cpu_to_be16(1);
	desc.reserved = 0;
	hfs_bnode_write(node, &desc, 0, sizeof(desc));
	hfs_bnode_write_u16(node, 14, 0x8000);
	hfs_bnode_write_u16(node, tree->node_size - 2, 14);
	hfs_bnode_write_u16(node, tree->node_size - 4, tree->node_size - 6);

	return node;
}

/* Make sure @tree has enough space for the @rsvd_nodes */
int hfs_bmap_reserve(struct hfs_btree *tree, int rsvd_nodes)
{
	struct inode *inode = tree->inode;
	struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
	u32 count;
	int res;

	if (rsvd_nodes <= 0)
		return 0;

	while (tree->free_nodes < rsvd_nodes) {
		res = hfsplus_file_extend(inode, hfs_bnode_need_zeroout(tree));
		if (res)
			return res;
		hip->phys_size = inode->i_size =
			(loff_t)hip->alloc_blocks <<
				HFSPLUS_SB(tree->sb)->alloc_blksz_shift;
		hip->fs_blocks =
			hip->alloc_blocks << HFSPLUS_SB(tree->sb)->fs_shift;
		inode_set_bytes(inode, inode->i_size);
		count = inode->i_size >> tree->node_size_shift;
		tree->free_nodes += count - tree->node_count;
		tree->node_count = count;
	}
	return 0;
}

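/*
 * Usage sketch (annotation, not part of the kernel file): callers
 * reserve map space before modifying a tree so that node allocation
 * cannot fail halfway through an update.  The catalog code, for
 * instance, does roughly the following before inserting a record,
 * budgeting for a split on every level:
 *
 *	err = hfs_bmap_reserve(fd.tree, 2 * fd.tree->depth);
 *	if (err)
 *		return err;
 */
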
struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
{
	struct hfs_bnode *node, *next_node;
	struct page **pagep;
	u32 nidx, idx;
	unsigned off;
	u16 off16;
	u16 len;
	u8 *data, byte, m;
	int i, res;

	res = hfs_bmap_reserve(tree, 1);
	if (res)
		return ERR_PTR(res);

	nidx = 0;
	node = hfs_bnode_find(tree, nidx);
	if (IS_ERR(node))
		return node;
	len = hfs_brec_lenoff(node, 2, &off16);
	off = off16;

	off += node->page_offset;
	pagep = node->page + (off >> PAGE_SHIFT);
	data = kmap_local_page(*pagep);
	off &= ~PAGE_MASK;
	idx = 0;

	for (;;) {
		while (len) {
			byte = data[off];
			if (byte != 0xff) {
				for (m = 0x80, i = 0; i < 8; m >>= 1, i++) {
					if (!(byte & m)) {
						idx += i;
						data[off] |= m;
						set_page_dirty(*pagep);
						kunmap_local(data);
						tree->free_nodes--;
						mark_inode_dirty(tree->inode);
						hfs_bnode_put(node);
						return hfs_bnode_create(tree,
							idx);
					}
				}
			}
			if (++off >= PAGE_SIZE) {
				kunmap_local(data);
				data = kmap_local_page(*++pagep);
				off = 0;
			}
			idx += 8;
			len--;
		}
		kunmap_local(data);
		nidx = node->next;
		if (!nidx) {
			hfs_dbg(BNODE_MOD, "create new bmap node\n");
			next_node = hfs_bmap_new_bmap(node, idx);
		} else
			next_node = hfs_bnode_find(tree, nidx);
		hfs_bnode_put(node);
		if (IS_ERR(next_node))
			return next_node;
		node = next_node;

		len = hfs_brec_lenoff(node, 0, &off16);
		off = off16;
		off += node->page_offset;
		pagep = node->page + (off >> PAGE_SHIFT);
		data = kmap_local_page(*pagep);
		off &= ~PAGE_MASK;
	}
}

void hfs_bmap_free(struct hfs_bnode *node)
{
	struct hfs_btree *tree;
	struct page *page;
	u16 off, len;
	u32 nidx;
	u8 *data, byte, m;

	hfs_dbg(BNODE_MOD, "btree_free_node: %u\n", node->this);
	BUG_ON(!node->this);
	tree = node->tree;
	nidx = node->this;
	node = hfs_bnode_find(tree, 0);
	if (IS_ERR(node))
		return;
	len = hfs_brec_lenoff(node, 2, &off);
	while (nidx >= len * 8) {
		u32 i;

		nidx -= len * 8;
		i = node->next;
		if (!i) {
			/* panic */;
			pr_crit("unable to free bnode %u. bmap not found!\n",
				node->this);
			hfs_bnode_put(node);
			return;
		}
		hfs_bnode_put(node);
		node = hfs_bnode_find(tree, i);
		if (IS_ERR(node))
			return;
		if (node->type != HFS_NODE_MAP) {
			/* panic */;
			pr_crit("invalid bmap found! (%u,%d)\n",
				node->this, node->type);
			hfs_bnode_put(node);
			return;
		}
		len = hfs_brec_lenoff(node, 0, &off);
	}
	off += node->page_offset + nidx / 8;
	page = node->page[off >> PAGE_SHIFT];
	data = kmap_local_page(page);
	off &= ~PAGE_MASK;
	m = 1 << (~nidx & 7);
	byte = data[off];
	if (!(byte & m)) {
		pr_crit("trying to free free bnode %u(%d)\n",
			node->this, node->type);
		kunmap_local(data);
		hfs_bnode_put(node);
		return;
	}
	data[off] = byte & ~m;
	set_page_dirty(page);
	kunmap_local(data);
	hfs_bnode_put(node);
	tree->free_nodes++;
	mark_inode_dirty(tree->inode);
}