Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v3.15
  1/*
  2 *  linux/fs/hfs/extent.c
  3 *
  4 * Copyright (C) 1995-1997  Paul H. Hargrove
  5 * (C) 2003 Ardis Technologies <roman@ardistech.com>
  6 * This file may be distributed under the terms of the GNU General Public License.
  7 *
  8 * This file contains the functions related to the extents B-tree.
  9 */
 10
 11#include <linux/pagemap.h>
 12
 13#include "hfs_fs.h"
 14#include "btree.h"
 15
 16/*================ File-local functions ================*/
 17
 18/*
 19 * build_key
 20 */
 21static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
 22{
 23	key->key_len = 7;
 24	key->ext.FkType = type;
 25	key->ext.FNum = cpu_to_be32(cnid);
 26	key->ext.FABN = cpu_to_be16(block);
 27}
 28
 29/*
 30 * hfs_ext_compare()
 31 *
 32 * Description:
 33 *   This is the comparison function used for the extents B-tree.  In
 34 *   comparing extent B-tree entries, the file id is the most
 35 *   significant field (compared as unsigned ints); the fork type is
 36 *   the second most significant field (compared as unsigned chars);
 37 *   and the allocation block number field is the least significant
 38 *   (compared as unsigned ints).
 39 * Input Variable(s):
 40 *   struct hfs_ext_key *key1: pointer to the first key to compare
 41 *   struct hfs_ext_key *key2: pointer to the second key to compare
 42 * Output Variable(s):
 43 *   NONE
 44 * Returns:
 45 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 46 * Preconditions:
 47 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 48 * Postconditions:
 49 *   This function has no side-effects */
 50int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
 51{
 52	__be32 fnum1, fnum2;
 53	__be16 block1, block2;
 54
 55	fnum1 = key1->ext.FNum;
 56	fnum2 = key2->ext.FNum;
 57	if (fnum1 != fnum2)
 58		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
 59	if (key1->ext.FkType != key2->ext.FkType)
 60		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;
 61
 62	block1 = key1->ext.FABN;
 63	block2 = key2->ext.FABN;
 64	if (block1 == block2)
 65		return 0;
 66	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
 67}
 68
 69/*
 70 * hfs_ext_find_block
 71 *
 72 * Find a block within an extent record
 73 */
 74static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
 75{
 76	int i;
 77	u16 count;
 78
 79	for (i = 0; i < 3; ext++, i++) {
 80		count = be16_to_cpu(ext->count);
 81		if (off < count)
 82			return be16_to_cpu(ext->block) + off;
 83		off -= count;
 84	}
 85	/* panic? */
 86	return 0;
 87}
 88
 89static int hfs_ext_block_count(struct hfs_extent *ext)
 90{
 91	int i;
 92	u16 count = 0;
 93
 94	for (i = 0; i < 3; ext++, i++)
 95		count += be16_to_cpu(ext->count);
 96	return count;
 97}
 98
 99static u16 hfs_ext_lastblock(struct hfs_extent *ext)
100{
101	int i;
102
103	ext += 2;
104	for (i = 0; i < 2; ext--, i++)
105		if (ext->count)
106			break;
107	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
108}
109
110static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
111{
112	int res;
113
114	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
115			  HFS_IS_RSRC(inode) ?  HFS_FK_RSRC : HFS_FK_DATA);
116	res = hfs_brec_find(fd);
117	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
118		if (res != -ENOENT)
119			return res;
 
 
 
 
120		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
121		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
122	} else {
123		if (res)
124			return res;
125		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
126		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
127	}
128	return 0;
129}
130
131int hfs_ext_write_extent(struct inode *inode)
132{
133	struct hfs_find_data fd;
134	int res = 0;
135
136	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
137		res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
138		if (res)
139			return res;
140		res = __hfs_ext_write_extent(inode, &fd);
141		hfs_find_exit(&fd);
142	}
143	return res;
144}
145
/*
 * Read the extent record of fork @type of file @cnid that covers
 * allocation block @block into @extent.
 *
 * The find may position on a record whose FABN precedes @block, so a
 * -ENOENT from hfs_brec_find() is tolerated and the key it returned is
 * checked instead; FNum is cleared beforehand so a find that never
 * fills fd->key cannot match the search key by accident.
 */
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;	/* malformed record on disk */
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}
164
/*
 * Load the extent record covering allocation block @block into the
 * inode's cache (cached_extents/cached_start/cached_blocks).  A dirty
 * cached record is written back first; on lookup failure the cache is
 * invalidated and the dirty/new flags are cleared.
 */
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		res = __hfs_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		/* The record's first fork block comes from the found key. */
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}
186
187static int hfs_ext_read_extent(struct inode *inode, u16 block)
188{
189	struct hfs_find_data fd;
190	int res;
191
192	if (block >= HFS_I(inode)->cached_start &&
193	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
194		return 0;
195
196	res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
197	if (!res) {
198		res = __hfs_ext_cache_extent(&fd, inode, block);
199		hfs_find_exit(&fd);
200	}
201	return res;
202}
203
204static void hfs_dump_extent(struct hfs_extent *extent)
205{
206	int i;
207
208	hfs_dbg(EXTENT, "   ");
209	for (i = 0; i < 3; i++)
210		hfs_dbg_cont(EXTENT, " %u:%u",
211			     be16_to_cpu(extent[i].block),
212			     be16_to_cpu(extent[i].count));
213	hfs_dbg_cont(EXTENT, "\n");
214}
215
/*
 * hfs_add_extent()
 *
 * Append @block_count newly allocated blocks, starting at allocation
 * block @alloc_block, to a record that currently maps @offset blocks.
 * The last used extent is grown when the new run is contiguous with
 * it; otherwise the next free slot is filled.
 *
 * Returns 0 on success, -ENOSPC when all three slots are used and the
 * run is not contiguous, -EIO when @offset does not land exactly on
 * the end of an extent in this record.
 */
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			/* @offset is exactly the end of this extent. */
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				/* Not contiguous: move to the next slot. */
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				/* Contiguous: just lengthen this extent. */
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
243
/*
 * hfs_free_extents()
 *
 * Free the last @block_nr allocation blocks mapped by @extent, where
 * @offset (in blocks) must land exactly on the end of one of the
 * record's extents.  Bits are cleared in the volume bitmap and the
 * extents are trimmed or emptied from that point backwards.
 *
 * Returns -EIO when @offset does not fall on an extent boundary.
 */
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	/* Locate the extent whose cumulative end equals @offset. */
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	/* Walk backwards, releasing whole or partial extents. */
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			/* Free the whole extent and clear its slot. */
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			/* Free only the tail of this extent. */
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}
282
283int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
284{
285	struct hfs_find_data fd;
286	u32 total_blocks, blocks, start;
287	u32 cnid = be32_to_cpu(file->FlNum);
288	struct hfs_extent *extent;
289	int res, i;
290
291	if (type == HFS_FK_DATA) {
292		total_blocks = be32_to_cpu(file->PyLen);
293		extent = file->ExtRec;
294	} else {
295		total_blocks = be32_to_cpu(file->RPyLen);
296		extent = file->RExtRec;
297	}
298	total_blocks /= HFS_SB(sb)->alloc_blksz;
299	if (!total_blocks)
300		return 0;
301
302	blocks = 0;
303	for (i = 0; i < 3; extent++, i++)
304		blocks += be16_to_cpu(extent[i].count);
305
306	res = hfs_free_extents(sb, extent, blocks, blocks);
307	if (res)
308		return res;
309	if (total_blocks == blocks)
310		return 0;
311
312	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
313	if (res)
314		return res;
315	do {
316		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
317		if (res)
318			break;
319		start = be16_to_cpu(fd.key->ext.FABN);
320		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
321		hfs_brec_remove(&fd);
322		total_blocks = start;
323	} while (total_blocks > blocks);
324	hfs_find_exit(&fd);
325
326	return res;
327}
328
329/*
330 * hfs_get_block
331 */
332int hfs_get_block(struct inode *inode, sector_t block,
333		  struct buffer_head *bh_result, int create)
334{
335	struct super_block *sb;
336	u16 dblock, ablock;
337	int res;
338
339	sb = inode->i_sb;
340	/* Convert inode block to disk allocation block */
341	ablock = (u32)block / HFS_SB(sb)->fs_div;
342
343	if (block >= HFS_I(inode)->fs_blocks) {
344		if (block > HFS_I(inode)->fs_blocks || !create)
 
 
345			return -EIO;
346		if (ablock >= HFS_I(inode)->alloc_blocks) {
347			res = hfs_extend_file(inode);
348			if (res)
349				return res;
350		}
351	} else
352		create = 0;
353
354	if (ablock < HFS_I(inode)->first_blocks) {
355		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
356		goto done;
357	}
358
359	mutex_lock(&HFS_I(inode)->extents_lock);
360	res = hfs_ext_read_extent(inode, ablock);
361	if (!res)
362		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
363					    ablock - HFS_I(inode)->cached_start);
364	else {
365		mutex_unlock(&HFS_I(inode)->extents_lock);
366		return -EIO;
367	}
368	mutex_unlock(&HFS_I(inode)->extents_lock);
369
370done:
371	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
372	       dblock * HFS_SB(sb)->fs_div +
373	       (u32)block % HFS_SB(sb)->fs_div);
374
375	if (create) {
376		set_buffer_new(bh_result);
377		HFS_I(inode)->phys_size += sb->s_blocksize;
378		HFS_I(inode)->fs_blocks++;
379		inode_add_bytes(inode, sb->s_blocksize);
380		mark_inode_dirty(inode);
381	}
382	return 0;
383}
384
/*
 * hfs_extend_file()
 *
 * Grow the fork behind @inode by up to one clump of allocation
 * blocks.  The new run is appended to the in-inode extent record when
 * possible, otherwise to the cached overflow record; when both are
 * full, a brand new overflow record is started.
 */
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	/* Aim the allocator just past the fork's current last block. */
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	/* Ask for a whole clump; @len comes back as what was free. */
	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		/* Append to the cached overflow record instead. */
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		/* System files (CNID < first user) also dirty the alt MDB. */
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
	}
	return res;

insert_extent:
	/* Record full: flush the cache and start a fresh record. */
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfs_ext_write_extent(inode);
	if (res)
		goto out;

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}
469
/*
 * hfs_file_truncate()
 *
 * Bring the on-disk allocation of @inode in line with i_size.
 * Growing the file touches the new last page through the pagecache so
 * the expansion reaches disk; shrinking frees whole allocation blocks
 * from the tail, walking overflow records backwards and removing the
 * ones that become empty.
 */
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
		inode->i_ino, (long long)HFS_I(inode)->phys_size,
		inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0,
				AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
					page, fsdata);
		}
		/* On failure, fall back to the old size. */
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;
	/* Round the new size up to whole allocation blocks. */
	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		/* XXX: We lack error handling of hfs_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			/* The remaining tail is in the in-inode record. */
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			/* Record still partly used: keep it, flush later. */
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		/* Record fully freed: drop it from tree and cache. */
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}
v5.9
  1/*
  2 *  linux/fs/hfs/extent.c
  3 *
  4 * Copyright (C) 1995-1997  Paul H. Hargrove
  5 * (C) 2003 Ardis Technologies <roman@ardistech.com>
  6 * This file may be distributed under the terms of the GNU General Public License.
  7 *
  8 * This file contains the functions related to the extents B-tree.
  9 */
 10
 11#include <linux/pagemap.h>
 12
 13#include "hfs_fs.h"
 14#include "btree.h"
 15
 16/*================ File-local functions ================*/
 17
 18/*
 19 * build_key
 20 */
 21static void hfs_ext_build_key(hfs_btree_key *key, u32 cnid, u16 block, u8 type)
 22{
 23	key->key_len = 7;
 24	key->ext.FkType = type;
 25	key->ext.FNum = cpu_to_be32(cnid);
 26	key->ext.FABN = cpu_to_be16(block);
 27}
 28
 29/*
 30 * hfs_ext_compare()
 31 *
 32 * Description:
 33 *   This is the comparison function used for the extents B-tree.  In
 34 *   comparing extent B-tree entries, the file id is the most
 35 *   significant field (compared as unsigned ints); the fork type is
 36 *   the second most significant field (compared as unsigned chars);
 37 *   and the allocation block number field is the least significant
 38 *   (compared as unsigned ints).
 39 * Input Variable(s):
 40 *   struct hfs_ext_key *key1: pointer to the first key to compare
 41 *   struct hfs_ext_key *key2: pointer to the second key to compare
 42 * Output Variable(s):
 43 *   NONE
 44 * Returns:
 45 *   int: negative if key1<key2, positive if key1>key2, and 0 if key1==key2
 46 * Preconditions:
 47 *   key1 and key2 point to "valid" (struct hfs_ext_key)s.
 48 * Postconditions:
 49 *   This function has no side-effects */
 50int hfs_ext_keycmp(const btree_key *key1, const btree_key *key2)
 51{
 52	__be32 fnum1, fnum2;
 53	__be16 block1, block2;
 54
 55	fnum1 = key1->ext.FNum;
 56	fnum2 = key2->ext.FNum;
 57	if (fnum1 != fnum2)
 58		return be32_to_cpu(fnum1) < be32_to_cpu(fnum2) ? -1 : 1;
 59	if (key1->ext.FkType != key2->ext.FkType)
 60		return key1->ext.FkType < key2->ext.FkType ? -1 : 1;
 61
 62	block1 = key1->ext.FABN;
 63	block2 = key2->ext.FABN;
 64	if (block1 == block2)
 65		return 0;
 66	return be16_to_cpu(block1) < be16_to_cpu(block2) ? -1 : 1;
 67}
 68
 69/*
 70 * hfs_ext_find_block
 71 *
 72 * Find a block within an extent record
 73 */
 74static u16 hfs_ext_find_block(struct hfs_extent *ext, u16 off)
 75{
 76	int i;
 77	u16 count;
 78
 79	for (i = 0; i < 3; ext++, i++) {
 80		count = be16_to_cpu(ext->count);
 81		if (off < count)
 82			return be16_to_cpu(ext->block) + off;
 83		off -= count;
 84	}
 85	/* panic? */
 86	return 0;
 87}
 88
 89static int hfs_ext_block_count(struct hfs_extent *ext)
 90{
 91	int i;
 92	u16 count = 0;
 93
 94	for (i = 0; i < 3; ext++, i++)
 95		count += be16_to_cpu(ext->count);
 96	return count;
 97}
 98
 99static u16 hfs_ext_lastblock(struct hfs_extent *ext)
100{
101	int i;
102
103	ext += 2;
104	for (i = 0; i < 2; ext--, i++)
105		if (ext->count)
106			break;
107	return be16_to_cpu(ext->block) + be16_to_cpu(ext->count);
108}
109
/*
 * Write the inode's cached extent record back to the extents tree:
 * insert it when flagged HFS_FLG_EXT_NEW, otherwise overwrite the
 * existing record in place.  Clears the dirty/new flags on success.
 */
static int __hfs_ext_write_extent(struct inode *inode, struct hfs_find_data *fd)
{
	int res;

	hfs_ext_build_key(fd->search_key, inode->i_ino, HFS_I(inode)->cached_start,
			  HFS_IS_RSRC(inode) ?  HFS_FK_RSRC : HFS_FK_DATA);
	res = hfs_brec_find(fd);
	if (HFS_I(inode)->flags & HFS_FLG_EXT_NEW) {
		/* A new record must not already exist in the tree. */
		if (res != -ENOENT)
			return res;
		/* Fail early and avoid ENOSPC during the btree operation */
		res = hfs_bmap_reserve(fd->tree, fd->tree->depth + 1);
		if (res)
			return res;
		hfs_brec_insert(fd, HFS_I(inode)->cached_extents, sizeof(hfs_extent_rec));
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	} else {
		if (res)
			return res;
		hfs_bnode_write(fd->bnode, HFS_I(inode)->cached_extents, fd->entryoffset, fd->entrylength);
		HFS_I(inode)->flags &= ~HFS_FLG_EXT_DIRTY;
	}
	return 0;
}
134
135int hfs_ext_write_extent(struct inode *inode)
136{
137	struct hfs_find_data fd;
138	int res = 0;
139
140	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
141		res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
142		if (res)
143			return res;
144		res = __hfs_ext_write_extent(inode, &fd);
145		hfs_find_exit(&fd);
146	}
147	return res;
148}
149
/*
 * Read the extent record of fork @type of file @cnid that covers
 * allocation block @block into @extent.
 *
 * The find may position on a record whose FABN precedes @block, so a
 * -ENOENT from hfs_brec_find() is tolerated and the key it returned is
 * checked instead; FNum is cleared beforehand so a find that never
 * fills fd->key cannot match the search key by accident.
 */
static inline int __hfs_ext_read_extent(struct hfs_find_data *fd, struct hfs_extent *extent,
					u32 cnid, u32 block, u8 type)
{
	int res;

	hfs_ext_build_key(fd->search_key, cnid, block, type);
	fd->key->ext.FNum = 0;
	res = hfs_brec_find(fd);
	if (res && res != -ENOENT)
		return res;
	if (fd->key->ext.FNum != fd->search_key->ext.FNum ||
	    fd->key->ext.FkType != fd->search_key->ext.FkType)
		return -ENOENT;
	if (fd->entrylength != sizeof(hfs_extent_rec))
		return -EIO;	/* malformed record on disk */
	hfs_bnode_read(fd->bnode, extent, fd->entryoffset, sizeof(hfs_extent_rec));
	return 0;
}
168
/*
 * Load the extent record covering allocation block @block into the
 * inode's cache (cached_extents/cached_start/cached_blocks).  A dirty
 * cached record is written back first; on lookup failure the cache is
 * invalidated and the dirty/new flags are cleared.
 */
static inline int __hfs_ext_cache_extent(struct hfs_find_data *fd, struct inode *inode, u32 block)
{
	int res;

	if (HFS_I(inode)->flags & HFS_FLG_EXT_DIRTY) {
		res = __hfs_ext_write_extent(inode, fd);
		if (res)
			return res;
	}

	res = __hfs_ext_read_extent(fd, HFS_I(inode)->cached_extents, inode->i_ino,
				    block, HFS_IS_RSRC(inode) ? HFS_FK_RSRC : HFS_FK_DATA);
	if (!res) {
		/* The record's first fork block comes from the found key. */
		HFS_I(inode)->cached_start = be16_to_cpu(fd->key->ext.FABN);
		HFS_I(inode)->cached_blocks = hfs_ext_block_count(HFS_I(inode)->cached_extents);
	} else {
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
	}
	return res;
}
190
191static int hfs_ext_read_extent(struct inode *inode, u16 block)
192{
193	struct hfs_find_data fd;
194	int res;
195
196	if (block >= HFS_I(inode)->cached_start &&
197	    block < HFS_I(inode)->cached_start + HFS_I(inode)->cached_blocks)
198		return 0;
199
200	res = hfs_find_init(HFS_SB(inode->i_sb)->ext_tree, &fd);
201	if (!res) {
202		res = __hfs_ext_cache_extent(&fd, inode, block);
203		hfs_find_exit(&fd);
204	}
205	return res;
206}
207
208static void hfs_dump_extent(struct hfs_extent *extent)
209{
210	int i;
211
212	hfs_dbg(EXTENT, "   ");
213	for (i = 0; i < 3; i++)
214		hfs_dbg_cont(EXTENT, " %u:%u",
215			     be16_to_cpu(extent[i].block),
216			     be16_to_cpu(extent[i].count));
217	hfs_dbg_cont(EXTENT, "\n");
218}
219
/*
 * hfs_add_extent()
 *
 * Append @block_count newly allocated blocks, starting at allocation
 * block @alloc_block, to a record that currently maps @offset blocks.
 * The last used extent is grown when the new run is contiguous with
 * it; otherwise the next free slot is filled.
 *
 * Returns 0 on success, -ENOSPC when all three slots are used and the
 * run is not contiguous, -EIO when @offset does not land exactly on
 * the end of an extent in this record.
 */
static int hfs_add_extent(struct hfs_extent *extent, u16 offset,
			  u16 alloc_block, u16 block_count)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count) {
			/* @offset is exactly the end of this extent. */
			start = be16_to_cpu(extent->block);
			if (alloc_block != start + count) {
				/* Not contiguous: move to the next slot. */
				if (++i >= 3)
					return -ENOSPC;
				extent++;
				extent->block = cpu_to_be16(alloc_block);
			} else
				/* Contiguous: just lengthen this extent. */
				block_count += count;
			extent->count = cpu_to_be16(block_count);
			return 0;
		} else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
}
247
/*
 * hfs_free_extents()
 *
 * Free the last @block_nr allocation blocks mapped by @extent, where
 * @offset (in blocks) must land exactly on the end of one of the
 * record's extents.  Bits are cleared in the volume bitmap and the
 * extents are trimmed or emptied from that point backwards.
 *
 * Returns -EIO when @offset does not fall on an extent boundary.
 */
static int hfs_free_extents(struct super_block *sb, struct hfs_extent *extent,
			    u16 offset, u16 block_nr)
{
	u16 count, start;
	int i;

	hfs_dump_extent(extent);
	/* Locate the extent whose cumulative end equals @offset. */
	for (i = 0; i < 3; extent++, i++) {
		count = be16_to_cpu(extent->count);
		if (offset == count)
			goto found;
		else if (offset < count)
			break;
		offset -= count;
	}
	/* panic? */
	return -EIO;
found:
	/* Walk backwards, releasing whole or partial extents. */
	for (;;) {
		start = be16_to_cpu(extent->block);
		if (count <= block_nr) {
			/* Free the whole extent and clear its slot. */
			hfs_clear_vbm_bits(sb, start, count);
			extent->block = 0;
			extent->count = 0;
			block_nr -= count;
		} else {
			/* Free only the tail of this extent. */
			count -= block_nr;
			hfs_clear_vbm_bits(sb, start + count, block_nr);
			extent->count = cpu_to_be16(count);
			block_nr = 0;
		}
		if (!block_nr || !i)
			return 0;
		i--;
		extent--;
		count = be16_to_cpu(extent->count);
	}
}
286
/*
 * hfs_free_fork()
 *
 * Release every allocation block of one fork (data or resource) of a
 * file being deleted: first the blocks mapped by the catalog record's
 * own extents, then any overflow records in the extents tree, walking
 * from the end of the fork backwards.
 */
int hfs_free_fork(struct super_block *sb, struct hfs_cat_file *file, int type)
{
	struct hfs_find_data fd;
	u32 total_blocks, blocks, start;
	u32 cnid = be32_to_cpu(file->FlNum);
	struct hfs_extent *extent;
	int res, i;

	if (type == HFS_FK_DATA) {
		total_blocks = be32_to_cpu(file->PyLen);
		extent = file->ExtRec;
	} else {
		total_blocks = be32_to_cpu(file->RPyLen);
		extent = file->RExtRec;
	}
	/* Convert the fork's physical length in bytes to blocks. */
	total_blocks /= HFS_SB(sb)->alloc_blksz;
	if (!total_blocks)
		return 0;

	/* Blocks mapped directly by the catalog record. */
	blocks = 0;
	for (i = 0; i < 3; i++)
		blocks += be16_to_cpu(extent[i].count);

	res = hfs_free_extents(sb, extent, blocks, blocks);
	if (res)
		return res;
	if (total_blocks == blocks)
		return 0;

	/* Free the overflow records, last record first. */
	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res)
		return res;
	do {
		res = __hfs_ext_read_extent(&fd, extent, cnid, total_blocks, type);
		if (res)
			break;
		start = be16_to_cpu(fd.key->ext.FABN);
		hfs_free_extents(sb, extent, total_blocks - start, total_blocks);
		hfs_brec_remove(&fd);
		total_blocks = start;
	} while (total_blocks > blocks);
	hfs_find_exit(&fd);

	return res;
}
332
333/*
334 * hfs_get_block
335 */
/*
 * hfs_get_block()
 *
 * Map file block @block of @inode to a disk sector for the buffer
 * layer; with @create set, a block just past the current end of the
 * file is allocated first.
 */
int hfs_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	struct super_block *sb;
	u16 dblock, ablock;
	int res;

	sb = inode->i_sb;
	/* Convert inode block to disk allocation block */
	ablock = (u32)block / HFS_SB(sb)->fs_div;

	if (block >= HFS_I(inode)->fs_blocks) {
		/* Reads past EOF map nothing. */
		if (!create)
			return 0;
		/* Writes may only append one block at a time. */
		if (block > HFS_I(inode)->fs_blocks)
			return -EIO;
		if (ablock >= HFS_I(inode)->alloc_blocks) {
			res = hfs_extend_file(inode);
			if (res)
				return res;
		}
	} else
		create = 0;

	if (ablock < HFS_I(inode)->first_blocks) {
		/* Block is mapped by the in-inode extent record. */
		dblock = hfs_ext_find_block(HFS_I(inode)->first_extents, ablock);
		goto done;
	}

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_ext_read_extent(inode, ablock);
	if (!res)
		dblock = hfs_ext_find_block(HFS_I(inode)->cached_extents,
					    ablock - HFS_I(inode)->cached_start);
	else {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		return -EIO;
	}
	mutex_unlock(&HFS_I(inode)->extents_lock);

done:
	/* Allocation block -> sector, plus the sub-block offset. */
	map_bh(bh_result, sb, HFS_SB(sb)->fs_start +
	       dblock * HFS_SB(sb)->fs_div +
	       (u32)block % HFS_SB(sb)->fs_div);

	if (create) {
		set_buffer_new(bh_result);
		HFS_I(inode)->phys_size += sb->s_blocksize;
		HFS_I(inode)->fs_blocks++;
		inode_add_bytes(inode, sb->s_blocksize);
		mark_inode_dirty(inode);
	}
	return 0;
}
390
/*
 * hfs_extend_file()
 *
 * Grow the fork behind @inode by up to one clump of allocation
 * blocks.  The new run is appended to the in-inode extent record when
 * possible, otherwise to the cached overflow record; when both are
 * full, a brand new overflow record is started.
 */
int hfs_extend_file(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	u32 start, len, goal;
	int res;

	mutex_lock(&HFS_I(inode)->extents_lock);
	/* Aim the allocator just past the fork's current last block. */
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks)
		goal = hfs_ext_lastblock(HFS_I(inode)->first_extents);
	else {
		res = hfs_ext_read_extent(inode, HFS_I(inode)->alloc_blocks);
		if (res)
			goto out;
		goal = hfs_ext_lastblock(HFS_I(inode)->cached_extents);
	}

	/* Ask for a whole clump; @len comes back as what was free. */
	len = HFS_I(inode)->clump_blocks;
	start = hfs_vbm_search_free(sb, goal, &len);
	if (!len) {
		res = -ENOSPC;
		goto out;
	}

	hfs_dbg(EXTENT, "extend %lu: %u,%u\n", inode->i_ino, start, len);
	if (HFS_I(inode)->alloc_blocks == HFS_I(inode)->first_blocks) {
		if (!HFS_I(inode)->first_blocks) {
			hfs_dbg(EXTENT, "first extents\n");
			/* no extents yet */
			HFS_I(inode)->first_extents[0].block = cpu_to_be16(start);
			HFS_I(inode)->first_extents[0].count = cpu_to_be16(len);
			res = 0;
		} else {
			/* try to append to extents in inode */
			res = hfs_add_extent(HFS_I(inode)->first_extents,
					     HFS_I(inode)->alloc_blocks,
					     start, len);
			if (res == -ENOSPC)
				goto insert_extent;
		}
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks += len;
		}
	} else {
		/* Append to the cached overflow record instead. */
		res = hfs_add_extent(HFS_I(inode)->cached_extents,
				     HFS_I(inode)->alloc_blocks -
				     HFS_I(inode)->cached_start,
				     start, len);
		if (!res) {
			hfs_dump_extent(HFS_I(inode)->cached_extents);
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			HFS_I(inode)->cached_blocks += len;
		} else if (res == -ENOSPC)
			goto insert_extent;
	}
out:
	mutex_unlock(&HFS_I(inode)->extents_lock);
	if (!res) {
		HFS_I(inode)->alloc_blocks += len;
		mark_inode_dirty(inode);
		/* System files (CNID < first user) also dirty the alt MDB. */
		if (inode->i_ino < HFS_FIRSTUSER_CNID)
			set_bit(HFS_FLG_ALT_MDB_DIRTY, &HFS_SB(sb)->flags);
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
	}
	return res;

insert_extent:
	/* Record full: flush the cache and start a fresh record. */
	hfs_dbg(EXTENT, "insert new extent\n");
	res = hfs_ext_write_extent(inode);
	if (res)
		goto out;

	memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	HFS_I(inode)->cached_extents[0].block = cpu_to_be16(start);
	HFS_I(inode)->cached_extents[0].count = cpu_to_be16(len);
	hfs_dump_extent(HFS_I(inode)->cached_extents);
	HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW;
	HFS_I(inode)->cached_start = HFS_I(inode)->alloc_blocks;
	HFS_I(inode)->cached_blocks = len;

	res = 0;
	goto out;
}
475
/*
 * hfs_file_truncate()
 *
 * Bring the on-disk allocation of @inode in line with i_size.
 * Growing the file touches the new last page through the pagecache so
 * the expansion reaches disk; shrinking frees whole allocation blocks
 * from the tail, walking overflow records backwards and removing the
 * ones that become empty.
 */
void hfs_file_truncate(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct hfs_find_data fd;
	u16 blk_cnt, alloc_cnt, start;
	u32 size;
	int res;

	hfs_dbg(INODE, "truncate: %lu, %Lu -> %Lu\n",
		inode->i_ino, (long long)HFS_I(inode)->phys_size,
		inode->i_size);
	if (inode->i_size > HFS_I(inode)->phys_size) {
		struct address_space *mapping = inode->i_mapping;
		void *fsdata;
		struct page *page;

		/* XXX: Can use generic_cont_expand? */
		size = inode->i_size - 1;
		res = pagecache_write_begin(NULL, mapping, size+1, 0, 0,
					    &page, &fsdata);
		if (!res) {
			res = pagecache_write_end(NULL, mapping, size+1, 0, 0,
					page, fsdata);
		}
		/* On failure, fall back to the old size. */
		if (res)
			inode->i_size = HFS_I(inode)->phys_size;
		return;
	} else if (inode->i_size == HFS_I(inode)->phys_size)
		return;
	/* Round the new size up to whole allocation blocks. */
	size = inode->i_size + HFS_SB(sb)->alloc_blksz - 1;
	blk_cnt = size / HFS_SB(sb)->alloc_blksz;
	alloc_cnt = HFS_I(inode)->alloc_blocks;
	if (blk_cnt == alloc_cnt)
		goto out;

	mutex_lock(&HFS_I(inode)->extents_lock);
	res = hfs_find_init(HFS_SB(sb)->ext_tree, &fd);
	if (res) {
		mutex_unlock(&HFS_I(inode)->extents_lock);
		/* XXX: We lack error handling of hfs_file_truncate() */
		return;
	}
	while (1) {
		if (alloc_cnt == HFS_I(inode)->first_blocks) {
			/* The remaining tail is in the in-inode record. */
			hfs_free_extents(sb, HFS_I(inode)->first_extents,
					 alloc_cnt, alloc_cnt - blk_cnt);
			hfs_dump_extent(HFS_I(inode)->first_extents);
			HFS_I(inode)->first_blocks = blk_cnt;
			break;
		}
		res = __hfs_ext_cache_extent(&fd, inode, alloc_cnt);
		if (res)
			break;
		start = HFS_I(inode)->cached_start;
		hfs_free_extents(sb, HFS_I(inode)->cached_extents,
				 alloc_cnt - start, alloc_cnt - blk_cnt);
		hfs_dump_extent(HFS_I(inode)->cached_extents);
		if (blk_cnt > start) {
			/* Record still partly used: keep it, flush later. */
			HFS_I(inode)->flags |= HFS_FLG_EXT_DIRTY;
			break;
		}
		/* Record fully freed: drop it from tree and cache. */
		alloc_cnt = start;
		HFS_I(inode)->cached_start = HFS_I(inode)->cached_blocks = 0;
		HFS_I(inode)->flags &= ~(HFS_FLG_EXT_DIRTY|HFS_FLG_EXT_NEW);
		hfs_brec_remove(&fd);
	}
	hfs_find_exit(&fd);
	mutex_unlock(&HFS_I(inode)->extents_lock);

	HFS_I(inode)->alloc_blocks = blk_cnt;
out:
	HFS_I(inode)->phys_size = inode->i_size;
	HFS_I(inode)->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	mark_inode_dirty(inode);
}