v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/ufs/balloc.c
  4 *
  5 * Copyright (C) 1998
  6 * Daniel Pirkl <daniel.pirkl@email.cz>
  7 * Charles University, Faculty of Mathematics and Physics
  8 *
  9 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
 10 */
 11
 12#include <linux/fs.h>
 13#include <linux/stat.h>
 14#include <linux/time.h>
 15#include <linux/string.h>
 16#include <linux/buffer_head.h>
 17#include <linux/capability.h>
 18#include <linux/bitops.h>
 19#include <linux/bio.h>
 20#include <asm/byteorder.h>
 21
 22#include "ufs_fs.h"
 23#include "ufs.h"
 24#include "swab.h"
 25#include "util.h"
 26
 27#define INVBLOCK ((u64)-1L)
 28
 29static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
 30static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
 31static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
 32static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
 33static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
 34static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
 35
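/*
 * Common accounting for one whole block changing state: delta is +1 when
 * a block is freed, -1 when one is allocated.  This updates the 44BSD
 * cluster summaries, the per-cylinder-group and filesystem-wide free
 * block counts, and, on UFS1, the legacy cylinder/rotational-position
 * tables.
 */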
 36static void adjust_free_blocks(struct super_block *sb,
 37			       struct ufs_cylinder_group *ucg,
 38			       struct ufs_cg_private_info *ucpi,
 39			       unsigned fragment, int delta)
 40{
 41	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
 42
 43	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
 44		ufs_clusteracct(sb, ucpi, fragment, delta);
 45
 46	fs32_add(sb, &ucg->cg_cs.cs_nbfree, delta);
 47	uspi->cs_total.cs_nbfree += delta;
 48	fs32_add(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, delta);
 49
 50	if (uspi->fs_magic != UFS2_MAGIC) {
 51		unsigned cylno = ufs_cbtocylno(fragment);
 52
 53		fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
 54					  ufs_cbtorpos(fragment)), delta);
 55		fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), delta);
 56	}
 57}
 58
 59/*
 60 * Free 'count' fragments from fragment number 'fragment'
 61 */
 62void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 63{
 64	struct super_block * sb;
 65	struct ufs_sb_private_info * uspi;
 66	struct ufs_cg_private_info * ucpi;
 67	struct ufs_cylinder_group * ucg;
 68	unsigned cgno, bit, end_bit, bbase, blkmap, i;
 69	
 70	sb = inode->i_sb;
 71	uspi = UFS_SB(sb)->s_uspi;
 72	
 73	UFSD("ENTER, fragment %llu, count %u\n",
 74	     (unsigned long long)fragment, count);
 75	
 76	if (ufs_fragnum(fragment) + count > uspi->s_fpb)
 77		ufs_error (sb, "ufs_free_fragments", "internal error");
 78
 79	mutex_lock(&UFS_SB(sb)->s_lock);
 80	
 81	cgno = ufs_dtog(uspi, fragment);
 82	bit = ufs_dtogd(uspi, fragment);
 83	if (cgno >= uspi->s_ncg) {
 84		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
 85		goto failed;
 86	}
 87		
 88	ucpi = ufs_load_cylinder (sb, cgno);
 89	if (!ucpi) 
 90		goto failed;
 91	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 92	if (!ufs_cg_chkmagic(sb, ucg)) {
 93		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
 94		goto failed;
 95	}
 96
 97	end_bit = bit + count;
 98	bbase = ufs_blknum (bit);
 99	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
100	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
101	for (i = bit; i < end_bit; i++) {
102		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
103			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
104		else 
105			ufs_error (sb, "ufs_free_fragments",
106				   "bit already cleared for fragment %u", i);
107	}
108
109	inode_sub_bytes(inode, count << uspi->s_fshift);
110	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
111	uspi->cs_total.cs_nffree += count;
112	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
113	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
114	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
115
 116	/*
 117	 * Try to reassemble the freed fragments into a whole block
 118	 */
119	if (ubh_isblockset(uspi, ucpi, bbase)) {
120		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
121		uspi->cs_total.cs_nffree -= uspi->s_fpb;
122		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
123		adjust_free_blocks(sb, ucg, ucpi, bbase, 1);
124	}
125	
126	ubh_mark_buffer_dirty (USPI_UBH(uspi));
127	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
128	if (sb->s_flags & SB_SYNCHRONOUS)
129		ubh_sync_block(UCPI_UBH(ucpi));
130	ufs_mark_sb_dirty(sb);
131
132	mutex_unlock(&UFS_SB(sb)->s_lock);
133	UFSD("EXIT\n");
134	return;
135
136failed:
137	mutex_unlock(&UFS_SB(sb)->s_lock);
138	UFSD("EXIT (FAILED)\n");
139	return;
140}
141
142/*
143 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
144 */
145void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
146{
147	struct super_block * sb;
148	struct ufs_sb_private_info * uspi;
149	struct ufs_cg_private_info * ucpi;
150	struct ufs_cylinder_group * ucg;
151	unsigned overflow, cgno, bit, end_bit, i;
152	
153	sb = inode->i_sb;
154	uspi = UFS_SB(sb)->s_uspi;
155
156	UFSD("ENTER, fragment %llu, count %u\n",
157	     (unsigned long long)fragment, count);
158	
159	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
160		ufs_error (sb, "ufs_free_blocks", "internal error, "
161			   "fragment %llu, count %u\n",
162			   (unsigned long long)fragment, count);
163		goto failed;
164	}
165
166	mutex_lock(&UFS_SB(sb)->s_lock);
167	
168do_more:
169	overflow = 0;
170	cgno = ufs_dtog(uspi, fragment);
171	bit = ufs_dtogd(uspi, fragment);
172	if (cgno >= uspi->s_ncg) {
173		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
174		goto failed_unlock;
175	}
176	end_bit = bit + count;
177	if (end_bit > uspi->s_fpg) {
178		overflow = bit + count - uspi->s_fpg;
179		count -= overflow;
180		end_bit -= overflow;
181	}
182
183	ucpi = ufs_load_cylinder (sb, cgno);
184	if (!ucpi) 
185		goto failed_unlock;
186	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
187	if (!ufs_cg_chkmagic(sb, ucg)) {
188		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
189		goto failed_unlock;
190	}
191
192	for (i = bit; i < end_bit; i += uspi->s_fpb) {
193		if (ubh_isblockset(uspi, ucpi, i)) {
194			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
195		}
196		ubh_setblock(uspi, ucpi, i);
197		inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
198		adjust_free_blocks(sb, ucg, ucpi, i, 1);
199	}
200
201	ubh_mark_buffer_dirty (USPI_UBH(uspi));
202	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
203	if (sb->s_flags & SB_SYNCHRONOUS)
204		ubh_sync_block(UCPI_UBH(ucpi));
205
206	if (overflow) {
207		fragment += count;
208		count = overflow;
209		goto do_more;
210	}
211
212	ufs_mark_sb_dirty(sb);
213	mutex_unlock(&UFS_SB(sb)->s_lock);
214	UFSD("EXIT\n");
215	return;
216
217failed_unlock:
218	mutex_unlock(&UFS_SB(sb)->s_lock);
219failed:
220	UFSD("EXIT (FAILED)\n");
221	return;
222}
223
 224/*
 225 * Modify the inode page cache as follows:
 226 * have - buffers with b_blocknr equal to oldb...oldb+count-1
 227 * get  - buffers with b_blocknr equal to newb...newb+count-1
 228 * We also assume that blocks oldb...oldb+count-1 are
 229 * situated at the end of the file.
 230 *
 231 * We can get here from ufs_writepage or ufs_prepare_write;
 232 * locked_folio is an argument of those functions, so it is already locked.
 233 */
234static void ufs_change_blocknr(struct inode *inode, sector_t beg,
235			       unsigned int count, sector_t oldb,
236			       sector_t newb, struct folio *locked_folio)
237{
238	struct folio *folio;
239	const unsigned blks_per_page =
240		1 << (PAGE_SHIFT - inode->i_blkbits);
241	const unsigned mask = blks_per_page - 1;
242	struct address_space * const mapping = inode->i_mapping;
243	pgoff_t index, cur_index, last_index;
244	unsigned pos, j, lblock;
245	sector_t end, i;
246	struct buffer_head *head, *bh;
247
248	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
249	      inode->i_ino, count,
250	     (unsigned long long)oldb, (unsigned long long)newb);
251
252	BUG_ON(!folio_test_locked(locked_folio));
253
254	cur_index = locked_folio->index;
255	end = count + beg;
256	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
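	/*
	 * Walk the range one page at a time: (i | mask) + 1 advances i to
	 * the first block of the next page.
	 */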
257	for (i = beg; i < end; i = (i | mask) + 1) {
258		index = i >> (PAGE_SHIFT - inode->i_blkbits);
259
260		if (likely(cur_index != index)) {
261			folio = ufs_get_locked_folio(mapping, index);
262			if (!folio) /* it was truncated */
263				continue;
264			if (IS_ERR(folio)) {/* or EIO */
265				ufs_error(inode->i_sb, __func__,
266					  "read of page %llu failed\n",
267					  (unsigned long long)index);
268				continue;
269			}
270		} else
271			folio = locked_folio;
272
273		head = folio_buffers(folio);
274		bh = head;
275		pos = i & mask;
276		for (j = 0; j < pos; ++j)
277			bh = bh->b_this_page;
278
279		if (unlikely(index == last_index))
280			lblock = end & mask;
281		else
282			lblock = blks_per_page;
283
284		do {
285			if (j >= lblock)
286				break;
287			pos = (i - beg) + j;
288
289			if (!buffer_mapped(bh))
290					map_bh(bh, inode->i_sb, oldb + pos);
291			if (bh_read(bh, 0) < 0) {
292				ufs_error(inode->i_sb, __func__,
293					  "read of block failed\n");
294				break;
295			}
296
297			UFSD(" change from %llu to %llu, pos %u\n",
298			     (unsigned long long)(pos + oldb),
299			     (unsigned long long)(pos + newb), pos);
300
301			bh->b_blocknr = newb + pos;
302			clean_bdev_bh_alias(bh);
303			mark_buffer_dirty(bh);
304			++j;
305			bh = bh->b_this_page;
306		} while (bh != head);
307
308		if (likely(cur_index != index))
309			ufs_put_locked_folio(folio);
310 	}
311	UFSD("EXIT\n");
312}
313
314static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
315			    int sync)
316{
317	struct buffer_head *bh;
318	sector_t end = beg + n;
319
320	for (; beg < end; ++beg) {
321		bh = sb_getblk(inode->i_sb, beg);
322		lock_buffer(bh);
323		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
324		set_buffer_uptodate(bh);
325		mark_buffer_dirty(bh);
326		unlock_buffer(bh);
327		if (IS_SYNC(inode) || sync)
328			sync_dirty_buffer(bh);
329		brelse(bh);
330	}
331}
332
333u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
334			   u64 goal, unsigned count, int *err,
335			   struct folio *locked_folio)
336{
337	struct super_block * sb;
338	struct ufs_sb_private_info * uspi;
339	struct ufs_super_block_first * usb1;
340	unsigned cgno, oldcount, newcount;
341	u64 tmp, request, result;
342	
343	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
344	     inode->i_ino, (unsigned long long)fragment,
345	     (unsigned long long)goal, count);
346	
347	sb = inode->i_sb;
348	uspi = UFS_SB(sb)->s_uspi;
349	usb1 = ubh_get_usb_first(uspi);
350	*err = -ENOSPC;
351
352	mutex_lock(&UFS_SB(sb)->s_lock);
353	tmp = ufs_data_ptr_to_cpu(sb, p);
354
355	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
356		ufs_warning(sb, "ufs_new_fragments", "internal warning"
357			    " fragment %llu, count %u",
358			    (unsigned long long)fragment, count);
359		count = uspi->s_fpb - ufs_fragnum(fragment); 
360	}
361	oldcount = ufs_fragnum (fragment);
362	newcount = oldcount + count;
363
364	/*
365	 * Somebody else has just allocated our fragments
366	 */
367	if (oldcount) {
368		if (!tmp) {
369			ufs_error(sb, "ufs_new_fragments", "internal error, "
370				  "fragment %llu, tmp %llu\n",
371				  (unsigned long long)fragment,
372				  (unsigned long long)tmp);
373			mutex_unlock(&UFS_SB(sb)->s_lock);
374			return INVBLOCK;
375		}
376		if (fragment < UFS_I(inode)->i_lastfrag) {
377			UFSD("EXIT (ALREADY ALLOCATED)\n");
378			mutex_unlock(&UFS_SB(sb)->s_lock);
379			return 0;
380		}
381	}
382	else {
383		if (tmp) {
384			UFSD("EXIT (ALREADY ALLOCATED)\n");
385			mutex_unlock(&UFS_SB(sb)->s_lock);
386			return 0;
387		}
388	}
389
 390	/*
 391	 * There is not enough space for the user on the device
 392	 */
393	if (unlikely(ufs_freefrags(uspi) <= uspi->s_root_blocks)) {
394		if (!capable(CAP_SYS_RESOURCE)) {
395			mutex_unlock(&UFS_SB(sb)->s_lock);
396			UFSD("EXIT (FAILED)\n");
397			return 0;
398		}
399	}
400
401	if (goal >= uspi->s_size) 
402		goal = 0;
403	if (goal == 0) 
404		cgno = ufs_inotocg (inode->i_ino);
405	else
406		cgno = ufs_dtog(uspi, goal);
407	 
408	/*
409	 * allocate new fragment
410	 */
411	if (oldcount == 0) {
412		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
413		if (result) {
414			ufs_clear_frags(inode, result + oldcount,
415					newcount - oldcount, locked_folio != NULL);
416			*err = 0;
417			write_seqlock(&UFS_I(inode)->meta_lock);
418			ufs_cpu_to_data_ptr(sb, p, result);
419			UFS_I(inode)->i_lastfrag =
420				max(UFS_I(inode)->i_lastfrag, fragment + count);
421			write_sequnlock(&UFS_I(inode)->meta_lock);
422		}
423		mutex_unlock(&UFS_SB(sb)->s_lock);
424		UFSD("EXIT, result %llu\n", (unsigned long long)result);
425		return result;
426	}
427
428	/*
429	 * resize block
430	 */
431	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
432	if (result) {
433		*err = 0;
434		read_seqlock_excl(&UFS_I(inode)->meta_lock);
435		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
436						fragment + count);
437		read_sequnlock_excl(&UFS_I(inode)->meta_lock);
438		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
439				locked_folio != NULL);
440		mutex_unlock(&UFS_SB(sb)->s_lock);
441		UFSD("EXIT, result %llu\n", (unsigned long long)result);
442		return result;
443	}
444
445	/*
446	 * allocate new block and move data
447	 */
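	/*
	 * fs_optim picks the allocation policy: UFS_OPTSPACE requests an
	 * exact fit of newcount fragments, anything else a whole block
	 * (fewer relocations later at the price of internal fragmentation).
	 * The s_space_to_time/s_time_to_space thresholds flip the policy as
	 * the free fragment pool shrinks or grows.
	 */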
448	if (fs32_to_cpu(sb, usb1->fs_optim) == UFS_OPTSPACE) {
449		request = newcount;
450		if (uspi->cs_total.cs_nffree < uspi->s_space_to_time)
451			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
452	} else {
453		request = uspi->s_fpb;
454		if (uspi->cs_total.cs_nffree > uspi->s_time_to_space)
455			usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
456	}
457	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
458	if (result) {
459		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
460				locked_folio != NULL);
461		mutex_unlock(&UFS_SB(sb)->s_lock);
462		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
463				   uspi->s_sbbase + tmp,
464				   uspi->s_sbbase + result, locked_folio);
465		*err = 0;
466		write_seqlock(&UFS_I(inode)->meta_lock);
467		ufs_cpu_to_data_ptr(sb, p, result);
468		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
469						fragment + count);
470		write_sequnlock(&UFS_I(inode)->meta_lock);
471		if (newcount < request)
472			ufs_free_fragments (inode, result + newcount, request - newcount);
473		ufs_free_fragments (inode, tmp, oldcount);
474		UFSD("EXIT, result %llu\n", (unsigned long long)result);
475		return result;
476	}
477
478	mutex_unlock(&UFS_SB(sb)->s_lock);
479	UFSD("EXIT (FAILED)\n");
480	return 0;
481}		
482
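/*
 * Charge 'frags' fragments to the inode, backing the charge out again if
 * the resulting i_blocks count no longer fits in 32 bits; the (u32) cast
 * below is the overflow check.
 */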
483static bool try_add_frags(struct inode *inode, unsigned frags)
484{
485	unsigned size = frags * i_blocksize(inode);
486	spin_lock(&inode->i_lock);
487	__inode_add_bytes(inode, size);
488	if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
489		__inode_sub_bytes(inode, size);
490		spin_unlock(&inode->i_lock);
491		return false;
492	}
493	spin_unlock(&inode->i_lock);
494	return true;
495}
496
497static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
498			     unsigned oldcount, unsigned newcount)
499{
500	struct super_block * sb;
501	struct ufs_sb_private_info * uspi;
502	struct ufs_cg_private_info * ucpi;
503	struct ufs_cylinder_group * ucg;
504	unsigned cgno, fragno, fragoff, count, fragsize, i;
505	
506	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
507	     (unsigned long long)fragment, oldcount, newcount);
508	
509	sb = inode->i_sb;
510	uspi = UFS_SB(sb)->s_uspi;
511	count = newcount - oldcount;
512	
513	cgno = ufs_dtog(uspi, fragment);
514	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
515		return 0;
516	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
517		return 0;
518	ucpi = ufs_load_cylinder (sb, cgno);
519	if (!ucpi)
520		return 0;
521	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
522	if (!ufs_cg_chkmagic(sb, ucg)) {
523		ufs_panic (sb, "ufs_add_fragments",
524			"internal error, bad magic number on cg %u", cgno);
525		return 0;
526	}
527
528	fragno = ufs_dtogd(uspi, fragment);
529	fragoff = ufs_fragnum (fragno);
530	for (i = oldcount; i < newcount; i++)
531		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
532			return 0;
533
534	if (!try_add_frags(inode, count))
535		return 0;
536	/*
537	 * Block can be extended
538	 */
539	ucg->cg_time = ufs_get_seconds(sb);
540	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
541		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
542			break;
543	fragsize = i - oldcount;
544	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
545		ufs_panic (sb, "ufs_add_fragments",
546			"internal error or corrupted bitmap on cg %u", cgno);
547	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
548	if (fragsize != count)
549		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
550	for (i = oldcount; i < newcount; i++)
551		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
552
553	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
554	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
555	uspi->cs_total.cs_nffree -= count;
556	
557	ubh_mark_buffer_dirty (USPI_UBH(uspi));
558	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
559	if (sb->s_flags & SB_SYNCHRONOUS)
560		ubh_sync_block(UCPI_UBH(ucpi));
561	ufs_mark_sb_dirty(sb);
562
563	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
564	
565	return fragment;
566}
567
568#define UFS_TEST_FREE_SPACE_CG \
569	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
570	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
571		goto cg_found; \
572	for (k = count; k < uspi->s_fpb; k++) \
573		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
574			goto cg_found; 
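/*
 * The macro above jumps to cg_found when cylinder group 'cgno' has either
 * a whole free block (cs_nbfree != 0) or a free fragment run of at least
 * 'count' fragments recorded in cg_frsum[].
 */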
575
576static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
577			       u64 goal, unsigned count, int *err)
578{
579	struct super_block * sb;
580	struct ufs_sb_private_info * uspi;
581	struct ufs_cg_private_info * ucpi;
582	struct ufs_cylinder_group * ucg;
583	unsigned oldcg, i, j, k, allocsize;
584	u64 result;
585	
586	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
587	     inode->i_ino, cgno, (unsigned long long)goal, count);
588
589	sb = inode->i_sb;
590	uspi = UFS_SB(sb)->s_uspi;
591	oldcg = cgno;
592	
 593	/*
 594	 * 1. search in the preferred cylinder group
 595	 */
596	UFS_TEST_FREE_SPACE_CG
597
598	/*
599	 * 2. quadratic rehash
600	 */
601	for (j = 1; j < uspi->s_ncg; j *= 2) {
602		cgno += j;
603		if (cgno >= uspi->s_ncg) 
604			cgno -= uspi->s_ncg;
605		UFS_TEST_FREE_SPACE_CG
606	}
607
 608	/*
 609	 * 3. brute force search
 610	 * We start at i = 2 (0 was checked in step 1, 1 in step 2)
 611	 */
612	cgno = (oldcg + 1) % uspi->s_ncg;
613	for (j = 2; j < uspi->s_ncg; j++) {
614		cgno++;
615		if (cgno >= uspi->s_ncg)
616			cgno = 0;
617		UFS_TEST_FREE_SPACE_CG
618	}
619	
620	UFSD("EXIT (FAILED)\n");
621	return 0;
622
623cg_found:
624	ucpi = ufs_load_cylinder (sb, cgno);
625	if (!ucpi)
626		return 0;
627	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
628	if (!ufs_cg_chkmagic(sb, ucg)) 
629		ufs_panic (sb, "ufs_alloc_fragments",
630			"internal error, bad magic number on cg %u", cgno);
631	ucg->cg_time = ufs_get_seconds(sb);
632
633	if (count == uspi->s_fpb) {
634		result = ufs_alloccg_block (inode, ucpi, goal, err);
635		if (result == INVBLOCK)
636			return 0;
637		goto succed;
638	}
639
640	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
641		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
642			break;
643	
644	if (allocsize == uspi->s_fpb) {
645		result = ufs_alloccg_block (inode, ucpi, goal, err);
646		if (result == INVBLOCK)
647			return 0;
648		goal = ufs_dtogd(uspi, result);
649		for (i = count; i < uspi->s_fpb; i++)
650			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
651		i = uspi->s_fpb - count;
652
653		inode_sub_bytes(inode, i << uspi->s_fshift);
654		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
655		uspi->cs_total.cs_nffree += i;
656		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
657		fs32_add(sb, &ucg->cg_frsum[i], 1);
658		goto succed;
659	}
660
661	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
662	if (result == INVBLOCK)
663		return 0;
664	if (!try_add_frags(inode, count))
665		return 0;
666	for (i = 0; i < count; i++)
667		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
668	
669	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
670	uspi->cs_total.cs_nffree -= count;
671	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
672	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
673
674	if (count != allocsize)
675		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
676
677succed:
678	ubh_mark_buffer_dirty (USPI_UBH(uspi));
679	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
680	if (sb->s_flags & SB_SYNCHRONOUS)
681		ubh_sync_block(UCPI_UBH(ucpi));
682	ufs_mark_sb_dirty(sb);
683
684	result += cgno * uspi->s_fpg;
685	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
686	return result;
687}
688
689static u64 ufs_alloccg_block(struct inode *inode,
690			     struct ufs_cg_private_info *ucpi,
691			     u64 goal, int *err)
692{
693	struct super_block * sb;
694	struct ufs_sb_private_info * uspi;
695	struct ufs_cylinder_group * ucg;
696	u64 result;
697
698	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
699
700	sb = inode->i_sb;
701	uspi = UFS_SB(sb)->s_uspi;
702	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
703
704	if (goal == 0) {
705		goal = ucpi->c_rotor;
706		goto norot;
707	}
708	goal = ufs_blknum (goal);
709	goal = ufs_dtogd(uspi, goal);
710	
711	/*
712	 * If the requested block is available, use it.
713	 */
714	if (ubh_isblockset(uspi, ucpi, goal)) {
715		result = goal;
716		goto gotit;
717	}
718	
719norot:	
720	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
721	if (result == INVBLOCK)
722		return INVBLOCK;
723	ucpi->c_rotor = result;
724gotit:
725	if (!try_add_frags(inode, uspi->s_fpb))
726		return 0;
727	ubh_clrblock(uspi, ucpi, result);
728	adjust_free_blocks(sb, ucg, ucpi, result, -1);
729	
730	UFSD("EXIT, result %llu\n", (unsigned long long)result);
731
732	return result;
733}
734
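/*
 * Scan 'size' bytes of the cylinder group bitmap starting at byte offset
 * 'begin' (the map may span several buffer heads of 'ubh') for a byte
 * whose 'table' entry has 'mask' set.  The return value counts the
 * matching byte plus everything after it, so the caller recovers the
 * match position as length minus the return value; 0 means no match.
 */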
735static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
736			  struct ufs_buffer_head *ubh,
737			  unsigned begin, unsigned size,
738			  unsigned char *table, unsigned char mask)
739{
740	unsigned rest, offset;
741	unsigned char *cp;
742	
743
744	offset = begin & ~uspi->s_fmask;
745	begin >>= uspi->s_fshift;
746	for (;;) {
747		if ((offset + size) < uspi->s_fsize)
748			rest = size;
749		else
750			rest = uspi->s_fsize - offset;
751		size -= rest;
752		cp = ubh->bh[begin]->b_data + offset;
753		while ((table[*cp++] & mask) == 0 && --rest)
754			;
755		if (rest || !size)
756			break;
757		begin++;
758		offset = 0;
759	}
760	return (size + rest);
761}
762
 763/*
 764 * Find a free run of the specified size in the specified cylinder group.
 765 * @sb: pointer to the super block
 766 * @ucpi: pointer to the cylinder group info
 767 * @goal: block near which we want to find the new one
 768 * @count: specified size (in fragments)
 769 */
770static u64 ufs_bitmap_search(struct super_block *sb,
771			     struct ufs_cg_private_info *ucpi,
772			     u64 goal, unsigned count)
773{
774	/*
775	 * Bit patterns for identifying fragments in the block map
776	 * used as ((map & mask_arr) == want_arr)
777	 */
778	static const int mask_arr[9] = {
779		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
780	};
781	static const int want_arr[9] = {
782		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
783	};
784	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
785	unsigned start, length, loc;
786	unsigned pos, want, blockmap, mask, end;
787	u64 result;
788
789	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
790	     (unsigned long long)goal, count);
791
792	if (goal)
793		start = ufs_dtogd(uspi, goal) >> 3;
794	else
795		start = ucpi->c_frotor >> 3;
796		
797	length = ((uspi->s_fpg + 7) >> 3) - start;
798	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
799		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
800		1 << (count - 1 + (uspi->s_fpb & 7))); 
801	if (loc == 0) {
802		length = start + 1;
803		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
804				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
805				ufs_fragtable_other,
806				1 << (count - 1 + (uspi->s_fpb & 7)));
807		if (loc == 0) {
808			ufs_error(sb, "ufs_bitmap_search",
809				  "bitmap corrupted on cg %u, start %u,"
810				  " length %u, count %u, freeoff %u\n",
811				  ucpi->c_cgx, start, length, count,
812				  ucpi->c_freeoff);
813			return INVBLOCK;
814		}
815		start = 0;
816	}
817	result = (start + length - loc) << 3;
818	ucpi->c_frotor = result;
819
820	/*
821	 * found the byte in the map
822	 */
823
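	/*
	 * The (mask, want) pair matches a run of exactly 'count' free bits
	 * bounded by allocated bits; shifting blockmap left by one supplies
	 * the zero boundary bit below fragment 0 of the block.
	 */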
824	for (end = result + 8; result < end; result += uspi->s_fpb) {
825		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
826		blockmap <<= 1;
827		mask = mask_arr[count];
828		want = want_arr[count];
829		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
830			if ((blockmap & mask) == want) {
831				UFSD("EXIT, result %llu\n",
832				     (unsigned long long)result);
833				return result + pos;
834 			}
835			mask <<= 1;
836			want <<= 1;
837 		}
838 	}
839
840	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
841		  ucpi->c_cgx);
842	UFSD("EXIT (FAILED)\n");
843	return INVBLOCK;
844}
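/*
 * The mask/want scan above is easiest to see in isolation.  A standalone
 * sketch (hypothetical find_exact_run(), not kernel code) applying the
 * same tables to a single 8-fragments-per-block map; it returns the first
 * position of a run of exactly 'count' free bits bounded by allocated
 * bits, or -1:
 */
static int find_exact_run(unsigned map, unsigned count)
{
	static const int mask_arr[9] = {
		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
	};
	static const int want_arr[9] = {
		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
	};
	unsigned mask = mask_arr[count];
	unsigned want = want_arr[count];
	unsigned pos;

	map <<= 1;	/* zero boundary bit below fragment 0 */
	for (pos = 0; pos <= 8 - count; pos++) {
		if ((map & mask) == want)
			return pos;	/* exact-fit run starts here */
		mask <<= 1;
		want <<= 1;
	}
	return -1;	/* no exact fit in this block */
}
/*
 * For example, find_exact_run(0x3c, 4) yields 2, while
 * find_exact_run(0x3c, 2) yields -1: the four-bit run is not an exact
 * fit for two fragments.
 */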
845
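/*
 * Maintain the 44BSD cluster map and the cluster-size summary counters
 * when the block containing 'frag' changes state: cnt is +1 on free,
 * -1 on allocation.  After flipping the block's bit, the free runs just
 * before and after it are measured and the counters for the old and the
 * merged cluster lengths are adjusted.
 */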
846static void ufs_clusteracct(struct super_block * sb,
847	struct ufs_cg_private_info * ucpi, unsigned frag, int cnt)
848{
849	struct ufs_sb_private_info * uspi = UFS_SB(sb)->s_uspi;
850	int i, start, end, forw, back;
851	unsigned blkno = ufs_fragstoblks(frag);
852	
853	if (uspi->s_contigsumsize <= 0)
854		return;
855
856	if (cnt > 0)
857		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
858	else
859		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
860
861	/*
862	 * Find the size of the cluster going forward.
863	 */
864	start = blkno + 1;
865	end = start + uspi->s_contigsumsize;
866	if ( end >= ucpi->c_nclusterblks)
867		end = ucpi->c_nclusterblks;
868	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
869	if (i > end)
870		i = end;
871	forw = i - start;
872	
873	/*
874	 * Find the size of the cluster going backward.
875	 */
876	start = blkno - 1;
877	end = start - uspi->s_contigsumsize;
878	if (end < 0 ) 
879		end = -1;
880	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
881	if ( i < end) 
882		i = end;
883	back = start - i;
884	
885	/*
886	 * Account for old cluster and the possibly new forward and
887	 * back clusters.
888	 */
889	i = back + forw + 1;
890	if (i > uspi->s_contigsumsize)
891		i = uspi->s_contigsumsize;
892	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
893	if (back > 0)
894		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
895	if (forw > 0)
896		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
897}
898
899
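/*
 * Each table below maps one byte of the free-fragment bitmap to a bitmask
 * of the free-run lengths occurring in it; ufs_bitmap_search() probes bit
 * (count - 1 + (s_fpb & 7)).  ufs_fragtable_8fpb covers the 8
 * fragments-per-block case, ufs_fragtable_other packs the encodings for
 * the smaller fragments-per-block sizes into one table.
 */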
900static unsigned char ufs_fragtable_8fpb[] = {
901	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
902	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
903	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
904	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
905	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,	
906	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
907	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
908	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
909	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
910	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
911	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
912	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
913	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
914	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
915	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
916	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
917};
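/*
 * As a sanity check on the encoding: each entry above is a bitmask with
 * bit (n - 1) set for every maximal run of n free (set) bits in the index
 * byte.  A small userspace sketch (hypothetical frag_pattern(), not
 * kernel code) regenerates the table:
 */
#include <stdio.h>

static unsigned char frag_pattern(unsigned byte)
{
	unsigned char out = 0;
	int bit, run = 0;

	for (bit = 0; bit <= 8; bit++) {
		if (bit < 8 && (byte >> bit) & 1) {
			run++;			/* still inside a free run */
		} else if (run) {
			out |= 1 << (run - 1);	/* record run length */
			run = 0;
		}
	}
	return out;
}

int main(void)
{
	int b;

	for (b = 0; b < 256; b++)
		printf("0x%02X%s", frag_pattern(b),
		       (b & 15) == 15 ? ",\n" : ", ");
	return 0;
}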
918
919static unsigned char ufs_fragtable_other[] = {
920	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
921	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
922	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
923	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
924	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
925	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
926	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
927	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
928	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
929	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
930	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
931	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
932	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
933	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
934	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
935	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
936};
v3.5.6
 
  1/*
  2 *  linux/fs/ufs/balloc.c
  3 *
  4 * Copyright (C) 1998
  5 * Daniel Pirkl <daniel.pirkl@email.cz>
  6 * Charles University, Faculty of Mathematics and Physics
  7 *
  8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
  9 */
 10
 11#include <linux/fs.h>
 12#include <linux/stat.h>
 13#include <linux/time.h>
 14#include <linux/string.h>
 15#include <linux/buffer_head.h>
 16#include <linux/capability.h>
 17#include <linux/bitops.h>
 18#include <asm/byteorder.h>
 19
 20#include "ufs_fs.h"
 21#include "ufs.h"
 22#include "swab.h"
 23#include "util.h"
 24
 25#define INVBLOCK ((u64)-1L)
 26
 27static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
 28static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
 29static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
 30static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
 31static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
 32static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
 33
 34/*
 35 * Free 'count' fragments from fragment number 'fragment'
 36 */
 37void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 38{
 39	struct super_block * sb;
 40	struct ufs_sb_private_info * uspi;
 41	struct ufs_super_block_first * usb1;
 42	struct ufs_cg_private_info * ucpi;
 43	struct ufs_cylinder_group * ucg;
 44	unsigned cgno, bit, end_bit, bbase, blkmap, i;
 45	u64 blkno;
 46	
 47	sb = inode->i_sb;
 48	uspi = UFS_SB(sb)->s_uspi;
 49	usb1 = ubh_get_usb_first(uspi);
 50	
 51	UFSD("ENTER, fragment %llu, count %u\n",
 52	     (unsigned long long)fragment, count);
 53	
 54	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
 55		ufs_error (sb, "ufs_free_fragments", "internal error");
 56	
 57	lock_super(sb);
 58	
 59	cgno = ufs_dtog(uspi, fragment);
 60	bit = ufs_dtogd(uspi, fragment);
 61	if (cgno >= uspi->s_ncg) {
 62		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
 63		goto failed;
 64	}
 65		
 66	ucpi = ufs_load_cylinder (sb, cgno);
 67	if (!ucpi) 
 68		goto failed;
 69	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 70	if (!ufs_cg_chkmagic(sb, ucg)) {
 71		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
 72		goto failed;
 73	}
 74
 75	end_bit = bit + count;
 76	bbase = ufs_blknum (bit);
 77	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 78	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
 79	for (i = bit; i < end_bit; i++) {
 80		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
 81			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
 82		else 
 83			ufs_error (sb, "ufs_free_fragments",
 84				   "bit already cleared for fragment %u", i);
 85	}
 86	
 87	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 88	uspi->cs_total.cs_nffree += count;
 89	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
 90	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 91	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
 92
  93	/*
  94	 * Try to reassemble the freed fragments into a whole block
  95	 */
 96	blkno = ufs_fragstoblks (bbase);
 97	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
 98		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
 99		uspi->cs_total.cs_nffree -= uspi->s_fpb;
100		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
101		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
102			ufs_clusteracct (sb, ucpi, blkno, 1);
103		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
104		uspi->cs_total.cs_nbfree++;
105		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
106		if (uspi->fs_magic != UFS2_MAGIC) {
107			unsigned cylno = ufs_cbtocylno (bbase);
108
109			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
110						  ufs_cbtorpos(bbase)), 1);
111			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
112		}
113	}
114	
115	ubh_mark_buffer_dirty (USPI_UBH(uspi));
116	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
117	if (sb->s_flags & MS_SYNCHRONOUS)
118		ubh_sync_block(UCPI_UBH(ucpi));
119	sb->s_dirt = 1;
120	
121	unlock_super (sb);
122	UFSD("EXIT\n");
123	return;
124
125failed:
126	unlock_super (sb);
127	UFSD("EXIT (FAILED)\n");
128	return;
129}
130
131/*
132 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
133 */
134void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
135{
136	struct super_block * sb;
137	struct ufs_sb_private_info * uspi;
138	struct ufs_super_block_first * usb1;
139	struct ufs_cg_private_info * ucpi;
140	struct ufs_cylinder_group * ucg;
141	unsigned overflow, cgno, bit, end_bit, i;
142	u64 blkno;
143	
144	sb = inode->i_sb;
145	uspi = UFS_SB(sb)->s_uspi;
146	usb1 = ubh_get_usb_first(uspi);
147
148	UFSD("ENTER, fragment %llu, count %u\n",
149	     (unsigned long long)fragment, count);
150	
151	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
152		ufs_error (sb, "ufs_free_blocks", "internal error, "
153			   "fragment %llu, count %u\n",
154			   (unsigned long long)fragment, count);
155		goto failed;
156	}
157
158	lock_super(sb);
159	
160do_more:
161	overflow = 0;
162	cgno = ufs_dtog(uspi, fragment);
163	bit = ufs_dtogd(uspi, fragment);
164	if (cgno >= uspi->s_ncg) {
165		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
166		goto failed_unlock;
167	}
168	end_bit = bit + count;
169	if (end_bit > uspi->s_fpg) {
170		overflow = bit + count - uspi->s_fpg;
171		count -= overflow;
172		end_bit -= overflow;
173	}
174
175	ucpi = ufs_load_cylinder (sb, cgno);
176	if (!ucpi) 
177		goto failed_unlock;
178	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
179	if (!ufs_cg_chkmagic(sb, ucg)) {
180		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
181		goto failed_unlock;
182	}
183
184	for (i = bit; i < end_bit; i += uspi->s_fpb) {
185		blkno = ufs_fragstoblks(i);
186		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
187			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
188		}
189		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
190		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
191			ufs_clusteracct (sb, ucpi, blkno, 1);
192
193		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
194		uspi->cs_total.cs_nbfree++;
195		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
196
197		if (uspi->fs_magic != UFS2_MAGIC) {
198			unsigned cylno = ufs_cbtocylno(i);
199
200			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
201						  ufs_cbtorpos(i)), 1);
202			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
203		}
204	}
205
206	ubh_mark_buffer_dirty (USPI_UBH(uspi));
207	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
208	if (sb->s_flags & MS_SYNCHRONOUS)
209		ubh_sync_block(UCPI_UBH(ucpi));
210
211	if (overflow) {
212		fragment += count;
213		count = overflow;
214		goto do_more;
215	}
216
217	sb->s_dirt = 1;
218	unlock_super (sb);
219	UFSD("EXIT\n");
220	return;
221
222failed_unlock:
223	unlock_super (sb);
224failed:
225	UFSD("EXIT (FAILED)\n");
226	return;
227}
228
 229/*
 230 * Modify the inode page cache as follows:
 231 * have - buffers with b_blocknr equal to oldb...oldb+count-1
 232 * get  - buffers with b_blocknr equal to newb...newb+count-1
 233 * We also assume that blocks oldb...oldb+count-1 are
 234 * situated at the end of the file.
 235 *
 236 * We can get here from ufs_writepage or ufs_prepare_write;
 237 * locked_page is an argument of those functions, so it is already locked.
 238 */
239static void ufs_change_blocknr(struct inode *inode, sector_t beg,
240			       unsigned int count, sector_t oldb,
241			       sector_t newb, struct page *locked_page)
242{
243	const unsigned blks_per_page =
244		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
245	const unsigned mask = blks_per_page - 1;
246	struct address_space * const mapping = inode->i_mapping;
247	pgoff_t index, cur_index, last_index;
248	unsigned pos, j, lblock;
249	sector_t end, i;
250	struct page *page;
251	struct buffer_head *head, *bh;
252
253	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
254	      inode->i_ino, count,
255	     (unsigned long long)oldb, (unsigned long long)newb);
256
257	BUG_ON(!locked_page);
258	BUG_ON(!PageLocked(locked_page));
259
260	cur_index = locked_page->index;
261	end = count + beg;
262	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
263	for (i = beg; i < end; i = (i | mask) + 1) {
264		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
265
266		if (likely(cur_index != index)) {
267			page = ufs_get_locked_page(mapping, index);
268			if (!page)/* it was truncated */
269				continue;
270			if (IS_ERR(page)) {/* or EIO */
271				ufs_error(inode->i_sb, __func__,
272					  "read of page %llu failed\n",
273					  (unsigned long long)index);
274				continue;
275			}
276		} else
277			page = locked_page;
278
279		head = page_buffers(page);
280		bh = head;
281		pos = i & mask;
282		for (j = 0; j < pos; ++j)
283			bh = bh->b_this_page;
284
285
286		if (unlikely(index == last_index))
287			lblock = end & mask;
288		else
289			lblock = blks_per_page;
290
291		do {
292			if (j >= lblock)
293				break;
294			pos = (i - beg) + j;
295
296			if (!buffer_mapped(bh))
297					map_bh(bh, inode->i_sb, oldb + pos);
298			if (!buffer_uptodate(bh)) {
299				ll_rw_block(READ, 1, &bh);
300				wait_on_buffer(bh);
301				if (!buffer_uptodate(bh)) {
302					ufs_error(inode->i_sb, __func__,
303						  "read of block failed\n");
304					break;
305				}
306			}
307
308			UFSD(" change from %llu to %llu, pos %u\n",
309			     (unsigned long long)(pos + oldb),
310			     (unsigned long long)(pos + newb), pos);
311
312			bh->b_blocknr = newb + pos;
313			unmap_underlying_metadata(bh->b_bdev,
314						  bh->b_blocknr);
315			mark_buffer_dirty(bh);
316			++j;
317			bh = bh->b_this_page;
318		} while (bh != head);
319
320		if (likely(cur_index != index))
321			ufs_put_locked_page(page);
322 	}
323	UFSD("EXIT\n");
324}
325
326static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
327			    int sync)
328{
329	struct buffer_head *bh;
330	sector_t end = beg + n;
331
332	for (; beg < end; ++beg) {
333		bh = sb_getblk(inode->i_sb, beg);
334		lock_buffer(bh);
335		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
336		set_buffer_uptodate(bh);
337		mark_buffer_dirty(bh);
338		unlock_buffer(bh);
339		if (IS_SYNC(inode) || sync)
340			sync_dirty_buffer(bh);
341		brelse(bh);
342	}
343}
344
345u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
346			   u64 goal, unsigned count, int *err,
347			   struct page *locked_page)
348{
349	struct super_block * sb;
350	struct ufs_sb_private_info * uspi;
351	struct ufs_super_block_first * usb1;
352	unsigned cgno, oldcount, newcount;
353	u64 tmp, request, result;
354	
355	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
356	     inode->i_ino, (unsigned long long)fragment,
357	     (unsigned long long)goal, count);
358	
359	sb = inode->i_sb;
360	uspi = UFS_SB(sb)->s_uspi;
361	usb1 = ubh_get_usb_first(uspi);
362	*err = -ENOSPC;
363
364	lock_super (sb);
365	tmp = ufs_data_ptr_to_cpu(sb, p);
366
367	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
368		ufs_warning(sb, "ufs_new_fragments", "internal warning"
369			    " fragment %llu, count %u",
370			    (unsigned long long)fragment, count);
371		count = uspi->s_fpb - ufs_fragnum(fragment); 
372	}
373	oldcount = ufs_fragnum (fragment);
374	newcount = oldcount + count;
375
376	/*
377	 * Somebody else has just allocated our fragments
378	 */
379	if (oldcount) {
380		if (!tmp) {
381			ufs_error(sb, "ufs_new_fragments", "internal error, "
382				  "fragment %llu, tmp %llu\n",
383				  (unsigned long long)fragment,
384				  (unsigned long long)tmp);
385			unlock_super(sb);
386			return INVBLOCK;
387		}
388		if (fragment < UFS_I(inode)->i_lastfrag) {
389			UFSD("EXIT (ALREADY ALLOCATED)\n");
390			unlock_super (sb);
391			return 0;
392		}
393	}
394	else {
395		if (tmp) {
396			UFSD("EXIT (ALREADY ALLOCATED)\n");
397			unlock_super(sb);
398			return 0;
399		}
400	}
401
 402	/*
 403	 * There is not enough space for the user on the device
 404	 */
405	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
406		unlock_super (sb);
407		UFSD("EXIT (FAILED)\n");
408		return 0;
409	}
410
411	if (goal >= uspi->s_size) 
412		goal = 0;
413	if (goal == 0) 
414		cgno = ufs_inotocg (inode->i_ino);
415	else
416		cgno = ufs_dtog(uspi, goal);
417	 
418	/*
419	 * allocate new fragment
420	 */
421	if (oldcount == 0) {
422		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
423		if (result) {
424			ufs_cpu_to_data_ptr(sb, p, result);
425			*err = 0;
426			UFS_I(inode)->i_lastfrag =
427				max(UFS_I(inode)->i_lastfrag, fragment + count);
428			ufs_clear_frags(inode, result + oldcount,
429					newcount - oldcount, locked_page != NULL);
430		}
431		unlock_super(sb);
432		UFSD("EXIT, result %llu\n", (unsigned long long)result);
433		return result;
434	}
435
436	/*
437	 * resize block
438	 */
439	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
440	if (result) {
441		*err = 0;
442		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
443						fragment + count);
444		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
445				locked_page != NULL);
446		unlock_super(sb);
447		UFSD("EXIT, result %llu\n", (unsigned long long)result);
448		return result;
449	}
450
451	/*
452	 * allocate new block and move data
453	 */
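	/*
	 * fs_optim picks the allocation policy: UFS_OPTSPACE requests an
	 * exact fit, UFS_OPTTIME a whole block.  The policy flips once the
	 * free fragment count crosses thresholds derived from s_minfree.
	 */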
454	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
455	    case UFS_OPTSPACE:
456		request = newcount;
457		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
458		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
459			break;
460		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
461		break;
462	    default:
463		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
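		/* fall through: unknown values are reset to and handled as UFS_OPTTIME */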
464	
465	    case UFS_OPTTIME:
466		request = uspi->s_fpb;
467		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
468		    (uspi->s_minfree - 2) / 100)
469			break;
470		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTSPACE);
471		break;
472	}
473	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
474	if (result) {
475		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
476				locked_page != NULL);
477		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
478				   uspi->s_sbbase + tmp,
479				   uspi->s_sbbase + result, locked_page);
480		ufs_cpu_to_data_ptr(sb, p, result);
481		*err = 0;
482		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
483						fragment + count);
484		unlock_super(sb);
485		if (newcount < request)
486			ufs_free_fragments (inode, result + newcount, request - newcount);
487		ufs_free_fragments (inode, tmp, oldcount);
488		UFSD("EXIT, result %llu\n", (unsigned long long)result);
489		return result;
490	}
491
492	unlock_super(sb);
493	UFSD("EXIT (FAILED)\n");
494	return 0;
495}		
496
497static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
498			     unsigned oldcount, unsigned newcount, int *err)
499{
500	struct super_block * sb;
501	struct ufs_sb_private_info * uspi;
502	struct ufs_super_block_first * usb1;
503	struct ufs_cg_private_info * ucpi;
504	struct ufs_cylinder_group * ucg;
505	unsigned cgno, fragno, fragoff, count, fragsize, i;
506	
507	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
508	     (unsigned long long)fragment, oldcount, newcount);
509	
510	sb = inode->i_sb;
511	uspi = UFS_SB(sb)->s_uspi;
512	usb1 = ubh_get_usb_first (uspi);
513	count = newcount - oldcount;
514	
515	cgno = ufs_dtog(uspi, fragment);
516	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
517		return 0;
518	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
519		return 0;
520	ucpi = ufs_load_cylinder (sb, cgno);
521	if (!ucpi)
522		return 0;
523	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
524	if (!ufs_cg_chkmagic(sb, ucg)) {
525		ufs_panic (sb, "ufs_add_fragments",
526			"internal error, bad magic number on cg %u", cgno);
527		return 0;
528	}
529
530	fragno = ufs_dtogd(uspi, fragment);
531	fragoff = ufs_fragnum (fragno);
532	for (i = oldcount; i < newcount; i++)
533		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
534			return 0;
535	/*
536	 * Block can be extended
537	 */
538	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
539	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
540		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
541			break;
542	fragsize = i - oldcount;
543	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
544		ufs_panic (sb, "ufs_add_fragments",
545			"internal error or corrupted bitmap on cg %u", cgno);
546	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
547	if (fragsize != count)
548		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
549	for (i = oldcount; i < newcount; i++)
550		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
551
552	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
553	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
554	uspi->cs_total.cs_nffree -= count;
555	
556	ubh_mark_buffer_dirty (USPI_UBH(uspi));
557	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
558	if (sb->s_flags & MS_SYNCHRONOUS)
559		ubh_sync_block(UCPI_UBH(ucpi));
560	sb->s_dirt = 1;
561
562	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
563	
564	return fragment;
565}
566
567#define UFS_TEST_FREE_SPACE_CG \
568	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
569	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
570		goto cg_found; \
571	for (k = count; k < uspi->s_fpb; k++) \
572		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
573			goto cg_found; 
574
575static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
576			       u64 goal, unsigned count, int *err)
577{
578	struct super_block * sb;
579	struct ufs_sb_private_info * uspi;
580	struct ufs_super_block_first * usb1;
581	struct ufs_cg_private_info * ucpi;
582	struct ufs_cylinder_group * ucg;
583	unsigned oldcg, i, j, k, allocsize;
584	u64 result;
585	
586	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
587	     inode->i_ino, cgno, (unsigned long long)goal, count);
588
589	sb = inode->i_sb;
590	uspi = UFS_SB(sb)->s_uspi;
591	usb1 = ubh_get_usb_first(uspi);
592	oldcg = cgno;
593	
 594	/*
 595	 * 1. search in the preferred cylinder group
 596	 */
597	UFS_TEST_FREE_SPACE_CG
598
599	/*
600	 * 2. quadratic rehash
601	 */
602	for (j = 1; j < uspi->s_ncg; j *= 2) {
603		cgno += j;
604		if (cgno >= uspi->s_ncg) 
605			cgno -= uspi->s_ncg;
606		UFS_TEST_FREE_SPACE_CG
607	}
608
 609	/*
 610	 * 3. brute force search
 611	 * We start at i = 2 (0 was checked in step 1, 1 in step 2)
 612	 */
613	cgno = (oldcg + 1) % uspi->s_ncg;
614	for (j = 2; j < uspi->s_ncg; j++) {
615		cgno++;
616		if (cgno >= uspi->s_ncg)
617			cgno = 0;
618		UFS_TEST_FREE_SPACE_CG
619	}
620	
621	UFSD("EXIT (FAILED)\n");
622	return 0;
623
624cg_found:
625	ucpi = ufs_load_cylinder (sb, cgno);
626	if (!ucpi)
627		return 0;
628	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
629	if (!ufs_cg_chkmagic(sb, ucg)) 
630		ufs_panic (sb, "ufs_alloc_fragments",
631			"internal error, bad magic number on cg %u", cgno);
632	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
633
634	if (count == uspi->s_fpb) {
635		result = ufs_alloccg_block (inode, ucpi, goal, err);
636		if (result == INVBLOCK)
637			return 0;
638		goto succed;
639	}
640
641	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
642		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
643			break;
644	
645	if (allocsize == uspi->s_fpb) {
646		result = ufs_alloccg_block (inode, ucpi, goal, err);
647		if (result == INVBLOCK)
648			return 0;
649		goal = ufs_dtogd(uspi, result);
650		for (i = count; i < uspi->s_fpb; i++)
651			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
652		i = uspi->s_fpb - count;
653
654		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
655		uspi->cs_total.cs_nffree += i;
656		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
657		fs32_add(sb, &ucg->cg_frsum[i], 1);
658		goto succed;
659	}
660
661	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
662	if (result == INVBLOCK)
663		return 0;
664	for (i = 0; i < count; i++)
665		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
666	
667	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
668	uspi->cs_total.cs_nffree -= count;
669	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
670	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
671
672	if (count != allocsize)
673		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
674
675succed:
676	ubh_mark_buffer_dirty (USPI_UBH(uspi));
677	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
678	if (sb->s_flags & MS_SYNCHRONOUS)
679		ubh_sync_block(UCPI_UBH(ucpi));
680	sb->s_dirt = 1;
681
682	result += cgno * uspi->s_fpg;
683	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
684	return result;
685}
686
687static u64 ufs_alloccg_block(struct inode *inode,
688			     struct ufs_cg_private_info *ucpi,
689			     u64 goal, int *err)
690{
691	struct super_block * sb;
692	struct ufs_sb_private_info * uspi;
693	struct ufs_super_block_first * usb1;
694	struct ufs_cylinder_group * ucg;
695	u64 result, blkno;
696
697	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
698
699	sb = inode->i_sb;
700	uspi = UFS_SB(sb)->s_uspi;
701	usb1 = ubh_get_usb_first(uspi);
702	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
703
704	if (goal == 0) {
705		goal = ucpi->c_rotor;
706		goto norot;
707	}
708	goal = ufs_blknum (goal);
709	goal = ufs_dtogd(uspi, goal);
710	
711	/*
712	 * If the requested block is available, use it.
713	 */
714	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
715		result = goal;
716		goto gotit;
717	}
718	
719norot:	
720	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
721	if (result == INVBLOCK)
722		return INVBLOCK;
723	ucpi->c_rotor = result;
724gotit:
725	blkno = ufs_fragstoblks(result);
726	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
727	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
728		ufs_clusteracct (sb, ucpi, blkno, -1);
729
730	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
731	uspi->cs_total.cs_nbfree--;
732	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
733
734	if (uspi->fs_magic != UFS2_MAGIC) {
735		unsigned cylno = ufs_cbtocylno((unsigned)result);
736
737		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
738					  ufs_cbtorpos((unsigned)result)), 1);
739		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
740	}
741	
742	UFSD("EXIT, result %llu\n", (unsigned long long)result);
743
744	return result;
745}
746
747static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
748			  struct ufs_buffer_head *ubh,
749			  unsigned begin, unsigned size,
750			  unsigned char *table, unsigned char mask)
751{
752	unsigned rest, offset;
753	unsigned char *cp;
754	
755
756	offset = begin & ~uspi->s_fmask;
757	begin >>= uspi->s_fshift;
758	for (;;) {
759		if ((offset + size) < uspi->s_fsize)
760			rest = size;
761		else
762			rest = uspi->s_fsize - offset;
763		size -= rest;
764		cp = ubh->bh[begin]->b_data + offset;
765		while ((table[*cp++] & mask) == 0 && --rest)
766			;
767		if (rest || !size)
768			break;
769		begin++;
770		offset = 0;
771	}
772	return (size + rest);
773}
774
 775/*
 776 * Find a free run of the specified size in the specified cylinder group.
 777 * @sb: pointer to the super block
 778 * @ucpi: pointer to the cylinder group info
 779 * @goal: block near which we want to find the new one
 780 * @count: specified size (in fragments)
 781 */
782static u64 ufs_bitmap_search(struct super_block *sb,
783			     struct ufs_cg_private_info *ucpi,
784			     u64 goal, unsigned count)
785{
786	/*
787	 * Bit patterns for identifying fragments in the block map
788	 * used as ((map & mask_arr) == want_arr)
789	 */
790	static const int mask_arr[9] = {
791		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
792	};
793	static const int want_arr[9] = {
794		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
795	};
796	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
797	struct ufs_super_block_first *usb1;
798	struct ufs_cylinder_group *ucg;
799	unsigned start, length, loc;
800	unsigned pos, want, blockmap, mask, end;
801	u64 result;
802
803	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
804	     (unsigned long long)goal, count);
805
806	usb1 = ubh_get_usb_first (uspi);
807	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
808
809	if (goal)
810		start = ufs_dtogd(uspi, goal) >> 3;
811	else
812		start = ucpi->c_frotor >> 3;
813		
814	length = ((uspi->s_fpg + 7) >> 3) - start;
815	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
816		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
817		1 << (count - 1 + (uspi->s_fpb & 7))); 
818	if (loc == 0) {
819		length = start + 1;
820		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
821				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
822				ufs_fragtable_other,
823				1 << (count - 1 + (uspi->s_fpb & 7)));
824		if (loc == 0) {
825			ufs_error(sb, "ufs_bitmap_search",
826				  "bitmap corrupted on cg %u, start %u,"
827				  " length %u, count %u, freeoff %u\n",
828				  ucpi->c_cgx, start, length, count,
829				  ucpi->c_freeoff);
830			return INVBLOCK;
831		}
832		start = 0;
833	}
834	result = (start + length - loc) << 3;
835	ucpi->c_frotor = result;
836
837	/*
838	 * found the byte in the map
839	 */
840
841	for (end = result + 8; result < end; result += uspi->s_fpb) {
842		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
843		blockmap <<= 1;
844		mask = mask_arr[count];
845		want = want_arr[count];
846		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
847			if ((blockmap & mask) == want) {
848				UFSD("EXIT, result %llu\n",
849				     (unsigned long long)result);
850				return result + pos;
851 			}
852			mask <<= 1;
853			want <<= 1;
854 		}
855 	}
856
857	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
858		  ucpi->c_cgx);
859	UFSD("EXIT (FAILED)\n");
860	return INVBLOCK;
861}
862
863static void ufs_clusteracct(struct super_block * sb,
864	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
865{
866	struct ufs_sb_private_info * uspi;
867	int i, start, end, forw, back;
868	
869	uspi = UFS_SB(sb)->s_uspi;
870	if (uspi->s_contigsumsize <= 0)
871		return;
872
873	if (cnt > 0)
874		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
875	else
876		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
877
878	/*
879	 * Find the size of the cluster going forward.
880	 */
881	start = blkno + 1;
882	end = start + uspi->s_contigsumsize;
883	if ( end >= ucpi->c_nclusterblks)
884		end = ucpi->c_nclusterblks;
885	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
886	if (i > end)
887		i = end;
888	forw = i - start;
889	
890	/*
891	 * Find the size of the cluster going backward.
892	 */
893	start = blkno - 1;
894	end = start - uspi->s_contigsumsize;
895	if (end < 0 ) 
896		end = -1;
897	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
898	if ( i < end) 
899		i = end;
900	back = start - i;
901	
902	/*
903	 * Account for old cluster and the possibly new forward and
904	 * back clusters.
905	 */
906	i = back + forw + 1;
907	if (i > uspi->s_contigsumsize)
908		i = uspi->s_contigsumsize;
909	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
910	if (back > 0)
911		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
912	if (forw > 0)
913		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
914}
915
916
917static unsigned char ufs_fragtable_8fpb[] = {
918	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
919	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
920	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
921	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
922	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,	
923	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
924	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
925	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
926	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
927	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
928	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
929	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
930	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
931	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
932	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
933	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
934};
935
936static unsigned char ufs_fragtable_other[] = {
937	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
938	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
939	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
940	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
941	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
942	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
943	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
944	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
945	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
946	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
947	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
948	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
949	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
950	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
951	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
952	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
953};