linux/fs/ufs/balloc.c at v4.10.11
  1/*
  2 *  linux/fs/ufs/balloc.c
  3 *
  4 * Copyright (C) 1998
  5 * Daniel Pirkl <daniel.pirkl@email.cz>
  6 * Charles University, Faculty of Mathematics and Physics
  7 *
  8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
  9 */
 10
 11#include <linux/fs.h>
 12#include <linux/stat.h>
 13#include <linux/time.h>
 14#include <linux/string.h>
 15#include <linux/buffer_head.h>
 16#include <linux/capability.h>
 17#include <linux/bitops.h>
 18#include <linux/bio.h>
 19#include <asm/byteorder.h>
 20
 21#include "ufs_fs.h"
 22#include "ufs.h"
 23#include "swab.h"
 24#include "util.h"
 25
 26#define INVBLOCK ((u64)-1L)
 27
 28static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned);
 29static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
 30static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
 31static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
 32static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
 33static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
 34
 35/*
 36 * Free 'count' fragments from fragment number 'fragment'
 37 */
 38void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 39{
 40	struct super_block * sb;
 41	struct ufs_sb_private_info * uspi;
 42	struct ufs_cg_private_info * ucpi;
 43	struct ufs_cylinder_group * ucg;
 44	unsigned cgno, bit, end_bit, bbase, blkmap, i;
 45	u64 blkno;
 46	
 47	sb = inode->i_sb;
 48	uspi = UFS_SB(sb)->s_uspi;
 49	
 50	UFSD("ENTER, fragment %llu, count %u\n",
 51	     (unsigned long long)fragment, count);
 52	
 53	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
 54		ufs_error (sb, "ufs_free_fragments", "internal error");
 55
 56	mutex_lock(&UFS_SB(sb)->s_lock);
 57	
 58	cgno = ufs_dtog(uspi, fragment);
 59	bit = ufs_dtogd(uspi, fragment);
 60	if (cgno >= uspi->s_ncg) {
 61		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
 62		goto failed;
 63	}
 64		
 65	ucpi = ufs_load_cylinder (sb, cgno);
 66	if (!ucpi) 
 67		goto failed;
 68	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 69	if (!ufs_cg_chkmagic(sb, ucg)) {
 70		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
 71		goto failed;
 72	}
 73
 74	end_bit = bit + count;
 75	bbase = ufs_blknum (bit);
 76	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 77	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
 78	for (i = bit; i < end_bit; i++) {
 79		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
 80			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
 81		else 
 82			ufs_error (sb, "ufs_free_fragments",
 83				   "bit already cleared for fragment %u", i);
 84	}
 85	
 86	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 87	uspi->cs_total.cs_nffree += count;
 88	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
 89	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 90	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
 91
  92	/*
  93	 * Try to reassemble the free fragments into a whole block
  94	 */
 95	blkno = ufs_fragstoblks (bbase);
 96	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
 97		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
 98		uspi->cs_total.cs_nffree -= uspi->s_fpb;
 99		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
100		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
101			ufs_clusteracct (sb, ucpi, blkno, 1);
102		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
103		uspi->cs_total.cs_nbfree++;
104		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
105		if (uspi->fs_magic != UFS2_MAGIC) {
106			unsigned cylno = ufs_cbtocylno (bbase);
107
108			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
109						  ufs_cbtorpos(bbase)), 1);
110			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
111		}
112	}
113	
114	ubh_mark_buffer_dirty (USPI_UBH(uspi));
115	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
116	if (sb->s_flags & MS_SYNCHRONOUS)
117		ubh_sync_block(UCPI_UBH(ucpi));
118	ufs_mark_sb_dirty(sb);
119
120	mutex_unlock(&UFS_SB(sb)->s_lock);
121	UFSD("EXIT\n");
122	return;
123
124failed:
125	mutex_unlock(&UFS_SB(sb)->s_lock);
126	UFSD("EXIT (FAILED)\n");
127	return;
128}
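
The core of ufs_free_fragments() above is plain bitmap arithmetic: set the per-fragment free bits, then check whether the enclosing block became entirely free so it can be promoted from the fragment counters (cs_nffree) to the block counters (cs_nbfree). Below is a minimal userspace sketch of that idea, assuming a toy bitmap in which a set bit means "fragment free" and 8 fragments per block; FPB, is_free() and mark_free() are illustrative names, not kernel API.

#include <stdio.h>

#define FPB 8				/* fragments per block, like uspi->s_fpb */

static unsigned char freemap[4];	/* 4 blocks = 32 fragments */

static int is_free(unsigned bit)    { return freemap[bit >> 3] >> (bit & 7) & 1; }
static void mark_free(unsigned bit) { freemap[bit >> 3] |= 1u << (bit & 7); }

int main(void)
{
	unsigned fragment = 8, count = 8;	/* free fragments 8..15 */
	unsigned bbase = fragment & ~(FPB - 1);	/* like ufs_blknum() */
	unsigned i, whole = 1;

	for (i = fragment; i < fragment + count; i++) {
		if (is_free(i))
			fprintf(stderr, "fragment %u already free\n", i);
		else
			mark_free(i);
	}
	/* did the whole enclosing block become free? */
	for (i = bbase; i < bbase + FPB; i++)
		whole &= is_free(i);
	printf("block at fragment %u is %sentirely free\n",
	       bbase, whole ? "" : "not ");
	return 0;
}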
129
130/*
131 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
132 */
133void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
134{
135	struct super_block * sb;
136	struct ufs_sb_private_info * uspi;
137	struct ufs_cg_private_info * ucpi;
138	struct ufs_cylinder_group * ucg;
139	unsigned overflow, cgno, bit, end_bit, i;
140	u64 blkno;
141	
142	sb = inode->i_sb;
143	uspi = UFS_SB(sb)->s_uspi;
144
145	UFSD("ENTER, fragment %llu, count %u\n",
146	     (unsigned long long)fragment, count);
147	
148	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
149		ufs_error (sb, "ufs_free_blocks", "internal error, "
150			   "fragment %llu, count %u\n",
151			   (unsigned long long)fragment, count);
152		goto failed;
153	}
154
155	mutex_lock(&UFS_SB(sb)->s_lock);
156	
157do_more:
158	overflow = 0;
159	cgno = ufs_dtog(uspi, fragment);
160	bit = ufs_dtogd(uspi, fragment);
161	if (cgno >= uspi->s_ncg) {
162		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
163		goto failed_unlock;
164	}
165	end_bit = bit + count;
166	if (end_bit > uspi->s_fpg) {
167		overflow = bit + count - uspi->s_fpg;
168		count -= overflow;
169		end_bit -= overflow;
170	}
171
172	ucpi = ufs_load_cylinder (sb, cgno);
173	if (!ucpi) 
174		goto failed_unlock;
175	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
176	if (!ufs_cg_chkmagic(sb, ucg)) {
177		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
178		goto failed_unlock;
179	}
180
181	for (i = bit; i < end_bit; i += uspi->s_fpb) {
182		blkno = ufs_fragstoblks(i);
183		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
184			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
185		}
186		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
187		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
188			ufs_clusteracct (sb, ucpi, blkno, 1);
189
190		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
191		uspi->cs_total.cs_nbfree++;
192		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
193
194		if (uspi->fs_magic != UFS2_MAGIC) {
195			unsigned cylno = ufs_cbtocylno(i);
196
197			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
198						  ufs_cbtorpos(i)), 1);
199			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
200		}
201	}
202
203	ubh_mark_buffer_dirty (USPI_UBH(uspi));
204	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
205	if (sb->s_flags & MS_SYNCHRONOUS)
206		ubh_sync_block(UCPI_UBH(ucpi));
207
208	if (overflow) {
209		fragment += count;
210		count = overflow;
211		goto do_more;
212	}
213
214	ufs_mark_sb_dirty(sb);
215	mutex_unlock(&UFS_SB(sb)->s_lock);
216	UFSD("EXIT\n");
217	return;
218
219failed_unlock:
220	mutex_unlock(&UFS_SB(sb)->s_lock);
221failed:
222	UFSD("EXIT (FAILED)\n");
223	return;
224}
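
The do_more/overflow loop above trims a request that crosses a cylinder-group boundary down to the current group and retries the remainder in the next one. A userspace sketch of just that splitting, with the toy constant FPG standing in for uspi->s_fpg and the divide/modulo mirroring ufs_dtog()/ufs_dtogd():

#include <stdio.h>

#define FPG 32	/* fragments per cylinder group (toy value) */

int main(void)
{
	unsigned long fragment = 24;
	unsigned count = 20;	/* spans groups 0 and 1 */

	while (count) {
		unsigned cgno = fragment / FPG;	/* ufs_dtog()  */
		unsigned bit = fragment % FPG;	/* ufs_dtogd() */
		unsigned overflow = 0;

		if (bit + count > FPG) {	/* crosses the group end */
			overflow = bit + count - FPG;
			count -= overflow;
		}
		printf("free %u fragments at bit %u of cg %u\n",
		       count, bit, cgno);
		fragment += count;	/* retry the remainder, like goto do_more */
		count = overflow;
	}
	return 0;
}

With these values it prints one line for the 8 fragments left in cg 0 and one for the 12 that spill into cg 1.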
225
 226/*
 227 * Modify the inode page cache as follows:
 228 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 229 * get  - blocks with b_blocknr equal to newb...newb+count-1
 230 * We also assume that the blocks oldb...oldb+count-1 are
 231 * situated at the end of the file.
 232 *
 233 * We can get here from ufs_writepage or ufs_prepare_write;
 234 * locked_page is an argument of those functions, so it is already locked.
 235 */
236static void ufs_change_blocknr(struct inode *inode, sector_t beg,
237			       unsigned int count, sector_t oldb,
238			       sector_t newb, struct page *locked_page)
239{
240	const unsigned blks_per_page =
241		1 << (PAGE_SHIFT - inode->i_blkbits);
242	const unsigned mask = blks_per_page - 1;
243	struct address_space * const mapping = inode->i_mapping;
244	pgoff_t index, cur_index, last_index;
245	unsigned pos, j, lblock;
246	sector_t end, i;
247	struct page *page;
248	struct buffer_head *head, *bh;
249
250	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
251	      inode->i_ino, count,
252	     (unsigned long long)oldb, (unsigned long long)newb);
253
254	BUG_ON(!locked_page);
255	BUG_ON(!PageLocked(locked_page));
256
257	cur_index = locked_page->index;
258	end = count + beg;
259	last_index = end >> (PAGE_SHIFT - inode->i_blkbits);
260	for (i = beg; i < end; i = (i | mask) + 1) {
261		index = i >> (PAGE_SHIFT - inode->i_blkbits);
262
263		if (likely(cur_index != index)) {
264			page = ufs_get_locked_page(mapping, index);
265			if (!page)/* it was truncated */
266				continue;
267			if (IS_ERR(page)) {/* or EIO */
268				ufs_error(inode->i_sb, __func__,
269					  "read of page %llu failed\n",
270					  (unsigned long long)index);
271				continue;
272			}
273		} else
274			page = locked_page;
275
276		head = page_buffers(page);
277		bh = head;
278		pos = i & mask;
279		for (j = 0; j < pos; ++j)
280			bh = bh->b_this_page;
281
282
283		if (unlikely(index == last_index))
284			lblock = end & mask;
285		else
286			lblock = blks_per_page;
287
288		do {
289			if (j >= lblock)
290				break;
291			pos = (i - beg) + j;
292
293			if (!buffer_mapped(bh))
294					map_bh(bh, inode->i_sb, oldb + pos);
295			if (!buffer_uptodate(bh)) {
296				ll_rw_block(REQ_OP_READ, 0, 1, &bh);
297				wait_on_buffer(bh);
298				if (!buffer_uptodate(bh)) {
299					ufs_error(inode->i_sb, __func__,
300						  "read of block failed\n");
301					break;
302				}
303			}
304
305			UFSD(" change from %llu to %llu, pos %u\n",
306			     (unsigned long long)(pos + oldb),
307			     (unsigned long long)(pos + newb), pos);
308
309			bh->b_blocknr = newb + pos;
 310			clean_bdev_bh_alias(bh);
311			mark_buffer_dirty(bh);
312			++j;
313			bh = bh->b_this_page;
314		} while (bh != head);
315
316		if (likely(cur_index != index))
317			ufs_put_locked_page(page);
318 	}
319	UFSD("EXIT\n");
320}
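
The loop above walks the affected blocks one page at a time: index is the page a block lives in, pos is its buffer offset inside that page, and i = (i | mask) + 1 jumps straight to the first block of the next page. A userspace sketch of that arithmetic, assuming 4096-byte pages and 1024-byte blocks (PAGE_SHIFT and BLKBITS below are toy constants, not the kernel's):

#include <stdio.h>

#define PAGE_SHIFT 12
#define BLKBITS    10

int main(void)
{
	const unsigned blks_per_page = 1u << (PAGE_SHIFT - BLKBITS);	/* 4 */
	const unsigned mask = blks_per_page - 1;
	unsigned long beg = 5, end = 14, i;

	for (i = beg; i < end; i = (i | mask) + 1) {
		unsigned long index = i >> (PAGE_SHIFT - BLKBITS);
		unsigned pos = i & mask;	/* first buffer to touch in this page */

		printf("page %lu: start at buffer %u\n", index, pos);
	}
	return 0;
}

Starting mid-page at block 5, this visits page 1 at buffer 1, then pages 2 and 3 from buffer 0, exactly the stepping the listing relies on.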
321
322static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
323			    int sync)
324{
325	struct buffer_head *bh;
326	sector_t end = beg + n;
327
328	for (; beg < end; ++beg) {
329		bh = sb_getblk(inode->i_sb, beg);
330		lock_buffer(bh);
331		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
332		set_buffer_uptodate(bh);
333		mark_buffer_dirty(bh);
334		unlock_buffer(bh);
335		if (IS_SYNC(inode) || sync)
336			sync_dirty_buffer(bh);
337		brelse(bh);
338	}
339}
340
341u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
342			   u64 goal, unsigned count, int *err,
343			   struct page *locked_page)
344{
345	struct super_block * sb;
346	struct ufs_sb_private_info * uspi;
347	struct ufs_super_block_first * usb1;
348	unsigned cgno, oldcount, newcount;
349	u64 tmp, request, result;
350	
351	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
352	     inode->i_ino, (unsigned long long)fragment,
353	     (unsigned long long)goal, count);
354	
355	sb = inode->i_sb;
356	uspi = UFS_SB(sb)->s_uspi;
357	usb1 = ubh_get_usb_first(uspi);
358	*err = -ENOSPC;
359
360	mutex_lock(&UFS_SB(sb)->s_lock);
361	tmp = ufs_data_ptr_to_cpu(sb, p);
362
363	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
364		ufs_warning(sb, "ufs_new_fragments", "internal warning"
365			    " fragment %llu, count %u",
366			    (unsigned long long)fragment, count);
367		count = uspi->s_fpb - ufs_fragnum(fragment); 
368	}
369	oldcount = ufs_fragnum (fragment);
370	newcount = oldcount + count;
371
372	/*
373	 * Somebody else has just allocated our fragments
374	 */
375	if (oldcount) {
376		if (!tmp) {
377			ufs_error(sb, "ufs_new_fragments", "internal error, "
378				  "fragment %llu, tmp %llu\n",
379				  (unsigned long long)fragment,
380				  (unsigned long long)tmp);
381			mutex_unlock(&UFS_SB(sb)->s_lock);
382			return INVBLOCK;
383		}
384		if (fragment < UFS_I(inode)->i_lastfrag) {
385			UFSD("EXIT (ALREADY ALLOCATED)\n");
386			mutex_unlock(&UFS_SB(sb)->s_lock);
387			return 0;
388		}
389	}
390	else {
391		if (tmp) {
392			UFSD("EXIT (ALREADY ALLOCATED)\n");
393			mutex_unlock(&UFS_SB(sb)->s_lock);
394			return 0;
395		}
396	}
397
 398	/*
 399	 * There is not enough space for the user on the device
 400	 */
401	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
402		mutex_unlock(&UFS_SB(sb)->s_lock);
403		UFSD("EXIT (FAILED)\n");
404		return 0;
405	}
406
407	if (goal >= uspi->s_size) 
408		goal = 0;
409	if (goal == 0) 
410		cgno = ufs_inotocg (inode->i_ino);
411	else
412		cgno = ufs_dtog(uspi, goal);
413	 
414	/*
415	 * allocate new fragment
416	 */
417	if (oldcount == 0) {
418		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
419		if (result) {
420			ufs_clear_frags(inode, result + oldcount,
421					newcount - oldcount, locked_page != NULL);
422			write_seqlock(&UFS_I(inode)->meta_lock);
423			ufs_cpu_to_data_ptr(sb, p, result);
424			write_sequnlock(&UFS_I(inode)->meta_lock);
425			*err = 0;
426			UFS_I(inode)->i_lastfrag =
 427				max(UFS_I(inode)->i_lastfrag, fragment + count);
428		}
429		mutex_unlock(&UFS_SB(sb)->s_lock);
430		UFSD("EXIT, result %llu\n", (unsigned long long)result);
431		return result;
432	}
433
434	/*
435	 * resize block
436	 */
437	result = ufs_add_fragments(inode, tmp, oldcount, newcount);
438	if (result) {
439		*err = 0;
440		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
441						fragment + count);
442		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
443				locked_page != NULL);
444		mutex_unlock(&UFS_SB(sb)->s_lock);
445		UFSD("EXIT, result %llu\n", (unsigned long long)result);
446		return result;
447	}
448
449	/*
450	 * allocate new block and move data
451	 */
452	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
453	    case UFS_OPTSPACE:
454		request = newcount;
455		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
456		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
457			break;
458		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
459		break;
460	    default:
461		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
462	
463	    case UFS_OPTTIME:
464		request = uspi->s_fpb;
465		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
466		    (uspi->s_minfree - 2) / 100)
467			break;
468		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
469		break;
470	}
471	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
472	if (result) {
473		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
474				locked_page != NULL);
475		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
476				   uspi->s_sbbase + tmp,
477				   uspi->s_sbbase + result, locked_page);
478		write_seqlock(&UFS_I(inode)->meta_lock);
479		ufs_cpu_to_data_ptr(sb, p, result);
480		write_sequnlock(&UFS_I(inode)->meta_lock);
481		*err = 0;
482		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
483						fragment + count);
484		mutex_unlock(&UFS_SB(sb)->s_lock);
485		if (newcount < request)
486			ufs_free_fragments (inode, result + newcount, request - newcount);
487		ufs_free_fragments (inode, tmp, oldcount);
488		UFSD("EXIT, result %llu\n", (unsigned long long)result);
489		return result;
490	}
491
492	mutex_unlock(&UFS_SB(sb)->s_lock);
493	UFSD("EXIT (FAILED)\n");
494	return 0;
495}		
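
The fs_optim switch above implements the classic FFS space/time trade-off: under UFS_OPTSPACE the allocator requests exactly newcount fragments and changes policy once free fragments drop to s_dsize * s_minfree / (2 * 100) or below, while under UFS_OPTTIME it requests a whole block as long as free fragments stay below s_dsize * (s_minfree - 2) / 100 (in 4.4BSD the second arm switches back to space optimization; this listing rewrites UFS_OPTTIME). A sketch of the two thresholds with toy numbers:

#include <stdio.h>

int main(void)
{
	unsigned long s_dsize = 1000000;	/* data fragments in the fs */
	unsigned s_minfree = 8;			/* percent reserved */
	unsigned long nffree = 45000;		/* currently free fragments */

	/* OPTSPACE arm: below this, fragments are scarce */
	unsigned long low = s_dsize * s_minfree / (2 * 100);	/* 40000 */
	/* OPTTIME arm: below this, space optimization is unaffordable */
	unsigned long high = s_dsize * (s_minfree - 2) / 100;	/* 60000 */

	printf("OPTSPACE: nffree %lu %s %lu -> %s\n", nffree,
	       nffree > low ? ">" : "<=", low,
	       nffree > low ? "stay" : "change policy");
	printf("OPTTIME:  nffree %lu %s %lu -> %s\n", nffree,
	       nffree < high ? "<" : ">=", high,
	       nffree < high ? "stay" : "change policy");
	return 0;
}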
496
497static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
498			     unsigned oldcount, unsigned newcount)
499{
500	struct super_block * sb;
501	struct ufs_sb_private_info * uspi;
502	struct ufs_cg_private_info * ucpi;
503	struct ufs_cylinder_group * ucg;
504	unsigned cgno, fragno, fragoff, count, fragsize, i;
505	
506	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
507	     (unsigned long long)fragment, oldcount, newcount);
508	
509	sb = inode->i_sb;
510	uspi = UFS_SB(sb)->s_uspi;
511	count = newcount - oldcount;
512	
513	cgno = ufs_dtog(uspi, fragment);
514	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
515		return 0;
516	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
517		return 0;
518	ucpi = ufs_load_cylinder (sb, cgno);
519	if (!ucpi)
520		return 0;
521	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
522	if (!ufs_cg_chkmagic(sb, ucg)) {
523		ufs_panic (sb, "ufs_add_fragments",
524			"internal error, bad magic number on cg %u", cgno);
525		return 0;
526	}
527
528	fragno = ufs_dtogd(uspi, fragment);
529	fragoff = ufs_fragnum (fragno);
530	for (i = oldcount; i < newcount; i++)
531		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
532			return 0;
533	/*
534	 * Block can be extended
535	 */
536	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
537	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
538		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
539			break;
540	fragsize = i - oldcount;
541	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
542		ufs_panic (sb, "ufs_add_fragments",
543			"internal error or corrupted bitmap on cg %u", cgno);
544	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
545	if (fragsize != count)
546		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
547	for (i = oldcount; i < newcount; i++)
548		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
549
550	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
551	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
552	uspi->cs_total.cs_nffree -= count;
553	
554	ubh_mark_buffer_dirty (USPI_UBH(uspi));
555	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
556	if (sb->s_flags & MS_SYNCHRONOUS)
557		ubh_sync_block(UCPI_UBH(ucpi));
558	ufs_mark_sb_dirty(sb);
559
560	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
561	
562	return fragment;
563}
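
cg_frsum[] above is a histogram of free runs: entry k counts the free runs of exactly k fragments in the group. Extending an allocation in place consumes the front of a run of length fragsize, so frsum[fragsize] is decremented and, if the run is not used up, frsum[fragsize - count] is incremented. A toy userspace sketch of that bookkeeping:

#include <stdio.h>

#define FPB 8

int main(void)
{
	unsigned frsum[FPB + 1] = { 0 };
	unsigned fragsize = 5, count = 2;

	frsum[fragsize] = 1;		/* one free run of 5 fragments       */
	frsum[fragsize]--;		/* its front is consumed...          */
	if (fragsize != count)
		frsum[fragsize - count]++;	/* ...leaving a run of 3     */

	for (unsigned k = 1; k <= FPB; k++)
		if (frsum[k])
			printf("frsum[%u] = %u\n", k, frsum[k]);
	return 0;
}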
564
565#define UFS_TEST_FREE_SPACE_CG \
566	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
567	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
568		goto cg_found; \
569	for (k = count; k < uspi->s_fpb; k++) \
570		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
571			goto cg_found; 
572
573static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
574			       u64 goal, unsigned count, int *err)
575{
576	struct super_block * sb;
577	struct ufs_sb_private_info * uspi;
578	struct ufs_cg_private_info * ucpi;
579	struct ufs_cylinder_group * ucg;
580	unsigned oldcg, i, j, k, allocsize;
581	u64 result;
582	
583	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
584	     inode->i_ino, cgno, (unsigned long long)goal, count);
585
586	sb = inode->i_sb;
587	uspi = UFS_SB(sb)->s_uspi;
588	oldcg = cgno;
589	
 590	/*
 591	 * 1. search the preferred cylinder group
 592	 */
593	UFS_TEST_FREE_SPACE_CG
594
595	/*
596	 * 2. quadratic rehash
597	 */
598	for (j = 1; j < uspi->s_ncg; j *= 2) {
599		cgno += j;
600		if (cgno >= uspi->s_ncg) 
601			cgno -= uspi->s_ncg;
602		UFS_TEST_FREE_SPACE_CG
603	}
604
 605	/*
 606	 * 3. brute force search
 607	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
 608	 */
609	cgno = (oldcg + 1) % uspi->s_ncg;
610	for (j = 2; j < uspi->s_ncg; j++) {
611		cgno++;
612		if (cgno >= uspi->s_ncg)
613			cgno = 0;
614		UFS_TEST_FREE_SPACE_CG
615	}
616	
617	UFSD("EXIT (FAILED)\n");
618	return 0;
619
620cg_found:
621	ucpi = ufs_load_cylinder (sb, cgno);
622	if (!ucpi)
623		return 0;
624	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
625	if (!ufs_cg_chkmagic(sb, ucg)) 
626		ufs_panic (sb, "ufs_alloc_fragments",
627			"internal error, bad magic number on cg %u", cgno);
628	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
629
630	if (count == uspi->s_fpb) {
631		result = ufs_alloccg_block (inode, ucpi, goal, err);
632		if (result == INVBLOCK)
633			return 0;
634		goto succed;
635	}
636
637	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
638		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
639			break;
640	
641	if (allocsize == uspi->s_fpb) {
642		result = ufs_alloccg_block (inode, ucpi, goal, err);
643		if (result == INVBLOCK)
644			return 0;
645		goal = ufs_dtogd(uspi, result);
646		for (i = count; i < uspi->s_fpb; i++)
647			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
648		i = uspi->s_fpb - count;
649
650		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
651		uspi->cs_total.cs_nffree += i;
652		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
653		fs32_add(sb, &ucg->cg_frsum[i], 1);
654		goto succed;
655	}
656
657	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
658	if (result == INVBLOCK)
659		return 0;
660	for (i = 0; i < count; i++)
661		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
662	
663	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
664	uspi->cs_total.cs_nffree -= count;
665	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
666	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
667
668	if (count != allocsize)
669		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
670
671succed:
672	ubh_mark_buffer_dirty (USPI_UBH(uspi));
673	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
674	if (sb->s_flags & MS_SYNCHRONOUS)
675		ubh_sync_block(UCPI_UBH(ucpi));
676	ufs_mark_sb_dirty(sb);
677
678	result += cgno * uspi->s_fpg;
679	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
680	return result;
681}
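
The three search phases above visit cylinder groups in a fixed order: the preferred group first, then a quadratic rehash (offsets +1, +2, +4, ... from the last position), then a linear sweep starting just past the preferred group. The probe order can be reproduced in isolation (ncg and oldcg below are toy values):

#include <stdio.h>

int main(void)
{
	unsigned ncg = 8, oldcg = 3, cgno = oldcg, j;

	printf("phase 1: cg %u\n", cgno);
	for (j = 1; j < ncg; j *= 2) {		/* quadratic rehash */
		cgno += j;
		if (cgno >= ncg)
			cgno -= ncg;
		printf("phase 2: cg %u\n", cgno);
	}
	cgno = (oldcg + 1) % ncg;		/* linear sweep */
	for (j = 2; j < ncg; j++) {
		cgno++;
		if (cgno >= ncg)
			cgno = 0;
		printf("phase 3: cg %u\n", cgno);
	}
	return 0;
}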
682
683static u64 ufs_alloccg_block(struct inode *inode,
684			     struct ufs_cg_private_info *ucpi,
685			     u64 goal, int *err)
686{
687	struct super_block * sb;
688	struct ufs_sb_private_info * uspi;
689	struct ufs_cylinder_group * ucg;
690	u64 result, blkno;
691
692	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
693
694	sb = inode->i_sb;
695	uspi = UFS_SB(sb)->s_uspi;
696	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
697
698	if (goal == 0) {
699		goal = ucpi->c_rotor;
700		goto norot;
701	}
702	goal = ufs_blknum (goal);
703	goal = ufs_dtogd(uspi, goal);
704	
705	/*
706	 * If the requested block is available, use it.
707	 */
708	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
709		result = goal;
710		goto gotit;
711	}
712	
713norot:	
714	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
715	if (result == INVBLOCK)
716		return INVBLOCK;
717	ucpi->c_rotor = result;
718gotit:
719	blkno = ufs_fragstoblks(result);
720	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
721	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
722		ufs_clusteracct (sb, ucpi, blkno, -1);
723
724	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
725	uspi->cs_total.cs_nbfree--;
726	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
727
728	if (uspi->fs_magic != UFS2_MAGIC) {
729		unsigned cylno = ufs_cbtocylno((unsigned)result);
730
731		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
732					  ufs_cbtorpos((unsigned)result)), 1);
733		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
734	}
735	
736	UFSD("EXIT, result %llu\n", (unsigned long long)result);
737
738	return result;
739}
740
741static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
742			  struct ufs_buffer_head *ubh,
743			  unsigned begin, unsigned size,
744			  unsigned char *table, unsigned char mask)
745{
746	unsigned rest, offset;
747	unsigned char *cp;
748	
749
750	offset = begin & ~uspi->s_fmask;
751	begin >>= uspi->s_fshift;
752	for (;;) {
753		if ((offset + size) < uspi->s_fsize)
754			rest = size;
755		else
756			rest = uspi->s_fsize - offset;
757		size -= rest;
758		cp = ubh->bh[begin]->b_data + offset;
759		while ((table[*cp++] & mask) == 0 && --rest)
760			;
761		if (rest || !size)
762			break;
763		begin++;
764		offset = 0;
765	}
766	return (size + rest);
767}
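
ubh_scanc() above is the BSD scanc() idiom spread over the buffer-head pieces: walk bytes until table[byte] & mask is nonzero and report how many bytes were not consumed, so a return of 0 means "no hit" and ufs_bitmap_search() turns a nonzero return into the matched byte's position via (start + length - loc). The same loop on a single flat buffer, in userspace (scanc() below is an illustrative rewrite, not the kernel helper):

#include <stdio.h>

static unsigned scanc(const unsigned char *cp, unsigned size,
		      const unsigned char *table, unsigned char mask)
{
	unsigned rest = size;

	/* stop at the first byte whose table entry matches the mask */
	while (rest && (table[*cp++] & mask) == 0)
		--rest;
	return rest;	/* bytes not consumed, counting the match */
}

int main(void)
{
	unsigned char table[256] = { 0 };
	unsigned char buf[] = { 0x00, 0x00, 0xf0, 0x00 };

	table[0xf0] = 1;	/* mark the byte value we are looking for */
	printf("remaining = %u\n", scanc(buf, sizeof(buf), table, 1));
	return 0;
}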
768
 769/*
 770 * Find a block of the specified size in the specified cylinder group.
 771 * @sb: pointer to the super block
 772 * @ucpi: pointer to cylinder group info
 773 * @goal: block near which we want to find the new one
 774 * @count: requested size, in fragments
 775 */
776static u64 ufs_bitmap_search(struct super_block *sb,
777			     struct ufs_cg_private_info *ucpi,
778			     u64 goal, unsigned count)
779{
780	/*
781	 * Bit patterns for identifying fragments in the block map
782	 * used as ((map & mask_arr) == want_arr)
783	 */
784	static const int mask_arr[9] = {
785		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
786	};
787	static const int want_arr[9] = {
788		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
789	};
 790	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
791	unsigned start, length, loc;
792	unsigned pos, want, blockmap, mask, end;
793	u64 result;
794
795	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
 796	     (unsigned long long)goal, count);
797
798	if (goal)
799		start = ufs_dtogd(uspi, goal) >> 3;
800	else
801		start = ucpi->c_frotor >> 3;
802		
803	length = ((uspi->s_fpg + 7) >> 3) - start;
804	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
805		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
806		1 << (count - 1 + (uspi->s_fpb & 7))); 
807	if (loc == 0) {
808		length = start + 1;
809		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
810				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
811				ufs_fragtable_other,
812				1 << (count - 1 + (uspi->s_fpb & 7)));
813		if (loc == 0) {
814			ufs_error(sb, "ufs_bitmap_search",
815				  "bitmap corrupted on cg %u, start %u,"
816				  " length %u, count %u, freeoff %u\n",
817				  ucpi->c_cgx, start, length, count,
818				  ucpi->c_freeoff);
819			return INVBLOCK;
820		}
821		start = 0;
822	}
823	result = (start + length - loc) << 3;
824	ucpi->c_frotor = result;
825
826	/*
827	 * found the byte in the map
828	 */
829
830	for (end = result + 8; result < end; result += uspi->s_fpb) {
831		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
832		blockmap <<= 1;
833		mask = mask_arr[count];
834		want = want_arr[count];
835		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
836			if ((blockmap & mask) == want) {
837				UFSD("EXIT, result %llu\n",
838				     (unsigned long long)result);
839				return result + pos;
840 			}
841			mask <<= 1;
842			want <<= 1;
843 		}
844 	}
845
846	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
847		  ucpi->c_cgx);
848	UFSD("EXIT (FAILED)\n");
849	return INVBLOCK;
850}
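
The mask_arr/want_arr tables above detect a run of exactly count free fragments inside one byte of the block map: after shifting the map left by one, a window equal to want_arr[count] under mask_arr[count] is count consecutive 1-bits (free fragments) bounded on both sides by 0-bits (allocated fragments or the byte edge). A userspace sketch using the same tables; the value 0x38 marks fragments 3..5 free:

#include <stdio.h>

static const int mask_arr[9] = {
	0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
};
static const int want_arr[9] = {
	0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
};

int main(void)
{
	unsigned fpb = 8, count = 3;	/* want a run of exactly 3 free frags */
	unsigned blockmap = 0x38;	/* fragments 3..5 free */
	unsigned mask, want, pos;

	blockmap <<= 1;			/* shifted-in 0 acts as "allocated" */
	mask = mask_arr[count];
	want = want_arr[count];
	for (pos = 0; pos <= fpb - count; pos++) {
		if (((unsigned)blockmap & mask) == want) {
			printf("free run of %u starts at fragment %u\n",
			       count, pos);
			return 0;
		}
		mask <<= 1;
		want <<= 1;
	}
	printf("no run found\n");
	return 0;
}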
851
852static void ufs_clusteracct(struct super_block * sb,
853	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
854{
855	struct ufs_sb_private_info * uspi;
856	int i, start, end, forw, back;
857	
858	uspi = UFS_SB(sb)->s_uspi;
859	if (uspi->s_contigsumsize <= 0)
860		return;
861
862	if (cnt > 0)
863		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
864	else
865		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
866
867	/*
868	 * Find the size of the cluster going forward.
869	 */
870	start = blkno + 1;
871	end = start + uspi->s_contigsumsize;
872	if ( end >= ucpi->c_nclusterblks)
873		end = ucpi->c_nclusterblks;
874	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
875	if (i > end)
876		i = end;
877	forw = i - start;
878	
879	/*
880	 * Find the size of the cluster going backward.
881	 */
882	start = blkno - 1;
883	end = start - uspi->s_contigsumsize;
884	if (end < 0 ) 
885		end = -1;
886	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
887	if ( i < end) 
888		i = end;
889	back = start - i;
890	
891	/*
892	 * Account for old cluster and the possibly new forward and
893	 * back clusters.
894	 */
895	i = back + forw + 1;
896	if (i > uspi->s_contigsumsize)
897		i = uspi->s_contigsumsize;
898	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
899	if (back > 0)
900		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
901	if (forw > 0)
902		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
903}
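
ufs_clusteracct() above maintains per-size counts of free block clusters: after toggling blkno's bit it measures how far the free cluster extends forward and backward, capped at s_contigsumsize, and adjusts the affected cluster-sum entries. A userspace sketch of the forward/backward measurement on a toy bitmap (NBLKS, the bfree[] array and its values are illustrative):

#include <stdio.h>

#define NBLKS 16

int main(void)
{
	int bfree[NBLKS] = { 0, 1, 1, 0, 0, 1, 1, 1, 0 };	/* 1 == free */
	int contigsumsize = 4, blkno = 4;
	int i, start, end, forw, back;

	bfree[blkno] = 1;	/* the block just freed */

	/* size of the free cluster going forward */
	start = blkno + 1;
	end = start + contigsumsize;
	if (end >= NBLKS)
		end = NBLKS;
	for (i = start; i < end && bfree[i]; i++)
		;
	forw = i - start;

	/* size of the free cluster going backward */
	start = blkno - 1;
	end = start - contigsumsize;
	if (end < 0)
		end = -1;
	for (i = start; i > end && bfree[i]; i--)
		;
	back = start - i;

	printf("forw %d, back %d, cluster size %d\n", forw, back,
	       forw + back + 1 > contigsumsize ? contigsumsize
					       : forw + back + 1);
	return 0;
}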
904
905
906static unsigned char ufs_fragtable_8fpb[] = {
907	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
908	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
909	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
910	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
911	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,	
912	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
913	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
914	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
915	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
916	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
917	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
918	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
919	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
920	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
921	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
922	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
923};
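
ufs_fragtable_8fpb[] above can be read as follows: for a block-map byte b (1-bits = free fragments), bit k-1 of the entry is set iff b contains a free run of exactly k fragments; ufs_bitmap_search() tests exactly one of these bits via ubh_scanc()'s mask argument, 1 << (count - 1) when s_fpb is 8. A userspace sketch that recomputes an entry from scratch (fragtable() below is an illustrative reimplementation, not the kernel table generator):

#include <stdio.h>

static unsigned char fragtable(unsigned b)
{
	unsigned char entry = 0;
	int run = 0;

	for (int bit = 0; bit <= 8; bit++) {	/* bit 8 terminates the last run */
		if (bit < 8 && (b & (1 << bit))) {
			run++;
		} else if (run) {
			entry |= 1 << (run - 1);	/* run of exactly `run` frags */
			run = 0;
		}
	}
	return entry;
}

int main(void)
{
	/* 0x66 = 0b01100110: two free runs of length 2 -> bit 1 -> 0x02,
	 * matching ufs_fragtable_8fpb[0x66] in the table above. */
	printf("fragtable(0x66) = 0x%02x\n", fragtable(0x66));
	return 0;
}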
924
925static unsigned char ufs_fragtable_other[] = {
926	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
927	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
928	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
929	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
930	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
931	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
932	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
933	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
934	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
935	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
936	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
937	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
938	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
939	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
940	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
941	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
942};
linux/fs/ufs/balloc.c at v3.15
  1/*
  2 *  linux/fs/ufs/balloc.c
  3 *
  4 * Copyright (C) 1998
  5 * Daniel Pirkl <daniel.pirkl@email.cz>
  6 * Charles University, Faculty of Mathematics and Physics
  7 *
  8 * UFS2 write support Evgeniy Dushistov <dushistov@mail.ru>, 2007
  9 */
 10
 11#include <linux/fs.h>
 12#include <linux/stat.h>
 13#include <linux/time.h>
 14#include <linux/string.h>
 15#include <linux/buffer_head.h>
 16#include <linux/capability.h>
  17#include <linux/bitops.h>
 18#include <asm/byteorder.h>
 19
 20#include "ufs_fs.h"
 21#include "ufs.h"
 22#include "swab.h"
 23#include "util.h"
 24
 25#define INVBLOCK ((u64)-1L)
 26
 27static u64 ufs_add_fragments(struct inode *, u64, unsigned, unsigned, int *);
 28static u64 ufs_alloc_fragments(struct inode *, unsigned, u64, unsigned, int *);
 29static u64 ufs_alloccg_block(struct inode *, struct ufs_cg_private_info *, u64, int *);
 30static u64 ufs_bitmap_search (struct super_block *, struct ufs_cg_private_info *, u64, unsigned);
 31static unsigned char ufs_fragtable_8fpb[], ufs_fragtable_other[];
 32static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *, unsigned, int);
 33
 34/*
 35 * Free 'count' fragments from fragment number 'fragment'
 36 */
 37void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count)
 38{
 39	struct super_block * sb;
 40	struct ufs_sb_private_info * uspi;
 41	struct ufs_cg_private_info * ucpi;
 42	struct ufs_cylinder_group * ucg;
 43	unsigned cgno, bit, end_bit, bbase, blkmap, i;
 44	u64 blkno;
 45	
 46	sb = inode->i_sb;
 47	uspi = UFS_SB(sb)->s_uspi;
 48	
 49	UFSD("ENTER, fragment %llu, count %u\n",
 50	     (unsigned long long)fragment, count);
 51	
 52	if (ufs_fragnum(fragment) + count > uspi->s_fpg)
 53		ufs_error (sb, "ufs_free_fragments", "internal error");
 54	
 55	mutex_lock(&UFS_SB(sb)->s_lock);
 56	
 57	cgno = ufs_dtog(uspi, fragment);
 58	bit = ufs_dtogd(uspi, fragment);
 59	if (cgno >= uspi->s_ncg) {
 60		ufs_panic (sb, "ufs_free_fragments", "freeing blocks are outside device");
 61		goto failed;
 62	}
 63		
 64	ucpi = ufs_load_cylinder (sb, cgno);
 65	if (!ucpi) 
 66		goto failed;
 67	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
 68	if (!ufs_cg_chkmagic(sb, ucg)) {
 69		ufs_panic (sb, "ufs_free_fragments", "internal error, bad magic number on cg %u", cgno);
 70		goto failed;
 71	}
 72
 73	end_bit = bit + count;
 74	bbase = ufs_blknum (bit);
 75	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 76	ufs_fragacct (sb, blkmap, ucg->cg_frsum, -1);
 77	for (i = bit; i < end_bit; i++) {
 78		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, i))
 79			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, i);
 80		else 
 81			ufs_error (sb, "ufs_free_fragments",
 82				   "bit already cleared for fragment %u", i);
 83	}
 84	
 85	fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
 86	uspi->cs_total.cs_nffree += count;
 87	fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
 88	blkmap = ubh_blkmap (UCPI_UBH(ucpi), ucpi->c_freeoff, bbase);
 89	ufs_fragacct(sb, blkmap, ucg->cg_frsum, 1);
 90
  91	/*
  92	 * Try to reassemble the free fragments into a whole block
  93	 */
 94	blkno = ufs_fragstoblks (bbase);
 95	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
 96		fs32_sub(sb, &ucg->cg_cs.cs_nffree, uspi->s_fpb);
 97		uspi->cs_total.cs_nffree -= uspi->s_fpb;
 98		fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, uspi->s_fpb);
 99		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
100			ufs_clusteracct (sb, ucpi, blkno, 1);
101		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
102		uspi->cs_total.cs_nbfree++;
103		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
104		if (uspi->fs_magic != UFS2_MAGIC) {
105			unsigned cylno = ufs_cbtocylno (bbase);
106
107			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
108						  ufs_cbtorpos(bbase)), 1);
109			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
110		}
111	}
112	
113	ubh_mark_buffer_dirty (USPI_UBH(uspi));
114	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
115	if (sb->s_flags & MS_SYNCHRONOUS)
116		ubh_sync_block(UCPI_UBH(ucpi));
117	ufs_mark_sb_dirty(sb);
118	
119	mutex_unlock(&UFS_SB(sb)->s_lock);
120	UFSD("EXIT\n");
121	return;
122
123failed:
124	mutex_unlock(&UFS_SB(sb)->s_lock);
125	UFSD("EXIT (FAILED)\n");
126	return;
127}
128
129/*
130 * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
131 */
132void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count)
133{
134	struct super_block * sb;
135	struct ufs_sb_private_info * uspi;
136	struct ufs_cg_private_info * ucpi;
137	struct ufs_cylinder_group * ucg;
138	unsigned overflow, cgno, bit, end_bit, i;
139	u64 blkno;
140	
141	sb = inode->i_sb;
142	uspi = UFS_SB(sb)->s_uspi;
143
144	UFSD("ENTER, fragment %llu, count %u\n",
145	     (unsigned long long)fragment, count);
146	
147	if ((fragment & uspi->s_fpbmask) || (count & uspi->s_fpbmask)) {
148		ufs_error (sb, "ufs_free_blocks", "internal error, "
149			   "fragment %llu, count %u\n",
150			   (unsigned long long)fragment, count);
151		goto failed;
152	}
153
154	mutex_lock(&UFS_SB(sb)->s_lock);
155	
156do_more:
157	overflow = 0;
158	cgno = ufs_dtog(uspi, fragment);
159	bit = ufs_dtogd(uspi, fragment);
160	if (cgno >= uspi->s_ncg) {
161		ufs_panic (sb, "ufs_free_blocks", "freeing blocks are outside device");
162		goto failed_unlock;
163	}
164	end_bit = bit + count;
165	if (end_bit > uspi->s_fpg) {
166		overflow = bit + count - uspi->s_fpg;
167		count -= overflow;
168		end_bit -= overflow;
169	}
170
171	ucpi = ufs_load_cylinder (sb, cgno);
172	if (!ucpi) 
173		goto failed_unlock;
174	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
175	if (!ufs_cg_chkmagic(sb, ucg)) {
176		ufs_panic (sb, "ufs_free_blocks", "internal error, bad magic number on cg %u", cgno);
177		goto failed_unlock;
178	}
179
180	for (i = bit; i < end_bit; i += uspi->s_fpb) {
181		blkno = ufs_fragstoblks(i);
182		if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno)) {
183			ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
184		}
185		ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
186		if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
187			ufs_clusteracct (sb, ucpi, blkno, 1);
188
189		fs32_add(sb, &ucg->cg_cs.cs_nbfree, 1);
190		uspi->cs_total.cs_nbfree++;
191		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nbfree, 1);
192
193		if (uspi->fs_magic != UFS2_MAGIC) {
194			unsigned cylno = ufs_cbtocylno(i);
195
196			fs16_add(sb, &ubh_cg_blks(ucpi, cylno,
197						  ufs_cbtorpos(i)), 1);
198			fs32_add(sb, &ubh_cg_blktot(ucpi, cylno), 1);
199		}
200	}
201
202	ubh_mark_buffer_dirty (USPI_UBH(uspi));
203	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
204	if (sb->s_flags & MS_SYNCHRONOUS)
205		ubh_sync_block(UCPI_UBH(ucpi));
206
207	if (overflow) {
208		fragment += count;
209		count = overflow;
210		goto do_more;
211	}
212
213	ufs_mark_sb_dirty(sb);
214	mutex_unlock(&UFS_SB(sb)->s_lock);
215	UFSD("EXIT\n");
216	return;
217
218failed_unlock:
219	mutex_unlock(&UFS_SB(sb)->s_lock);
220failed:
221	UFSD("EXIT (FAILED)\n");
222	return;
223}
224
 225/*
 226 * Modify the inode page cache as follows:
 227 * have - blocks with b_blocknr equal to oldb...oldb+count-1
 228 * get  - blocks with b_blocknr equal to newb...newb+count-1
 229 * We also assume that the blocks oldb...oldb+count-1 are
 230 * situated at the end of the file.
 231 *
 232 * We can get here from ufs_writepage or ufs_prepare_write;
 233 * locked_page is an argument of those functions, so it is already locked.
 234 */
235static void ufs_change_blocknr(struct inode *inode, sector_t beg,
236			       unsigned int count, sector_t oldb,
237			       sector_t newb, struct page *locked_page)
238{
239	const unsigned blks_per_page =
240		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
241	const unsigned mask = blks_per_page - 1;
242	struct address_space * const mapping = inode->i_mapping;
243	pgoff_t index, cur_index, last_index;
244	unsigned pos, j, lblock;
245	sector_t end, i;
246	struct page *page;
247	struct buffer_head *head, *bh;
248
249	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
250	      inode->i_ino, count,
251	     (unsigned long long)oldb, (unsigned long long)newb);
252
253	BUG_ON(!locked_page);
254	BUG_ON(!PageLocked(locked_page));
255
256	cur_index = locked_page->index;
257	end = count + beg;
258	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
259	for (i = beg; i < end; i = (i | mask) + 1) {
260		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
261
262		if (likely(cur_index != index)) {
263			page = ufs_get_locked_page(mapping, index);
264			if (!page)/* it was truncated */
265				continue;
266			if (IS_ERR(page)) {/* or EIO */
267				ufs_error(inode->i_sb, __func__,
268					  "read of page %llu failed\n",
269					  (unsigned long long)index);
270				continue;
271			}
272		} else
273			page = locked_page;
274
275		head = page_buffers(page);
276		bh = head;
277		pos = i & mask;
278		for (j = 0; j < pos; ++j)
279			bh = bh->b_this_page;
280
281
282		if (unlikely(index == last_index))
283			lblock = end & mask;
284		else
285			lblock = blks_per_page;
286
287		do {
288			if (j >= lblock)
289				break;
290			pos = (i - beg) + j;
291
292			if (!buffer_mapped(bh))
293					map_bh(bh, inode->i_sb, oldb + pos);
294			if (!buffer_uptodate(bh)) {
295				ll_rw_block(READ, 1, &bh);
296				wait_on_buffer(bh);
297				if (!buffer_uptodate(bh)) {
298					ufs_error(inode->i_sb, __func__,
299						  "read of block failed\n");
300					break;
301				}
302			}
303
304			UFSD(" change from %llu to %llu, pos %u\n",
305			     (unsigned long long)(pos + oldb),
306			     (unsigned long long)(pos + newb), pos);
307
308			bh->b_blocknr = newb + pos;
309			unmap_underlying_metadata(bh->b_bdev,
310						  bh->b_blocknr);
311			mark_buffer_dirty(bh);
312			++j;
313			bh = bh->b_this_page;
314		} while (bh != head);
315
316		if (likely(cur_index != index))
317			ufs_put_locked_page(page);
318 	}
319	UFSD("EXIT\n");
320}
321
322static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
323			    int sync)
324{
325	struct buffer_head *bh;
326	sector_t end = beg + n;
327
328	for (; beg < end; ++beg) {
329		bh = sb_getblk(inode->i_sb, beg);
330		lock_buffer(bh);
331		memset(bh->b_data, 0, inode->i_sb->s_blocksize);
332		set_buffer_uptodate(bh);
333		mark_buffer_dirty(bh);
334		unlock_buffer(bh);
335		if (IS_SYNC(inode) || sync)
336			sync_dirty_buffer(bh);
337		brelse(bh);
338	}
339}
340
341u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
342			   u64 goal, unsigned count, int *err,
343			   struct page *locked_page)
344{
345	struct super_block * sb;
346	struct ufs_sb_private_info * uspi;
347	struct ufs_super_block_first * usb1;
348	unsigned cgno, oldcount, newcount;
349	u64 tmp, request, result;
350	
351	UFSD("ENTER, ino %lu, fragment %llu, goal %llu, count %u\n",
352	     inode->i_ino, (unsigned long long)fragment,
353	     (unsigned long long)goal, count);
354	
355	sb = inode->i_sb;
356	uspi = UFS_SB(sb)->s_uspi;
357	usb1 = ubh_get_usb_first(uspi);
358	*err = -ENOSPC;
359
360	mutex_lock(&UFS_SB(sb)->s_lock);
361	tmp = ufs_data_ptr_to_cpu(sb, p);
362
363	if (count + ufs_fragnum(fragment) > uspi->s_fpb) {
364		ufs_warning(sb, "ufs_new_fragments", "internal warning"
365			    " fragment %llu, count %u",
366			    (unsigned long long)fragment, count);
367		count = uspi->s_fpb - ufs_fragnum(fragment); 
368	}
369	oldcount = ufs_fragnum (fragment);
370	newcount = oldcount + count;
371
372	/*
373	 * Somebody else has just allocated our fragments
374	 */
375	if (oldcount) {
376		if (!tmp) {
377			ufs_error(sb, "ufs_new_fragments", "internal error, "
378				  "fragment %llu, tmp %llu\n",
379				  (unsigned long long)fragment,
380				  (unsigned long long)tmp);
381			mutex_unlock(&UFS_SB(sb)->s_lock);
382			return INVBLOCK;
383		}
384		if (fragment < UFS_I(inode)->i_lastfrag) {
385			UFSD("EXIT (ALREADY ALLOCATED)\n");
386			mutex_unlock(&UFS_SB(sb)->s_lock);
387			return 0;
388		}
389	}
390	else {
391		if (tmp) {
392			UFSD("EXIT (ALREADY ALLOCATED)\n");
393			mutex_unlock(&UFS_SB(sb)->s_lock);
394			return 0;
395		}
396	}
397
 398	/*
 399	 * There is not enough space for the user on the device
 400	 */
401	if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) {
402		mutex_unlock(&UFS_SB(sb)->s_lock);
403		UFSD("EXIT (FAILED)\n");
404		return 0;
405	}
406
407	if (goal >= uspi->s_size) 
408		goal = 0;
409	if (goal == 0) 
410		cgno = ufs_inotocg (inode->i_ino);
411	else
412		cgno = ufs_dtog(uspi, goal);
413	 
414	/*
415	 * allocate new fragment
416	 */
417	if (oldcount == 0) {
418		result = ufs_alloc_fragments (inode, cgno, goal, count, err);
 419		if (result) {
 420			ufs_cpu_to_data_ptr(sb, p, result);
421			*err = 0;
422			UFS_I(inode)->i_lastfrag =
423				max(UFS_I(inode)->i_lastfrag, fragment + count);
424			ufs_clear_frags(inode, result + oldcount,
425					newcount - oldcount, locked_page != NULL);
426		}
427		mutex_unlock(&UFS_SB(sb)->s_lock);
428		UFSD("EXIT, result %llu\n", (unsigned long long)result);
429		return result;
430	}
431
432	/*
433	 * resize block
434	 */
435	result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
436	if (result) {
437		*err = 0;
438		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
439						fragment + count);
440		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
441				locked_page != NULL);
442		mutex_unlock(&UFS_SB(sb)->s_lock);
443		UFSD("EXIT, result %llu\n", (unsigned long long)result);
444		return result;
445	}
446
447	/*
448	 * allocate new block and move data
449	 */
450	switch (fs32_to_cpu(sb, usb1->fs_optim)) {
451	    case UFS_OPTSPACE:
452		request = newcount;
453		if (uspi->s_minfree < 5 || uspi->cs_total.cs_nffree
454		    > uspi->s_dsize * uspi->s_minfree / (2 * 100))
455			break;
456		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
457		break;
458	    default:
459		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
460	
461	    case UFS_OPTTIME:
462		request = uspi->s_fpb;
463		if (uspi->cs_total.cs_nffree < uspi->s_dsize *
464		    (uspi->s_minfree - 2) / 100)
465			break;
466		usb1->fs_optim = cpu_to_fs32(sb, UFS_OPTTIME);
467		break;
468	}
469	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
470	if (result) {
471		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
472				locked_page != NULL);
473		ufs_change_blocknr(inode, fragment - oldcount, oldcount,
474				   uspi->s_sbbase + tmp,
 475				   uspi->s_sbbase + result, locked_page);
 476		ufs_cpu_to_data_ptr(sb, p, result);
477		*err = 0;
478		UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
479						fragment + count);
480		mutex_unlock(&UFS_SB(sb)->s_lock);
481		if (newcount < request)
482			ufs_free_fragments (inode, result + newcount, request - newcount);
483		ufs_free_fragments (inode, tmp, oldcount);
484		UFSD("EXIT, result %llu\n", (unsigned long long)result);
485		return result;
486	}
487
488	mutex_unlock(&UFS_SB(sb)->s_lock);
489	UFSD("EXIT (FAILED)\n");
490	return 0;
491}		
492
493static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
494			     unsigned oldcount, unsigned newcount, int *err)
495{
496	struct super_block * sb;
497	struct ufs_sb_private_info * uspi;
498	struct ufs_cg_private_info * ucpi;
499	struct ufs_cylinder_group * ucg;
500	unsigned cgno, fragno, fragoff, count, fragsize, i;
501	
502	UFSD("ENTER, fragment %llu, oldcount %u, newcount %u\n",
503	     (unsigned long long)fragment, oldcount, newcount);
504	
505	sb = inode->i_sb;
506	uspi = UFS_SB(sb)->s_uspi;
507	count = newcount - oldcount;
508	
509	cgno = ufs_dtog(uspi, fragment);
510	if (fs32_to_cpu(sb, UFS_SB(sb)->fs_cs(cgno).cs_nffree) < count)
511		return 0;
512	if ((ufs_fragnum (fragment) + newcount) > uspi->s_fpb)
513		return 0;
514	ucpi = ufs_load_cylinder (sb, cgno);
515	if (!ucpi)
516		return 0;
517	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
518	if (!ufs_cg_chkmagic(sb, ucg)) {
519		ufs_panic (sb, "ufs_add_fragments",
520			"internal error, bad magic number on cg %u", cgno);
521		return 0;
522	}
523
524	fragno = ufs_dtogd(uspi, fragment);
525	fragoff = ufs_fragnum (fragno);
526	for (i = oldcount; i < newcount; i++)
527		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
528			return 0;
529	/*
530	 * Block can be extended
531	 */
532	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
533	for (i = newcount; i < (uspi->s_fpb - fragoff); i++)
534		if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
535			break;
536	fragsize = i - oldcount;
537	if (!fs32_to_cpu(sb, ucg->cg_frsum[fragsize]))
538		ufs_panic (sb, "ufs_add_fragments",
539			"internal error or corrupted bitmap on cg %u", cgno);
540	fs32_sub(sb, &ucg->cg_frsum[fragsize], 1);
541	if (fragsize != count)
542		fs32_add(sb, &ucg->cg_frsum[fragsize - count], 1);
543	for (i = oldcount; i < newcount; i++)
544		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i);
545
546	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
547	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
548	uspi->cs_total.cs_nffree -= count;
549	
550	ubh_mark_buffer_dirty (USPI_UBH(uspi));
551	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
552	if (sb->s_flags & MS_SYNCHRONOUS)
553		ubh_sync_block(UCPI_UBH(ucpi));
554	ufs_mark_sb_dirty(sb);
555
556	UFSD("EXIT, fragment %llu\n", (unsigned long long)fragment);
557	
558	return fragment;
559}
560
561#define UFS_TEST_FREE_SPACE_CG \
562	ucg = (struct ufs_cylinder_group *) UFS_SB(sb)->s_ucg[cgno]->b_data; \
563	if (fs32_to_cpu(sb, ucg->cg_cs.cs_nbfree)) \
564		goto cg_found; \
565	for (k = count; k < uspi->s_fpb; k++) \
566		if (fs32_to_cpu(sb, ucg->cg_frsum[k])) \
567			goto cg_found; 
568
569static u64 ufs_alloc_fragments(struct inode *inode, unsigned cgno,
570			       u64 goal, unsigned count, int *err)
571{
572	struct super_block * sb;
573	struct ufs_sb_private_info * uspi;
574	struct ufs_cg_private_info * ucpi;
575	struct ufs_cylinder_group * ucg;
576	unsigned oldcg, i, j, k, allocsize;
577	u64 result;
578	
579	UFSD("ENTER, ino %lu, cgno %u, goal %llu, count %u\n",
580	     inode->i_ino, cgno, (unsigned long long)goal, count);
581
582	sb = inode->i_sb;
583	uspi = UFS_SB(sb)->s_uspi;
584	oldcg = cgno;
585	
 586	/*
 587	 * 1. search the preferred cylinder group
 588	 */
589	UFS_TEST_FREE_SPACE_CG
590
591	/*
592	 * 2. quadratic rehash
593	 */
594	for (j = 1; j < uspi->s_ncg; j *= 2) {
595		cgno += j;
596		if (cgno >= uspi->s_ncg) 
597			cgno -= uspi->s_ncg;
598		UFS_TEST_FREE_SPACE_CG
599	}
600
 601	/*
 602	 * 3. brute force search
 603	 * We start at j = 2 (offset 0 was checked in step 1, offset 1 in step 2)
 604	 */
605	cgno = (oldcg + 1) % uspi->s_ncg;
606	for (j = 2; j < uspi->s_ncg; j++) {
607		cgno++;
608		if (cgno >= uspi->s_ncg)
609			cgno = 0;
610		UFS_TEST_FREE_SPACE_CG
611	}
612	
613	UFSD("EXIT (FAILED)\n");
614	return 0;
615
616cg_found:
617	ucpi = ufs_load_cylinder (sb, cgno);
618	if (!ucpi)
619		return 0;
620	ucg = ubh_get_ucg (UCPI_UBH(ucpi));
621	if (!ufs_cg_chkmagic(sb, ucg)) 
622		ufs_panic (sb, "ufs_alloc_fragments",
623			"internal error, bad magic number on cg %u", cgno);
624	ucg->cg_time = cpu_to_fs32(sb, get_seconds());
625
626	if (count == uspi->s_fpb) {
627		result = ufs_alloccg_block (inode, ucpi, goal, err);
628		if (result == INVBLOCK)
629			return 0;
630		goto succed;
631	}
632
633	for (allocsize = count; allocsize < uspi->s_fpb; allocsize++)
634		if (fs32_to_cpu(sb, ucg->cg_frsum[allocsize]) != 0)
635			break;
636	
637	if (allocsize == uspi->s_fpb) {
638		result = ufs_alloccg_block (inode, ucpi, goal, err);
639		if (result == INVBLOCK)
640			return 0;
641		goal = ufs_dtogd(uspi, result);
642		for (i = count; i < uspi->s_fpb; i++)
643			ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
644		i = uspi->s_fpb - count;
645
646		fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
647		uspi->cs_total.cs_nffree += i;
648		fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
649		fs32_add(sb, &ucg->cg_frsum[i], 1);
650		goto succed;
651	}
652
653	result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
654	if (result == INVBLOCK)
655		return 0;
656	for (i = 0; i < count; i++)
657		ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
658	
659	fs32_sub(sb, &ucg->cg_cs.cs_nffree, count);
660	uspi->cs_total.cs_nffree -= count;
661	fs32_sub(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
662	fs32_sub(sb, &ucg->cg_frsum[allocsize], 1);
663
664	if (count != allocsize)
665		fs32_add(sb, &ucg->cg_frsum[allocsize - count], 1);
666
667succed:
668	ubh_mark_buffer_dirty (USPI_UBH(uspi));
669	ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
670	if (sb->s_flags & MS_SYNCHRONOUS)
671		ubh_sync_block(UCPI_UBH(ucpi));
672	ufs_mark_sb_dirty(sb);
673
674	result += cgno * uspi->s_fpg;
675	UFSD("EXIT3, result %llu\n", (unsigned long long)result);
676	return result;
677}
678
679static u64 ufs_alloccg_block(struct inode *inode,
680			     struct ufs_cg_private_info *ucpi,
681			     u64 goal, int *err)
682{
683	struct super_block * sb;
684	struct ufs_sb_private_info * uspi;
685	struct ufs_cylinder_group * ucg;
686	u64 result, blkno;
687
688	UFSD("ENTER, goal %llu\n", (unsigned long long)goal);
689
690	sb = inode->i_sb;
691	uspi = UFS_SB(sb)->s_uspi;
692	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
693
694	if (goal == 0) {
695		goal = ucpi->c_rotor;
696		goto norot;
697	}
698	goal = ufs_blknum (goal);
699	goal = ufs_dtogd(uspi, goal);
700	
701	/*
702	 * If the requested block is available, use it.
703	 */
704	if (ubh_isblockset(UCPI_UBH(ucpi), ucpi->c_freeoff, ufs_fragstoblks(goal))) {
705		result = goal;
706		goto gotit;
707	}
708	
709norot:	
710	result = ufs_bitmap_search (sb, ucpi, goal, uspi->s_fpb);
711	if (result == INVBLOCK)
712		return INVBLOCK;
713	ucpi->c_rotor = result;
714gotit:
715	blkno = ufs_fragstoblks(result);
716	ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
717	if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
718		ufs_clusteracct (sb, ucpi, blkno, -1);
719
720	fs32_sub(sb, &ucg->cg_cs.cs_nbfree, 1);
721	uspi->cs_total.cs_nbfree--;
722	fs32_sub(sb, &UFS_SB(sb)->fs_cs(ucpi->c_cgx).cs_nbfree, 1);
723
724	if (uspi->fs_magic != UFS2_MAGIC) {
725		unsigned cylno = ufs_cbtocylno((unsigned)result);
726
727		fs16_sub(sb, &ubh_cg_blks(ucpi, cylno,
728					  ufs_cbtorpos((unsigned)result)), 1);
729		fs32_sub(sb, &ubh_cg_blktot(ucpi, cylno), 1);
730	}
731	
732	UFSD("EXIT, result %llu\n", (unsigned long long)result);
733
734	return result;
735}
736
737static unsigned ubh_scanc(struct ufs_sb_private_info *uspi,
738			  struct ufs_buffer_head *ubh,
739			  unsigned begin, unsigned size,
740			  unsigned char *table, unsigned char mask)
741{
742	unsigned rest, offset;
743	unsigned char *cp;
744	
745
746	offset = begin & ~uspi->s_fmask;
747	begin >>= uspi->s_fshift;
748	for (;;) {
749		if ((offset + size) < uspi->s_fsize)
750			rest = size;
751		else
752			rest = uspi->s_fsize - offset;
753		size -= rest;
754		cp = ubh->bh[begin]->b_data + offset;
755		while ((table[*cp++] & mask) == 0 && --rest)
756			;
757		if (rest || !size)
758			break;
759		begin++;
760		offset = 0;
761	}
762	return (size + rest);
763}
764
 765/*
 766 * Find a block of the specified size in the specified cylinder group.
 767 * @sb: pointer to the super block
 768 * @ucpi: pointer to cylinder group info
 769 * @goal: block near which we want to find the new one
 770 * @count: requested size, in fragments
 771 */
772static u64 ufs_bitmap_search(struct super_block *sb,
773			     struct ufs_cg_private_info *ucpi,
774			     u64 goal, unsigned count)
775{
776	/*
777	 * Bit patterns for identifying fragments in the block map
778	 * used as ((map & mask_arr) == want_arr)
779	 */
780	static const int mask_arr[9] = {
781		0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff, 0x1ff, 0x3ff
782	};
783	static const int want_arr[9] = {
784		0x0, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe
785	};
786	struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
787	struct ufs_cylinder_group *ucg;
788	unsigned start, length, loc;
789	unsigned pos, want, blockmap, mask, end;
790	u64 result;
791
792	UFSD("ENTER, cg %u, goal %llu, count %u\n", ucpi->c_cgx,
793	     (unsigned long long)goal, count);
794
795	ucg = ubh_get_ucg(UCPI_UBH(ucpi));
796
797	if (goal)
798		start = ufs_dtogd(uspi, goal) >> 3;
799	else
800		start = ucpi->c_frotor >> 3;
801		
802	length = ((uspi->s_fpg + 7) >> 3) - start;
803	loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff + start, length,
804		(uspi->s_fpb == 8) ? ufs_fragtable_8fpb : ufs_fragtable_other,
805		1 << (count - 1 + (uspi->s_fpb & 7))); 
806	if (loc == 0) {
807		length = start + 1;
808		loc = ubh_scanc(uspi, UCPI_UBH(ucpi), ucpi->c_freeoff, length,
809				(uspi->s_fpb == 8) ? ufs_fragtable_8fpb :
810				ufs_fragtable_other,
811				1 << (count - 1 + (uspi->s_fpb & 7)));
812		if (loc == 0) {
813			ufs_error(sb, "ufs_bitmap_search",
814				  "bitmap corrupted on cg %u, start %u,"
815				  " length %u, count %u, freeoff %u\n",
816				  ucpi->c_cgx, start, length, count,
817				  ucpi->c_freeoff);
818			return INVBLOCK;
819		}
820		start = 0;
821	}
822	result = (start + length - loc) << 3;
823	ucpi->c_frotor = result;
824
825	/*
826	 * found the byte in the map
827	 */
828
829	for (end = result + 8; result < end; result += uspi->s_fpb) {
830		blockmap = ubh_blkmap(UCPI_UBH(ucpi), ucpi->c_freeoff, result);
831		blockmap <<= 1;
832		mask = mask_arr[count];
833		want = want_arr[count];
834		for (pos = 0; pos <= uspi->s_fpb - count; pos++) {
835			if ((blockmap & mask) == want) {
836				UFSD("EXIT, result %llu\n",
837				     (unsigned long long)result);
838				return result + pos;
839 			}
840			mask <<= 1;
841			want <<= 1;
842 		}
843 	}
844
845	ufs_error(sb, "ufs_bitmap_search", "block not in map on cg %u\n",
846		  ucpi->c_cgx);
847	UFSD("EXIT (FAILED)\n");
848	return INVBLOCK;
849}
850
851static void ufs_clusteracct(struct super_block * sb,
852	struct ufs_cg_private_info * ucpi, unsigned blkno, int cnt)
853{
854	struct ufs_sb_private_info * uspi;
855	int i, start, end, forw, back;
856	
857	uspi = UFS_SB(sb)->s_uspi;
858	if (uspi->s_contigsumsize <= 0)
859		return;
860
861	if (cnt > 0)
862		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
863	else
864		ubh_clrbit(UCPI_UBH(ucpi), ucpi->c_clusteroff, blkno);
865
866	/*
867	 * Find the size of the cluster going forward.
868	 */
869	start = blkno + 1;
870	end = start + uspi->s_contigsumsize;
871	if ( end >= ucpi->c_nclusterblks)
872		end = ucpi->c_nclusterblks;
873	i = ubh_find_next_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, end, start);
874	if (i > end)
875		i = end;
876	forw = i - start;
877	
878	/*
879	 * Find the size of the cluster going backward.
880	 */
881	start = blkno - 1;
882	end = start - uspi->s_contigsumsize;
883	if (end < 0 ) 
884		end = -1;
885	i = ubh_find_last_zero_bit (UCPI_UBH(ucpi), ucpi->c_clusteroff, start, end);
886	if ( i < end) 
887		i = end;
888	back = start - i;
889	
890	/*
891	 * Account for old cluster and the possibly new forward and
892	 * back clusters.
893	 */
894	i = back + forw + 1;
895	if (i > uspi->s_contigsumsize)
896		i = uspi->s_contigsumsize;
897	fs32_add(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (i << 2)), cnt);
898	if (back > 0)
899		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (back << 2)), cnt);
900	if (forw > 0)
901		fs32_sub(sb, (__fs32*)ubh_get_addr(UCPI_UBH(ucpi), ucpi->c_clustersumoff + (forw << 2)), cnt);
902}
903
904
905static unsigned char ufs_fragtable_8fpb[] = {
906	0x00, 0x01, 0x01, 0x02, 0x01, 0x01, 0x02, 0x04, 0x01, 0x01, 0x01, 0x03, 0x02, 0x03, 0x04, 0x08,
907	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x02, 0x03, 0x03, 0x02, 0x04, 0x05, 0x08, 0x10,
908	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
909	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x04, 0x05, 0x05, 0x06, 0x08, 0x09, 0x10, 0x20,
910	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,	
911	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
912	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
913	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x08, 0x09, 0x09, 0x0A, 0x10, 0x11, 0x20, 0x40,
914	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
915	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x03, 0x03, 0x03, 0x03, 0x05, 0x05, 0x09, 0x11,
916	0x01, 0x01, 0x01, 0x03, 0x01, 0x01, 0x03, 0x05, 0x01, 0x01, 0x01, 0x03, 0x03, 0x03, 0x05, 0x09,
917	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x05, 0x05, 0x05, 0x07, 0x09, 0x09, 0x11, 0x21,
918	0x02, 0x03, 0x03, 0x02, 0x03, 0x03, 0x02, 0x06, 0x03, 0x03, 0x03, 0x03, 0x02, 0x03, 0x06, 0x0A,
919	0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x07, 0x02, 0x03, 0x03, 0x02, 0x06, 0x07, 0x0A, 0x12,
920	0x04, 0x05, 0x05, 0x06, 0x05, 0x05, 0x06, 0x04, 0x05, 0x05, 0x05, 0x07, 0x06, 0x07, 0x04, 0x0C,
921	0x08, 0x09, 0x09, 0x0A, 0x09, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x11, 0x12, 0x20, 0x21, 0x40, 0x80,
922};
923
924static unsigned char ufs_fragtable_other[] = {
925	0x00, 0x16, 0x16, 0x2A, 0x16, 0x16, 0x26, 0x4E, 0x16, 0x16, 0x16, 0x3E, 0x2A, 0x3E, 0x4E, 0x8A,
926	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
927	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
928	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
929	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
930	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
931	0x26, 0x36, 0x36, 0x2E, 0x36, 0x36, 0x26, 0x6E, 0x36, 0x36, 0x36, 0x3E, 0x2E, 0x3E, 0x6E, 0xAE,
932	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
933	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
934	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
935	0x16, 0x16, 0x16, 0x3E, 0x16, 0x16, 0x36, 0x5E, 0x16, 0x16, 0x16, 0x3E, 0x3E, 0x3E, 0x5E, 0x9E,
936	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
937	0x2A, 0x3E, 0x3E, 0x2A, 0x3E, 0x3E, 0x2E, 0x6E, 0x3E, 0x3E, 0x3E, 0x3E, 0x2A, 0x3E, 0x6E, 0xAA,
938	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E,	0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x3E, 0x7E, 0xBE,
939	0x4E, 0x5E, 0x5E, 0x6E, 0x5E, 0x5E, 0x6E, 0x4E, 0x5E, 0x5E, 0x5E, 0x7E, 0x6E, 0x7E, 0x4E, 0xCE,
940	0x8A, 0x9E, 0x9E, 0xAA, 0x9E, 0x9E, 0xAE, 0xCE, 0x9E, 0x9E, 0x9E, 0xBE, 0xAA, 0xBE, 0xCE, 0x8A,
941};