/* fs/gfs2/bmap.c — Linux v3.15 */
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
   9
  10#include <linux/spinlock.h>
  11#include <linux/completion.h>
  12#include <linux/buffer_head.h>
  13#include <linux/blkdev.h>
  14#include <linux/gfs2_ondisk.h>
  15#include <linux/crc32.h>
  16
  17#include "gfs2.h"
  18#include "incore.h"
  19#include "bmap.h"
  20#include "glock.h"
  21#include "inode.h"
  22#include "meta_io.h"
  23#include "quota.h"
  24#include "rgrp.h"
  25#include "log.h"
  26#include "super.h"
  27#include "trans.h"
  28#include "dir.h"
  29#include "util.h"
  30#include "trace_gfs2.h"
  31
/* This doesn't need to be that large as max 64 bit pointers in a 4k
 * block is 512, so __u16 is fine for that. It saves stack space to
 * keep it small.
 */
struct metapath {
	/* Buffer at each height of the tree; [0] is the dinode buffer */
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
	/* Pointer index to follow at each height of the tree */
	__u16 mp_list[GFS2_MAX_META_HEIGHT];
};
  40
/* Per-pass state handed down through recursive_scan() to do_strip() */
struct strip_mine {
	int sm_first;		/* 1 until the first pointer has been skipped */
	unsigned int sm_height;	/* the tree height being stripped this pass */
};
  45
/**
 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
 * @ip: the inode
 * @dibh: the dinode buffer
 * @block: the block number that was allocated
 * @page: The (optional) page. This is looked up if @page is NULL
 *
 * Copies the inline (stuffed) data out of the dinode block into page 0
 * of the inode's address space and maps that page's buffer to @block.
 *
 * Returns: errno
 */

static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;	/* set if we looked the page up ourselves */

	/* Only page index 0 can hold the stuffed data */
	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);

		/* Stuffed data cannot exceed the dinode block's tail */
		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	/* Non-jdata: let normal writeback pick up the dirty buffer */
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	/* Ordered/journaled data goes through the transaction instead */
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
 106
/**
 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 * @ip: The GFS2 inode to unstuff
 * @page: The (optional) page. This is looked up if the @page is NULL
 *
 * This routine unstuffs a dinode and returns it to a "normal" state such
 * that the height can be grown in the traditional way.
 *
 * Returns: errno
 */

int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			/* Directory data moves into a new leaf block */
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			/* Regular file data moves into the page cache */
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/*  Set up the pointer to the new block  */

	gfs2_trans_add_meta(ip->i_gl, dibh);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		/* The single height-1 pointer directly follows the dinode */
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
 176
 177
 178/**
 179 * find_metapath - Find path through the metadata tree
 180 * @sdp: The superblock
 181 * @mp: The metapath to return the result in
 182 * @block: The disk block to look up
 183 * @height: The pre-calculated height of the metadata tree
 184 *
 185 *   This routine returns a struct metapath structure that defines a path
 186 *   through the metadata of inode "ip" to get to block "block".
 187 *
 188 *   Example:
 189 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
 190 *   filesystem with a blocksize of 4096.
 191 *
 192 *   find_metapath() would return a struct metapath structure set to:
 193 *   mp_offset = 101342453, mp_height = 3, mp_list[0] = 0, mp_list[1] = 48,
 194 *   and mp_list[2] = 165.
 195 *
 196 *   That means that in order to get to the block containing the byte at
 197 *   offset 101342453, we would load the indirect block pointed to by pointer
 198 *   0 in the dinode.  We would then load the indirect block pointed to by
 199 *   pointer 48 in that indirect block.  We would then load the data block
 200 *   pointed to by pointer 165 in that indirect block.
 201 *
 202 *             ----------------------------------------
 203 *             | Dinode |                             |
 204 *             |        |                            4|
 205 *             |        |0 1 2 3 4 5                 9|
 206 *             |        |                            6|
 207 *             ----------------------------------------
 208 *                       |
 209 *                       |
 210 *                       V
 211 *             ----------------------------------------
 212 *             | Indirect Block                       |
 213 *             |                                     5|
 214 *             |            4 4 4 4 4 5 5            1|
 215 *             |0           5 6 7 8 9 0 1            2|
 216 *             ----------------------------------------
 217 *                                |
 218 *                                |
 219 *                                V
 220 *             ----------------------------------------
 221 *             | Indirect Block                       |
 222 *             |                         1 1 1 1 1   5|
 223 *             |                         6 6 6 6 6   1|
 224 *             |0                        3 4 5 6 7   2|
 225 *             ----------------------------------------
 226 *                                           |
 227 *                                           |
 228 *                                           V
 229 *             ----------------------------------------
 230 *             | Data block containing offset         |
 231 *             |            101342453                 |
 232 *             |                                      |
 233 *             |                                      |
 234 *             ----------------------------------------
 235 *
 236 */
 237
 238static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 239			  struct metapath *mp, unsigned int height)
 240{
 241	unsigned int i;
 242
 243	for (i = height; i--;)
 244		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 245
 246}
 247
 248static inline unsigned int metapath_branch_start(const struct metapath *mp)
 249{
 250	if (mp->mp_list[0] == 0)
 251		return 2;
 252	return 1;
 253}
 254
 255/**
 256 * metapointer - Return pointer to start of metadata in a buffer
 257 * @height: The metadata height (0 = dinode)
 258 * @mp: The metapath
 259 *
 260 * Return a pointer to the block number of the next height of the metadata
 261 * tree given a buffer containing the pointer to the current height of the
 262 * metadata tree.
 263 */
 264
 265static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 266{
 267	struct buffer_head *bh = mp->mp_bh[height];
 268	unsigned int head_size = (height > 0) ?
 269		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
 270	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
 271}
 272
/**
 * gfs2_metapath_ra - issue read-ahead for the tail of an indirect block
 * @gl: The glock covering this metadata
 * @bh: The indirect block whose pointer area is being scanned
 * @pos: The first pointer within @bh to start read-ahead from
 *
 * Fires off asynchronous reads for every non-zero block pointer from
 * @pos to the end of @bh without waiting for completion.
 */
static void gfs2_metapath_ra(struct gfs2_glock *gl,
			     const struct buffer_head *bh, const __be64 *pos)
{
	struct buffer_head *rabh;
	const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
	const __be64 *t;

	for (t = pos; t < endp; t++) {
		if (!*t)
			continue;	/* unallocated - nothing to read */

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		/* Skip buffers already locked (I/O presumably in flight) */
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				submit_bh(READA | REQ_META, rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}
 296
/**
 * lookup_metapath - Walk the metadata tree to a specific point
 * @ip: The inode
 * @mp: The metapath
 *
 * Assumes that the inode's buffer has already been looked up and
 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 * by find_metapath().
 *
 * If this function encounters part of the tree which has not been
 * allocated, it returns the current height of the tree at the point
 * at which it found the unallocated block. Blocks which are found are
 * added to the mp->mp_bh[] list.
 *
 * Returns: error or height of metadata tree
 */

static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	unsigned int end_of_metadata = ip->i_height - 1;
	unsigned int x;
	__be64 *ptr;
	u64 dblock;
	int ret;

	for (x = 0; x < end_of_metadata; x++) {
		ptr = metapointer(x, mp);
		dblock = be64_to_cpu(*ptr);
		if (!dblock)	/* hole: report how far down we got */
			return x + 1;

		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
		if (ret)
			return ret;
	}

	/* Full path allocated all the way down */
	return ip->i_height;
}
 335
 336static inline void release_metapath(struct metapath *mp)
 337{
 338	int i;
 339
 340	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
 341		if (mp->mp_bh[i] == NULL)
 342			break;
 343		brelse(mp->mp_bh[i]);
 344	}
 345}
 346
/**
 * gfs2_extent_length - Returns length of an extent of blocks
 * @start: Start of the buffer
 * @len: Length of the buffer in bytes
 * @ptr: Current position in the buffer
 * @limit: Max extent length to return (0 = unlimited)
 * @eob: Set to 1 if we hit "end of block"
 *
 * If the first block is zero (unallocated) it will return the number of
 * unallocated blocks in the extent, otherwise it will return the number
 * of contiguous blocks in the extent.
 *
 * Returns: The length of the extent (minimum of one block)
 */

static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);	/* d == 0 => counting a hole */

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		if (d)
			d++;	/* next allocated block must be contiguous */
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}
 382
 383static inline void bmap_lock(struct gfs2_inode *ip, int create)
 384{
 385	if (create)
 386		down_write(&ip->i_rw_mutex);
 387	else
 388		down_read(&ip->i_rw_mutex);
 389}
 390
 391static inline void bmap_unlock(struct gfs2_inode *ip, int create)
 392{
 393	if (create)
 394		up_write(&ip->i_rw_mutex);
 395	else
 396		up_read(&ip->i_rw_mutex);
 397}
 398
 399static inline __be64 *gfs2_indirect_init(struct metapath *mp,
 400					 struct gfs2_glock *gl, unsigned int i,
 401					 unsigned offset, u64 bn)
 402{
 403	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
 404		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
 405				 sizeof(struct gfs2_dinode)));
 406	BUG_ON(i < 1);
 407	BUG_ON(mp->mp_bh[i] != NULL);
 408	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
 409	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
 410	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 411	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 412	ptr += offset;
 413	*ptr = cpu_to_be64(bn);
 414	return ptr;
 415}
 416
/* States of the gfs2_bmap_alloc() state machine; allocation proceeds
 * from growing the tree height, to filling in depth, to data blocks. */
enum alloc_state {
	ALLOC_DATA = 0,		/* allocating data blocks */
	ALLOC_GROW_DEPTH = 1,	/* filling in lower levels of the tree */
	ALLOC_GROW_HEIGHT = 2,	/* growing the overall tree height */
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};
 423
/**
 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 * @inode: The GFS2 inode
 * @lblock: The logical starting block of the extent
 * @bh_map: This is used to return the mapping details
 * @mp: The metapath
 * @sheight: The starting height (i.e. whats already mapped)
 * @height: The height to build to
 * @maxlen: The max number of data blocks to alloc
 *
 * In this routine we may have to alloc:
 *   i) Indirect blocks to grow the metadata tree height
 *  ii) Indirect blocks to fill in lower part of the metadata tree
 * iii) Data blocks
 *
 * The function is in two parts. The first part works out the total
 * number of blocks which we need. The second part does the actual
 * allocation asking for an extent at a time (if enough contiguous free
 * blocks are available, there will only be one request per bmap call)
 * and uses the state machine to initialise the blocks in order.
 *
 * Note: the switch cases below fall through intentionally so that one
 * allocated extent can service several states in turn.
 *
 * Returns: errno on error
 */

static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const unsigned int maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int ret;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				/* Save the pointer currently in the dinode */
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				/* Move the old top of the tree down a level */
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				/* Release stale buffers below the new branch */
				for(i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
			/* fall through */
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
			/* fall through */
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			if (buffer_zeronew(bh_map)) {
				ret = sb_issue_zeroout(sb, dblock, dblks,
						       GFP_NOFS);
				if (ret) {
					fs_err(sdp,
					       "Failed to zero data buffers\n");
					clear_buffer_zeronew(bh_map);
				}
			}
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}
 587
/**
 * gfs2_block_map - Map a block from an inode to a disk block
 * @inode: The inode
 * @lblock: The logical block number
 * @bh_map: The bh to be mapped
 * @create: True if its ok to alloc blocks to satify the request
 *
 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
 * read of metadata will be required before the next block can be
 * mapped. Sets buffer_new() if new blocks were allocated.
 *
 * Returns: errno
 */

int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	u64 size;
	struct metapath mp;
	int ret;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	BUG_ON(maxlen == 0);

	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
	bmap_lock(ip, create);
	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
	if (gfs2_is_dir(ip)) {
		/* Directories use the journaled block size and heights */
		bsize = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out;

	/* Work out the tree height needed to cover this logical block */
	height = ip->i_height;
	size = (lblock + 1) * bsize;
	while (size > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	ret = 1;
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;
	ret = lookup_metapath(ip, &mp);
	if (ret < 0)
		goto out;
	if (ret != ip->i_height)	/* hit a hole part way down */
		goto do_alloc;
	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;
	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
	bh_map->b_size = (len << inode->i_blkbits);
	if (eob)
		set_buffer_boundary(bh_map);
	ret = 0;
out:
	release_metapath(&mp);
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	bmap_unlock(ip, create);
	return ret;

do_alloc:
	/* All allocations are done here, firstly check create flag */
	if (!create) {
		BUG_ON(gfs2_is_stuffed(ip));
		ret = 0;
		goto out;
	}

	/* At this point ret is the tree depth of already allocated blocks */
	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
	goto out;
}
 677
 678/*
 679 * Deprecated: do not use in new code
 680 */
 681int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 682{
 683	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 684	int ret;
 685	int create = *new;
 686
 687	BUG_ON(!extlen);
 688	BUG_ON(!dblock);
 689	BUG_ON(!new);
 690
 691	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
 692	ret = gfs2_block_map(inode, lblock, &bh, create);
 693	*extlen = bh.b_size >> inode->i_blkbits;
 694	*dblock = bh.b_blocknr;
 695	if (buffer_new(&bh))
 696		*new = 1;
 697	else
 698		*new = 0;
 699	return ret;
 700}
 701
/**
 * do_strip - Look for a particular layer of the file and strip it off
 * @ip: the inode
 * @dibh: the dinode buffer
 * @bh: A buffer of pointers
 * @top: The first pointer in the buffer
 * @bottom: One more than the last pointer
 * @height: the height this buffer is at
 * @sm: a pointer to a struct strip_mine
 *
 * Frees every block referenced between @top and @bottom once the
 * resource groups involved are locked and a transaction is open.
 *
 * Returns: errno
 */

static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
		    unsigned int height, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	u64 bn, bstart;
	u32 blen, btotal;
	__be64 *p;
	unsigned int rg_blocks = 0;
	int metadata;
	unsigned int revokes = 0;
	int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (!*top)
		sm->sm_first = 0;

	/* Only strip the height this pass is responsible for */
	if (height != sm->sm_height)
		return 0;

	if (sm->sm_first) {
		top++;		/* keep the very first pointer */
		sm->sm_first = 0;
	}

	/* Pointers above the bottom layer reference metadata blocks */
	metadata = (height != ip->i_height - 1);
	if (metadata)
		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
	else if (ip->i_depth)
		revokes = sdp->sd_inptrs;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	bstart = 0;
	blen = 0;

	/* First pass: collect the resource groups we will need to lock,
	   coalescing runs of contiguous block numbers */
	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);

			bstart = bn;
			blen = 1;
		}
	}

	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out; /* Nothing to do */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	if (gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */
		gfs2_rs_deltree(ip->i_res);

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
				 revokes);
	if (error)
		goto out_rg_gunlock;

	down_write(&ip->i_rw_mutex);

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_trans_add_meta(ip->i_gl, bh);

	bstart = 0;
	blen = 0;
	btotal = 0;

	/* Second pass: actually free the blocks and zero the pointers */
	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart) {
				__gfs2_free_blocks(ip, bstart, blen, metadata);
				btotal += blen;
			}

			bstart = bn;
			blen = 1;
		}

		*p = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart) {
		__gfs2_free_blocks(ip, bstart, blen, metadata);
		btotal += blen;
	}

	/* Return the freed space to statfs and the owner's quota */
	gfs2_statfs_change(sdp, 0, +btotal, 0);
	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
			  ip->i_inode.i_gid);

	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
out:
	return error;
}
 852
/**
 * recursive_scan - recursively scan through the end of a file
 * @ip: the inode
 * @dibh: the dinode buffer
 * @mp: the path through the metadata to the point to start
 * @height: the height the recursion is at
 * @block: the indirect block to look at
 * @first: 1 if this is the first block
 * @sm: data opaque to this function to pass to do_strip()
 *
 * When this is first called @height and @block should be zero and
 * @first should be 1.
 *
 * Returns: errno
 */

static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	__be64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		/* Height 0: scan the pointer area of the dinode itself */
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
		if (error)
			return error;

		/* Only the first block at each height starts mid-way */
		top = (__be64 *)(bh->b_data + mh_size) +
				  (first ? mp->mp_list[height] : 0);

		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = do_strip(ip, dibh, bh, top, bottom, height, sm);
	if (error)
		goto out;

	if (height < ip->i_height - 1) {

		gfs2_metapath_ra(ip->i_gl, bh, top);

		/* Recurse into each remaining allocated child block */
		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, sm);
			if (error)
				break;
		}
	}
out:
	brelse(bh);
	return error;
}
 923
 924
/**
 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 * @mapping: The address space of the inode being truncated
 * @from: The new end-of-file offset
 *
 * Zeroes out the portion of the block containing @from that lies
 * beyond the new size, so stale data does not reappear if the file
 * is later extended.
 *
 * This is partly borrowed from ext3.
 */
static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	/* Ordered/journaled data must be journaled before zeroing */
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_data(ip->i_gl, bh);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
 994
 995/**
 996 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 997 * @inode: The inode being truncated
 998 * @oldsize: The original (larger) size
 999 * @newsize: The new smaller size
1000 *
1001 * With jdata files, we have to journal a revoke for each block which is
1002 * truncated. As a result, we need to split this into separate transactions
1003 * if the number of pages being truncated gets too large.
1004 */
1005
1006#define GFS2_JTRUNC_REVOKES 8192
1007
1008static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1009{
1010	struct gfs2_sbd *sdp = GFS2_SB(inode);
1011	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1012	u64 chunk;
1013	int error;
1014
1015	while (oldsize != newsize) {
1016		chunk = oldsize - newsize;
1017		if (chunk > max_chunk)
1018			chunk = max_chunk;
1019		truncate_pagecache(inode, oldsize - chunk);
1020		oldsize -= chunk;
1021		gfs2_trans_end(sdp);
1022		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1023		if (error)
1024			return error;
1025	}
1026
1027	return 0;
1028}
1029
/**
 * trunc_start - Begin a truncate: shrink i_size and zero the tail
 * @inode: The inode being truncated
 * @oldsize: The current size
 * @newsize: The new, smaller size
 *
 * Updates the on-disk size, zeroes data beyond @newsize and drops the
 * now-unused page cache, all within a transaction. Block deallocation
 * itself happens later (TRUNC_IN_PROG marks that it is still pending).
 *
 * Returns: errno
 */
static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct buffer_head *dibh;
	int journaled = gfs2_is_jdata(ip);
	int error;

	/* jdata needs revoke space for the blocks it truncates */
	if (journaled)
		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
	else
		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		/* Inline data: clear everything past the new size */
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		/* Zero the tail of a partially truncated block */
		if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
			error = gfs2_block_truncate_page(mapping, newsize);
			if (error)
				goto out_brelse;
		}
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(ip, dibh->b_data);

	if (journaled)
		error = gfs2_journaled_truncate(inode, oldsize, newsize);
	else
		truncate_pagecache(inode, newsize);

	if (error) {
		/* gfs2_journaled_truncate() only fails in gfs2_trans_begin(),
		   so there is no open transaction left to end here */
		brelse(dibh);
		return error;
	}

out_brelse:
	brelse(dibh);
out:
	gfs2_trans_end(sdp);
	return error;
}
1083
1084static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
1085{
1086	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1087	unsigned int height = ip->i_height;
1088	u64 lblock;
1089	struct metapath mp;
1090	int error;
1091
1092	if (!size)
1093		lblock = 0;
1094	else
1095		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
1096
1097	find_metapath(sdp, lblock, &mp, ip->i_height);
1098	error = gfs2_rindex_update(sdp);
1099	if (error)
1100		return error;
1101
1102	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1103	if (error)
1104		return error;
1105
1106	while (height--) {
1107		struct strip_mine sm;
1108		sm.sm_first = !!size;
1109		sm.sm_height = height;
1110
1111		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
1112		if (error)
1113			break;
1114	}
1115
1116	gfs2_quota_unhold(ip);
1117
 
 
1118	return error;
1119}
1120
/* Final phase of a truncate: clear the TRUNC_IN_PROG flag and, when the
 * file is now empty, reset its height and allocation goal. Returns: errno. */
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	if (!i_size_read(&ip->i_inode)) {
		/* File is now empty: it is stuffed again (height 0),
		 * and future allocations start near the dinode. */
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
		gfs2_ordered_del_inode(ip);
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	/* Truncate is complete; safe to clear the resume flag */
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}
1155
1156/**
1157 * do_shrink - make a file smaller
1158 * @inode: the inode
1159 * @oldsize: the current inode size
1160 * @newsize: the size to make the file
1161 *
1162 * Called with an exclusive lock on @inode. The @size must
1163 * be equal to or smaller than the current inode size.
1164 *
1165 * Returns: errno
1166 */
1167
1168static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1169{
1170	struct gfs2_inode *ip = GFS2_I(inode);
1171	int error;
1172
1173	error = trunc_start(inode, oldsize, newsize);
1174	if (error < 0)
1175		return error;
1176	if (gfs2_is_stuffed(ip))
1177		return 0;
1178
1179	error = trunc_dealloc(ip, newsize);
1180	if (error == 0)
1181		error = trunc_end(ip);
1182
1183	return error;
1184}
1185
1186void gfs2_trim_blocks(struct inode *inode)
1187{
1188	u64 size = inode->i_size;
1189	int ret;
1190
1191	ret = do_shrink(inode, size, size);
1192	WARN_ON(ret != 0);
1193}
1194
1195/**
1196 * do_grow - Touch and update inode size
1197 * @inode: The inode
1198 * @size: The new size
1199 *
1200 * This function updates the timestamps on the inode and
1201 * may also increase the size of the inode. This function
1202 * must not be called with @size any smaller than the current
1203 * inode size.
1204 *
1205 * Although it is not strictly required to unstuff files here,
1206 * earlier versions of GFS2 have a bug in the stuffed file reading
1207 * code which will result in a buffer overrun if the size is larger
1208 * than the max stuffed file size. In order to prevent this from
1209 * occurring, such files are unstuffed, but in other cases we can
1210 * just update the inode size directly.
1211 *
1212 * Returns: 0 on success, or -ve on error
1213 */
1214
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

	/* If the new size no longer fits inside the dinode block, the file
	 * must be unstuffed, which needs a quota check and a block
	 * reservation up front. */
	if (gfs2_is_stuffed(ip) &&
	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
		error = gfs2_quota_lock_check(ip);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

	/* Unwind in reverse acquisition order. Note do_grow_qunlock is
	 * deliberately nested inside the if: the reserve/quota pair is
	 * only held when unstuffing was required. */
do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}
1268
1269/**
1270 * gfs2_setattr_size - make a file a given size
1271 * @inode: the inode
1272 * @newsize: the size to make the file
1273 *
1274 * The file size can grow, shrink, or stay the same size. This
1275 * is called holding i_mutex and an exclusive glock on the inode
1276 * in question.
1277 *
1278 * Returns: errno
1279 */
1280
1281int gfs2_setattr_size(struct inode *inode, u64 newsize)
1282{
1283	struct gfs2_inode *ip = GFS2_I(inode);
1284	int ret;
1285	u64 oldsize;
1286
1287	BUG_ON(!S_ISREG(inode->i_mode));
1288
1289	ret = inode_newsize_ok(inode, newsize);
1290	if (ret)
1291		return ret;
1292
1293	ret = get_write_access(inode);
1294	if (ret)
1295		return ret;
1296
1297	inode_dio_wait(inode);
1298
1299	ret = gfs2_rs_alloc(ip);
1300	if (ret)
1301		goto out;
1302
1303	oldsize = inode->i_size;
1304	if (newsize >= oldsize) {
1305		ret = do_grow(inode, newsize);
1306		goto out;
1307	}
1308
1309	gfs2_rs_deltree(ip->i_res);
1310	ret = do_shrink(inode, oldsize, newsize);
1311out:
1312	put_write_access(inode);
1313	return ret;
1314}
1315
1316int gfs2_truncatei_resume(struct gfs2_inode *ip)
1317{
1318	int error;
1319	error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
1320	if (!error)
1321		error = trunc_end(ip);
1322	return error;
1323}
1324
/* Free every data and metadata block of the file by deallocating to size 0. */
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}
1329
1330/**
1331 * gfs2_free_journal_extents - Free cached journal bmap info
1332 * @jd: The journal
1333 *
1334 */
1335
1336void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
1337{
1338	struct gfs2_journal_extent *jext;
1339
1340	while(!list_empty(&jd->extent_list)) {
1341		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
1342		list_del(&jext->list);
1343		kfree(jext);
1344	}
1345}
1346
1347/**
1348 * gfs2_add_jextent - Add or merge a new extent to extent cache
1349 * @jd: The journal descriptor
1350 * @lblock: The logical block at start of new extent
1351 * @pblock: The physical block at start of new extent
1352 * @blocks: Size of extent in fs blocks
1353 *
1354 * Returns: 0 on success or -ENOMEM
1355 */
1356
1357static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
1358{
1359	struct gfs2_journal_extent *jext;
1360
1361	if (!list_empty(&jd->extent_list)) {
1362		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
1363		if ((jext->dblock + jext->blocks) == dblock) {
1364			jext->blocks += blocks;
1365			return 0;
1366		}
1367	}
1368
1369	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
1370	if (jext == NULL)
1371		return -ENOMEM;
1372	jext->dblock = dblock;
1373	jext->lblock = lblock;
1374	jext->blocks = blocks;
1375	list_add_tail(&jext->list, &jd->extent_list);
1376	jd->nr_extents++;
1377	return 0;
1378}
1379
1380/**
1381 * gfs2_map_journal_extents - Cache journal bmap info
1382 * @sdp: The super block
1383 * @jd: The journal to map
1384 *
1385 * Create a reusable "extent" mapping from all logical
1386 * blocks to all physical blocks for the given journal.  This will save
1387 * us time when writing journal blocks.  Most journals will have only one
1388 * extent that maps all their logical blocks.  That's because gfs2.mkfs
1389 * arranges the journal blocks sequentially to maximize performance.
1390 * So the extent would map the first block for the entire file length.
1391 * However, gfs2_jadd can happen while file activity is happening, so
1392 * those journals may not be sequential.  Less likely is the case where
1393 * the users created their own journals by mounting the metafs and
1394 * laying it out.  But it's still possible.  These journals might have
1395 * several extents.
1396 *
1397 * Returns: 0 on success, or error on failure
1398 */
1399
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;	/* scratch bh, used only to query the mapping */
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;

	/* Total journal size in bytes, rounded down to whole fs blocks.
	 * Assumes the journal is non-empty — TODO confirm callers guarantee
	 * this, as a zero size would underflow in the loop below. */
	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

	/* Walk the journal's logical blocks, caching one extent entry per
	 * physically-contiguous run returned by gfs2_block_map(). */
	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
		jd->nr_extents);
	return 0;

fail:
	/* Report where the mapping failed, then drop any partial cache */
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}
1444
1445/**
1446 * gfs2_write_alloc_required - figure out if a write will require an allocation
1447 * @ip: the file being written to
1448 * @offset: the offset to write to
1449 * @len: the number of bytes being written
1450 *
1451 * Returns: 1 if an alloc is required, 0 otherwise
1452 */
1453
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;	/* scratch bh for querying the mapping */
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	/* Zero-length writes never need an allocation */
	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		/* Stuffed data must fit in the dinode block; anything
		 * beyond that forces an unstuff (i.e. an allocation). */
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	/* Writing past EOF always requires allocation */
	if (lblock_stop > end_of_file)
		return 1;

	/* Within the file: any unmapped block in the range needs allocation.
	 * NOTE(review): gfs2_block_map()'s return value is ignored here; on
	 * error the bh stays unmapped and we conservatively return 1. */
	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}
1494
/* ==== Below: second copy of this file, as of Linux v3.5.6 ==== */
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/spinlock.h>
  11#include <linux/completion.h>
  12#include <linux/buffer_head.h>
  13#include <linux/blkdev.h>
  14#include <linux/gfs2_ondisk.h>
  15#include <linux/crc32.h>
  16
  17#include "gfs2.h"
  18#include "incore.h"
  19#include "bmap.h"
  20#include "glock.h"
  21#include "inode.h"
  22#include "meta_io.h"
  23#include "quota.h"
  24#include "rgrp.h"
 
  25#include "super.h"
  26#include "trans.h"
  27#include "dir.h"
  28#include "util.h"
  29#include "trace_gfs2.h"
  30
  31/* This doesn't need to be that large as max 64 bit pointers in a 4k
  32 * block is 512, so __u16 is fine for that. It saves stack space to
  33 * keep it small.
  34 */
struct metapath {
	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT]; /* buffer for each tree level, [0] = dinode */
	__u16 mp_list[GFS2_MAX_META_HEIGHT]; /* pointer index within each level's block */
};
  39
/* Parameters for one pass of recursive_scan() during truncation. */
struct strip_mine {
	int sm_first;		/* set from !!size in trunc_dealloc(): nonzero when truncating to a nonzero size */
	unsigned int sm_height;	/* tree level being stripped on this pass */
};
  44
  45/**
  46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
  47 * @ip: the inode
  48 * @dibh: the dinode buffer
  49 * @block: the block number that was allocated
  50 * @page: The (optional) page. This is looked up if @page is NULL
  51 *
  52 * Returns: errno
  53 */
  54
static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
			       u64 block, struct page *page)
{
	struct inode *inode = &ip->i_inode;
	struct buffer_head *bh;
	int release = 0;	/* set when we looked the page up ourselves */

	/* Use the caller's page only if it is page 0 of the file */
	if (!page || page->index) {
		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
		if (!page)
			return -ENOMEM;
		release = 1;
	}

	if (!PageUptodate(page)) {
		void *kaddr = kmap(page);
		u64 dsize = i_size_read(inode);
 
		/* Stuffed data can never exceed the dinode block's tail */
		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
			dsize = dibh->b_size - sizeof(struct gfs2_dinode);

		/* Copy the stuffed data into the page, zero the remainder */
		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
		memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
		kunmap(page);

		SetPageUptodate(page);
	}

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits,
				     (1 << BH_Uptodate));

	bh = page_buffers(page);

	/* Point the page's buffer at the newly allocated block */
	if (!buffer_mapped(bh))
		map_bh(bh, inode->i_sb, block);

	set_buffer_uptodate(bh);
	if (!gfs2_is_jdata(ip))
		mark_buffer_dirty(bh);
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	if (release) {
		unlock_page(page);
		page_cache_release(page);
	}

	return 0;
}
 105
 106/**
 107 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 108 * @ip: The GFS2 inode to unstuff
 109 * @page: The (optional) page. This is looked up if the @page is NULL
 110 *
 111 * This routine unstuffs a dinode and returns it to a "normal" state such
 112 * that the height can be grown in the traditional way.
 113 *
 114 * Returns: errno
 115 */
 116
int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *bh, *dibh;
	struct gfs2_dinode *di;
	u64 block = 0;
	int isdir = gfs2_is_dir(ip);
	int error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	/* An empty file needs no data block; only the height changes below */
	if (i_size_read(&ip->i_inode)) {
		/* Get a free block, fill it with the stuffed data,
		   and write it out to disk */

		unsigned int n = 1;
		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
		if (error)
			goto out_brelse;
		if (isdir) {
			/* Directory data moves into a leaf-formatted block */
			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
			error = gfs2_dir_get_new_buffer(ip, block, &bh);
			if (error)
				goto out_brelse;
			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
					      dibh, sizeof(struct gfs2_dinode));
			brelse(bh);
		} else {
			/* Regular file data goes through the page cache */
			error = gfs2_unstuffer_page(ip, dibh, block, page);
			if (error)
				goto out_brelse;
		}
	}

	/*  Set up the pointer to the new block  */

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	di = (struct gfs2_dinode *)dibh->b_data;
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	if (i_size_read(&ip->i_inode)) {
		/* First pointer slot after the dinode header -> new block */
		*(__be64 *)(di + 1) = cpu_to_be64(block);
		gfs2_add_inode_blocks(&ip->i_inode, 1);
		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
	}

	/* The file is no longer stuffed: tree height becomes 1 */
	ip->i_height = 1;
	di->di_height = cpu_to_be16(1);

out_brelse:
	brelse(dibh);
out:
	up_write(&ip->i_rw_mutex);
	return error;
}
 175
 176
 177/**
 178 * find_metapath - Find path through the metadata tree
 179 * @sdp: The superblock
 180 * @mp: The metapath to return the result in
 181 * @block: The disk block to look up
 182 * @height: The pre-calculated height of the metadata tree
 183 *
 184 *   This routine returns a struct metapath structure that defines a path
 185 *   through the metadata of inode "ip" to get to block "block".
 186 *
 187 *   Example:
 188 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
 189 *   filesystem with a blocksize of 4096.
 190 *
 191 *   find_metapath() would return a struct metapath structure set to:
 192 *   mp_offset = 101342453, mp_height = 3, mp_list[0] = 0, mp_list[1] = 48,
 193 *   and mp_list[2] = 165.
 194 *
 195 *   That means that in order to get to the block containing the byte at
 196 *   offset 101342453, we would load the indirect block pointed to by pointer
 197 *   0 in the dinode.  We would then load the indirect block pointed to by
 198 *   pointer 48 in that indirect block.  We would then load the data block
 199 *   pointed to by pointer 165 in that indirect block.
 200 *
 201 *             ----------------------------------------
 202 *             | Dinode |                             |
 203 *             |        |                            4|
 204 *             |        |0 1 2 3 4 5                 9|
 205 *             |        |                            6|
 206 *             ----------------------------------------
 207 *                       |
 208 *                       |
 209 *                       V
 210 *             ----------------------------------------
 211 *             | Indirect Block                       |
 212 *             |                                     5|
 213 *             |            4 4 4 4 4 5 5            1|
 214 *             |0           5 6 7 8 9 0 1            2|
 215 *             ----------------------------------------
 216 *                                |
 217 *                                |
 218 *                                V
 219 *             ----------------------------------------
 220 *             | Indirect Block                       |
 221 *             |                         1 1 1 1 1   5|
 222 *             |                         6 6 6 6 6   1|
 223 *             |0                        3 4 5 6 7   2|
 224 *             ----------------------------------------
 225 *                                           |
 226 *                                           |
 227 *                                           V
 228 *             ----------------------------------------
 229 *             | Data block containing offset         |
 230 *             |            101342453                 |
 231 *             |                                      |
 232 *             |                                      |
 233 *             ----------------------------------------
 234 *
 235 */
 236
 237static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 238			  struct metapath *mp, unsigned int height)
 239{
 240	unsigned int i;
 241
 242	for (i = height; i--;)
 243		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 244
 245}
 246
 247static inline unsigned int metapath_branch_start(const struct metapath *mp)
 248{
 249	if (mp->mp_list[0] == 0)
 250		return 2;
 251	return 1;
 252}
 253
 254/**
 255 * metapointer - Return pointer to start of metadata in a buffer
 256 * @height: The metadata height (0 = dinode)
 257 * @mp: The metapath
 258 *
 259 * Return a pointer to the block number of the next height of the metadata
 260 * tree given a buffer containing the pointer to the current height of the
 261 * metadata tree.
 262 */
 263
 264static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 265{
 266	struct buffer_head *bh = mp->mp_bh[height];
 267	unsigned int head_size = (height > 0) ?
 268		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
 269	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
 270}
 271
/* Issue read-ahead for every block pointed to from @pos to the end of the
 * indirect block @bh. Best-effort: already-locked or up-to-date buffers
 * are skipped. */
static void gfs2_metapath_ra(struct gfs2_glock *gl,
			     const struct buffer_head *bh, const __be64 *pos)
{
	struct buffer_head *rabh;
	const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
	const __be64 *t;

	for (t = pos; t < endp; t++) {
		/* Skip unallocated (zero) pointers */
		if (!*t)
			continue;

		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
		if (trylock_buffer(rabh)) {
			if (!buffer_uptodate(rabh)) {
				rabh->b_end_io = end_buffer_read_sync;
				/* No brelse here: the reference is presumably
				 * dropped by end_buffer_read_sync on I/O
				 * completion — NOTE(review): confirm. */
				submit_bh(READA | REQ_META, rabh);
				continue;
			}
			unlock_buffer(rabh);
		}
		brelse(rabh);
	}
}
 295
 296/**
 297 * lookup_metapath - Walk the metadata tree to a specific point
 298 * @ip: The inode
 299 * @mp: The metapath
 300 *
 301 * Assumes that the inode's buffer has already been looked up and
 302 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 303 * by find_metapath().
 304 *
 305 * If this function encounters part of the tree which has not been
 306 * allocated, it returns the current height of the tree at the point
 307 * at which it found the unallocated block. Blocks which are found are
 308 * added to the mp->mp_bh[] list.
 309 *
 310 * Returns: error or height of metadata tree
 311 */
 312
static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
{
	unsigned int end_of_metadata = ip->i_height - 1;
	unsigned int x;
	__be64 *ptr;
	u64 dblock;
	int ret;

	/* Walk down the tree, reading the indirect block for each level */
	for (x = 0; x < end_of_metadata; x++) {
		ptr = metapointer(x, mp);
		dblock = be64_to_cpu(*ptr);
		/* A zero pointer means the tree stops here: return the
		 * height reached (always >= 1, so distinct from errors) */
		if (!dblock)
			return x + 1;

		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
		if (ret)
			return ret;
	}

	/* Fully allocated down to the data level */
	return ip->i_height;
}
 334
 335static inline void release_metapath(struct metapath *mp)
 336{
 337	int i;
 338
 339	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
 340		if (mp->mp_bh[i] == NULL)
 341			break;
 342		brelse(mp->mp_bh[i]);
 343	}
 344}
 345
 346/**
 347 * gfs2_extent_length - Returns length of an extent of blocks
 348 * @start: Start of the buffer
 349 * @len: Length of the buffer in bytes
 350 * @ptr: Current position in the buffer
 351 * @limit: Max extent length to return (0 = unlimited)
 352 * @eob: Set to 1 if we hit "end of block"
 353 *
 354 * If the first block is zero (unallocated) it will return the number of
 355 * unallocated blocks in the extent, otherwise it will return the number
 356 * of contiguous blocks in the extent.
 357 *
 358 * Returns: The length of the extent (minimum of one block)
 359 */
 360
static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, unsigned limit, int *eob)
{
	const __be64 *end = (start + len);
	const __be64 *first = ptr;
	u64 d = be64_to_cpu(*ptr);	/* first block; 0 => counting a hole */

	*eob = 0;
	do {
		ptr++;
		if (ptr >= end)
			break;
		if (limit && --limit == 0)
			break;
		/* When d is 0 we are measuring a run of unallocated
		 * pointers; otherwise d tracks the expected next disk
		 * block of a contiguous allocated run. */
		if (d)
			d++;
	} while(be64_to_cpu(*ptr) == d);
	if (ptr >= end)
		*eob = 1;
	return (ptr - first);
}
 381
/* Take i_rw_mutex for a bmap operation: allocation (@create) modifies the
 * metadata tree and needs the write lock; lookups only need read. */
static inline void bmap_lock(struct gfs2_inode *ip, int create)
{
	if (create)
		down_write(&ip->i_rw_mutex);
	else
		down_read(&ip->i_rw_mutex);
}
 389
/* Release i_rw_mutex taken by bmap_lock(); @create must match the value
 * passed to bmap_lock(). */
static inline void bmap_unlock(struct gfs2_inode *ip, int create)
{
	if (create)
		up_write(&ip->i_rw_mutex);
	else
		up_read(&ip->i_rw_mutex);
}
 397
 398static inline __be64 *gfs2_indirect_init(struct metapath *mp,
 399					 struct gfs2_glock *gl, unsigned int i,
 400					 unsigned offset, u64 bn)
 401{
 402	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
 403		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
 404				 sizeof(struct gfs2_dinode)));
 405	BUG_ON(i < 1);
 406	BUG_ON(mp->mp_bh[i] != NULL);
 407	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
 408	gfs2_trans_add_bh(gl, mp->mp_bh[i], 1);
 409	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 410	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 411	ptr += offset;
 412	*ptr = cpu_to_be64(bn);
 413	return ptr;
 414}
 415
/* States of the allocation state machine in gfs2_bmap_alloc(); the machine
 * progresses GROW_HEIGHT -> GROW_DEPTH -> DATA. */
enum alloc_state {
	ALLOC_DATA = 0,		/* allocating data blocks only */
	ALLOC_GROW_DEPTH = 1,	/* filling in lower levels of the tree */
	ALLOC_GROW_HEIGHT = 2,	/* adding new levels at the top of the tree */
	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
};
 422
 423/**
 424 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 425 * @inode: The GFS2 inode
 426 * @lblock: The logical starting block of the extent
 427 * @bh_map: This is used to return the mapping details
 428 * @mp: The metapath
 429 * @sheight: The starting height (i.e. whats already mapped)
 430 * @height: The height to build to
 431 * @maxlen: The max number of data blocks to alloc
 432 *
 433 * In this routine we may have to alloc:
 434 *   i) Indirect blocks to grow the metadata tree height
 435 *  ii) Indirect blocks to fill in lower part of the metadata tree
 436 * iii) Data blocks
 437 *
 438 * The function is in two parts. The first part works out the total
 439 * number of blocks which we need. The second part does the actual
 440 * allocation asking for an extent at a time (if enough contiguous free
 441 * blocks are available, there will only be one request per bmap call)
 442 * and uses the state machine to initialise the blocks in order.
 443 *
 444 * Returns: errno on error
 445 */
 446
static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
			   struct buffer_head *bh_map, struct metapath *mp,
			   const unsigned int sheight,
			   const unsigned int height,
			   const unsigned int maxlen)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct super_block *sb = sdp->sd_vfs;
	struct buffer_head *dibh = mp->mp_bh[0];
	u64 bn, dblock = 0;
	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
	unsigned dblks = 0;		/* data blocks to allocate */
	unsigned ptrs_per_blk;
	const unsigned end_of_metadata = height - 1;
	int ret;
	int eob = 0;
	enum alloc_state state;
	__be64 *ptr;
	__be64 zero_bn = 0;

	BUG_ON(sheight < 1);
	BUG_ON(dibh == NULL);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	/* Part 1: work out how many data (dblks) and indirect (iblks)
	 * blocks are needed, and pick the starting state. */
	if (height == sheight) {
		struct buffer_head *bh;
		/* Bottom indirect block exists, find unalloced extent size */
		ptr = metapointer(end_of_metadata, mp);
		bh = mp->mp_bh[end_of_metadata];
		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
					   &eob);
		BUG_ON(dblks < 1);
		state = ALLOC_DATA;
	} else {
		/* Need to allocate indirect blocks */
		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
		dblks = min(maxlen, ptrs_per_blk - mp->mp_list[end_of_metadata]);
		if (height == ip->i_height) {
			/* Writing into existing tree, extend tree down */
			iblks = height - sheight;
			state = ALLOC_GROW_DEPTH;
		} else {
			/* Building up tree height */
			state = ALLOC_GROW_HEIGHT;
			iblks = height - ip->i_height;
			branch_start = metapath_branch_start(mp);
			iblks += (height - branch_start);
		}
	}

	/* start of the second part of the function (state machine) */

	blks = dblks + iblks;
	i = sheight;
	do {
		int error;
		n = blks - alloced;
		/* Ask for all remaining blocks; may get fewer per call */
		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
		if (error)
			return error;
		alloced += n;
		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
			gfs2_trans_add_unrevoke(sdp, bn, n);
		switch (state) {
		/* Growing height of tree */
		case ALLOC_GROW_HEIGHT:
			if (i == 1) {
				/* Save the dinode's first pointer: it will
				 * move to the bottom of the new branch */
				ptr = (__be64 *)(dibh->b_data +
						 sizeof(struct gfs2_dinode));
				zero_bn = *ptr;
			}
			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
			if (i - 1 == height - ip->i_height) {
				i--;
				gfs2_buffer_copy_tail(mp->mp_bh[i],
						sizeof(struct gfs2_meta_header),
						dibh, sizeof(struct gfs2_dinode));
				gfs2_buffer_clear_tail(dibh,
						sizeof(struct gfs2_dinode) +
						sizeof(__be64));
				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
					sizeof(struct gfs2_meta_header));
				*ptr = zero_bn;
				state = ALLOC_GROW_DEPTH;
				for(i = branch_start; i < height; i++) {
					if (mp->mp_bh[i] == NULL)
						break;
					brelse(mp->mp_bh[i]);
					mp->mp_bh[i] = NULL;
				}
				i = branch_start;
			}
			if (n == 0)
				break;
		/* fall through when blocks remain (n > 0) */
		/* Branching from existing tree */
		case ALLOC_GROW_DEPTH:
			if (i > 1 && i < height)
				gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[i-1], 1);
			for (; i < height && n > 0; i++, n--)
				gfs2_indirect_init(mp, ip->i_gl, i,
						   mp->mp_list[i-1], bn++);
			if (i == height)
				state = ALLOC_DATA;
			if (n == 0)
				break;
		/* fall through when blocks remain (n > 0) */
		/* Tree complete, adding data blocks */
		case ALLOC_DATA:
			BUG_ON(n > dblks);
			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
			gfs2_trans_add_bh(ip->i_gl, mp->mp_bh[end_of_metadata], 1);
			dblks = n;
			ptr = metapointer(end_of_metadata, mp);
			dblock = bn;
			while (n-- > 0)
				*ptr++ = cpu_to_be64(bn++);
			if (buffer_zeronew(bh_map)) {
				ret = sb_issue_zeroout(sb, dblock, dblks,
						       GFP_NOFS);
				if (ret) {
					fs_err(sdp,
					       "Failed to zero data buffers\n");
					clear_buffer_zeronew(bh_map);
				}
			}
			break;
		}
	} while ((state != ALLOC_DATA) || !dblock);

	/* Commit the new height and block count, and report the mapping */
	ip->i_height = height;
	gfs2_add_inode_blocks(&ip->i_inode, alloced);
	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
	map_bh(bh_map, inode->i_sb, dblock);
	bh_map->b_size = dblks << inode->i_blkbits;
	set_buffer_new(bh_map);
	return 0;
}
 586
 587/**
 588 * gfs2_block_map - Map a block from an inode to a disk block
 589 * @inode: The inode
 590 * @lblock: The logical block number
 591 * @bh_map: The bh to be mapped
 592 * @create: True if its ok to alloc blocks to satify the request
 593 *
 594 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
 595 * read of metadata will be required before the next block can be
 596 * mapped. Sets buffer_new() if new blocks were allocated.
 597 *
 598 * Returns: errno
 599 */
 600
int gfs2_block_map(struct inode *inode, sector_t lblock,
		   struct buffer_head *bh_map, int create)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned int bsize = sdp->sd_sb.sb_bsize;
	const unsigned int maxlen = bh_map->b_size >> inode->i_blkbits;
	const u64 *arr = sdp->sd_heightsize;
	__be64 *ptr;
	u64 size;
	struct metapath mp;
	int ret;
	int eob;
	unsigned int len;
	struct buffer_head *bh;
	u8 height;

	BUG_ON(maxlen == 0);

	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
	bmap_lock(ip, create);
	clear_buffer_mapped(bh_map);
	clear_buffer_new(bh_map);
	clear_buffer_boundary(bh_map);
	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
	/* Directories use journaled-data block sizes and height tables */
	if (gfs2_is_dir(ip)) {
		bsize = sdp->sd_jbsize;
		arr = sdp->sd_jheightsize;
	}

	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
	if (ret)
		goto out;

	/* Tree height required to address byte (lblock + 1) * bsize */
	height = ip->i_height;
	size = (lblock + 1) * bsize;
	while (size > arr[height])
		height++;
	find_metapath(sdp, lblock, &mp, height);
	/* ret doubles as the already-allocated tree depth for do_alloc */
	ret = 1;
	if (height > ip->i_height || gfs2_is_stuffed(ip))
		goto do_alloc;
	ret = lookup_metapath(ip, &mp);
	if (ret < 0)
		goto out;
	if (ret != ip->i_height)
		goto do_alloc;
	ptr = metapointer(ip->i_height - 1, &mp);
	if (*ptr == 0)
		goto do_alloc;
	/* Block exists: map it and extend over the contiguous extent */
	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
	bh = mp.mp_bh[ip->i_height - 1];
	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
	bh_map->b_size = (len << inode->i_blkbits);
	if (eob)
		set_buffer_boundary(bh_map);
	ret = 0;
out:
	release_metapath(&mp);
	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
	bmap_unlock(ip, create);
	return ret;

do_alloc:
	/* All allocations are done here, firstly check create flag */
	if (!create) {
		BUG_ON(gfs2_is_stuffed(ip));
		/* Hole: leave bh_map unmapped and report success */
		ret = 0;
		goto out;
	}

	/* At this point ret is the tree depth of already allocated blocks */
	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
	goto out;
}
 676
 677/*
 678 * Deprecated: do not use in new code
 679 */
 680int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 681{
 682	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 683	int ret;
 684	int create = *new;
 685
 686	BUG_ON(!extlen);
 687	BUG_ON(!dblock);
 688	BUG_ON(!new);
 689
 690	bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5));
 691	ret = gfs2_block_map(inode, lblock, &bh, create);
 692	*extlen = bh.b_size >> inode->i_blkbits;
 693	*dblock = bh.b_blocknr;
 694	if (buffer_new(&bh))
 695		*new = 1;
 696	else
 697		*new = 0;
 698	return ret;
 699}
 700
 701/**
  702 * do_strip - Look for a particular layer of the file and strip it off
 703 * @ip: the inode
 704 * @dibh: the dinode buffer
 705 * @bh: A buffer of pointers
 706 * @top: The first pointer in the buffer
 707 * @bottom: One more than the last pointer
 708 * @height: the height this buffer is at
  709 * @sm: a pointer to a struct strip_mine
 710 *
 711 * Returns: errno
 712 */
 713
static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
		    unsigned int height, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrp_list rlist;
	u64 bn, bstart;
	u32 blen, btotal;
	__be64 *p;
	unsigned int rg_blocks = 0;
	int metadata;
	unsigned int revokes = 0;
	int x;
	int error;

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	if (!*top)
		sm->sm_first = 0;

	/* Only strip pointers at the height this pass is removing */
	if (height != sm->sm_height)
		return 0;

	/* On the first buffer of a partial truncate the first pointer
	   still addresses live data, so skip past it */
	if (sm->sm_first) {
		top++;
		sm->sm_first = 0;
	}

	/* Pointers above the lowest level reference metadata blocks,
	   which need journal revokes when freed */
	metadata = (height != ip->i_height - 1);
	if (metadata)
		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
	else if (ip->i_depth)
		revokes = sdp->sd_inptrs;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	bstart = 0;
	blen = 0;

	/* Pass 1: collect the resource groups covering the blocks to be
	   freed, coalescing runs of contiguous block numbers */
	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart)
				gfs2_rlist_add(ip, &rlist, bstart);

			bstart = bn;
			blen = 1;
		}
	}

	if (bstart)
		gfs2_rlist_add(ip, &rlist, bstart);
	else
		goto out; /* Nothing to do */

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	/* Total rgrp header blocks — needed to size the transaction */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	/* Lock all affected resource groups in one operation */
	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
				 revokes);
	if (error)
		goto out_rg_gunlock;

	down_write(&ip->i_rw_mutex);

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);

	bstart = 0;
	blen = 0;
	btotal = 0;

	/* Pass 2: actually free the blocks, zeroing each pointer and
	   decrementing the inode block count as we go */
	for (p = top; p < bottom; p++) {
		if (!*p)
			continue;

		bn = be64_to_cpu(*p);

		if (bstart + blen == bn)
			blen++;
		else {
			if (bstart) {
				__gfs2_free_blocks(ip, bstart, blen, metadata);
				btotal += blen;
			}

			bstart = bn;
			blen = 1;
		}

		*p = 0;
		gfs2_add_inode_blocks(&ip->i_inode, -1);
	}
	if (bstart) {
		__gfs2_free_blocks(ip, bstart, blen, metadata);
		btotal += blen;
	}

	/* Return the freed blocks to statfs and quota accounting */
	gfs2_statfs_change(sdp, 0, +btotal, 0);
	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
			  ip->i_inode.i_gid);

	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;

	gfs2_dinode_out(ip, dibh->b_data);

	up_write(&ip->i_rw_mutex);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
out:
	return error;
}
 848
 849/**
 850 * recursive_scan - recursively scan through the end of a file
 851 * @ip: the inode
 852 * @dibh: the dinode buffer
 853 * @mp: the path through the metadata to the point to start
 854 * @height: the height the recursion is at
 855 * @block: the indirect block to look at
 856 * @first: 1 if this is the first block
  857 * @sm: the strip_mine describing which layer to strip, passed to do_strip
 858 *
 859 * When this is first called @height and @block should be zero and
 860 * @first should be 1.
 861 *
 862 * Returns: errno
 863 */
 864
static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
			  struct metapath *mp, unsigned int height,
			  u64 block, int first, struct strip_mine *sm)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *bh = NULL;
	__be64 *top, *bottom;
	u64 bn;
	int error;
	int mh_size = sizeof(struct gfs2_meta_header);

	if (!height) {
		/* Height 0: scan the pointer array embedded in the dinode */
		error = gfs2_meta_inode_buffer(ip, &bh);
		if (error)
			return error;
		dibh = bh;

		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
	} else {
		/* Deeper heights: read the indirect block at @block */
		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
		if (error)
			return error;

		/* On the first buffer at each height start at the metapath
		   offset; subsequent buffers are scanned in full */
		top = (__be64 *)(bh->b_data + mh_size) +
				  (first ? mp->mp_list[height] : 0);

		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
	}

	error = do_strip(ip, dibh, bh, top, bottom, height, sm);
	if (error)
		goto out;

	/* Recurse into each non-null child pointer below this level */
	if (height < ip->i_height - 1) {

		gfs2_metapath_ra(ip->i_gl, bh, top);

		for (; top < bottom; top++, first = 0) {
			if (!*top)
				continue;

			bn = be64_to_cpu(*top);

			error = recursive_scan(ip, dibh, mp, height + 1, bn,
					       first, sm);
			if (error)
				break;
		}
	}
out:
	brelse(bh);
	return error;
}
 919
 920
 921/**
 922 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 923 *
 924 * This is partly borrowed from ext3.
 925 */
static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct buffer_head *bh;
	struct page *page;
	int err;

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return 0;

	blocksize = inode->i_sb->s_blocksize;
	/* Bytes to zero: from "offset" to the end of its fs block */
	length = blocksize - (offset & (blocksize - 1));
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	/* Find the buffer that contains "offset" */
	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 0);
		/* unmapped? It's a hole - nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
		err = 0;
	}

	/* Unless in writeback mode, route the buffer through the journal */
	if (!gfs2_is_writeback(ip))
		gfs2_trans_add_bh(ip->i_gl, bh, 0);

	zero_user(page, offset, length);
	mark_buffer_dirty(bh);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
 990
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/**
 * trunc_start - Begin a truncate: update i_size and zero the new tail
 * @inode: the inode being truncated
 * @oldsize: the current inode size
 * @newsize: the new (smaller) size
 *
 * Returns: errno
 */
static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct buffer_head *dibh;
	int journaled = gfs2_is_jdata(ip);
	int error;

	error = gfs2_trans_begin(sdp,
				 RES_DINODE + (journaled ? RES_JDATA : 0), 0);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		/* Stuffed file: data lives in the dinode block, so just
		   clear everything past the new size */
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
	} else {
		/* Zero the partial block at the new EOF, if any */
		if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
			error = gfs2_block_truncate_page(mapping, newsize);
			if (error)
				goto out_brelse;
		}
		/* Flag the on-disk inode so an interrupted truncate can be
		   resumed after a crash; cleared again in trunc_end() */
		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
	}

	i_size_write(inode, newsize);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(ip, dibh->b_data);

	truncate_pagecache(inode, oldsize, newsize);
out_brelse:
	brelse(dibh);
out:
	gfs2_trans_end(sdp);
	return error;
}
1033
/**
 * trunc_dealloc - Free the blocks beyond a given size
 * @ip: the inode
 * @size: the size to truncate to (0 frees everything)
 *
 * Returns: errno
 */
static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int height = ip->i_height;
	u64 lblock;
	struct metapath mp;
	int error;

	/* Logical block containing the last byte to keep */
	if (!size)
		lblock = 0;
	else
		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;

	find_metapath(sdp, lblock, &mp, ip->i_height);
	if (!gfs2_qadata_get(ip))
		return -ENOMEM;

	error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out;

	/* Strip one metadata layer per pass, deepest layer first */
	while (height--) {
		struct strip_mine sm;
		sm.sm_first = !!size;
		sm.sm_height = height;

		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
		if (error)
			break;
	}

	gfs2_quota_unhold(ip);

out:
	gfs2_qadata_put(ip);
	return error;
}
1071
/**
 * trunc_end - Finish a truncate: reset the tree and clear the flag
 * @ip: the inode
 *
 * Returns: errno
 */
static int trunc_end(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head *dibh;
	int error;

	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		return error;

	down_write(&ip->i_rw_mutex);

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto out;

	/* If the file is now empty, collapse the metadata tree and reset
	   the allocation goal to the inode's own block */
	if (!i_size_read(&ip->i_inode)) {
		ip->i_height = 0;
		ip->i_goal = ip->i_no_addr;
		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
	}
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	/* Truncate is complete: clear the flag set by trunc_start() */
	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

out:
	up_write(&ip->i_rw_mutex);
	gfs2_trans_end(sdp);
	return error;
}
1105
1106/**
1107 * do_shrink - make a file smaller
1108 * @inode: the inode
1109 * @oldsize: the current inode size
1110 * @newsize: the size to make the file
1111 *
1112 * Called with an exclusive lock on @inode. The @size must
1113 * be equal to or smaller than the current inode size.
1114 *
1115 * Returns: errno
1116 */
1117
1118static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1119{
1120	struct gfs2_inode *ip = GFS2_I(inode);
1121	int error;
1122
1123	error = trunc_start(inode, oldsize, newsize);
1124	if (error < 0)
1125		return error;
1126	if (gfs2_is_stuffed(ip))
1127		return 0;
1128
1129	error = trunc_dealloc(ip, newsize);
1130	if (error == 0)
1131		error = trunc_end(ip);
1132
1133	return error;
1134}
1135
1136void gfs2_trim_blocks(struct inode *inode)
1137{
1138	u64 size = inode->i_size;
1139	int ret;
1140
1141	ret = do_shrink(inode, size, size);
1142	WARN_ON(ret != 0);
1143}
1144
1145/**
1146 * do_grow - Touch and update inode size
1147 * @inode: The inode
1148 * @size: The new size
1149 *
1150 * This function updates the timestamps on the inode and
1151 * may also increase the size of the inode. This function
1152 * must not be called with @size any smaller than the current
1153 * inode size.
1154 *
1155 * Although it is not strictly required to unstuff files here,
1156 * earlier versions of GFS2 have a bug in the stuffed file reading
1157 * code which will result in a buffer overrun if the size is larger
1158 * than the max stuffed file size. In order to prevent this from
1159 * occurring, such files are unstuffed, but in other cases we can
1160 * just update the inode size directly.
1161 *
1162 * Returns: 0 on success, or -ve on error
1163 */
1164
static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *dibh;
	struct gfs2_qadata *qa = NULL;
	int error;
	int unstuff = 0;

	/* Growing a stuffed file past what fits in the dinode block means
	   it must be unstuffed, which needs a block reservation and a
	   quota check first */
	if (gfs2_is_stuffed(ip) &&
	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
		qa = gfs2_qadata_get(ip);
		if (qa == NULL)
			return -ENOMEM;

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto do_grow_alloc_put;

		error = gfs2_inplace_reserve(ip, 1);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT, 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	/* The unwind labels sit inside the if so the reservation, quota
	   lock and qadata are only released when actually acquired */
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
do_grow_alloc_put:
		gfs2_qadata_put(ip);
	}
	return error;
}
1222
1223/**
1224 * gfs2_setattr_size - make a file a given size
1225 * @inode: the inode
1226 * @newsize: the size to make the file
1227 *
1228 * The file size can grow, shrink, or stay the same size. This
1229 * is called holding i_mutex and an exclusive glock on the inode
1230 * in question.
1231 *
1232 * Returns: errno
1233 */
1234
1235int gfs2_setattr_size(struct inode *inode, u64 newsize)
1236{
 
1237	int ret;
1238	u64 oldsize;
1239
1240	BUG_ON(!S_ISREG(inode->i_mode));
1241
1242	ret = inode_newsize_ok(inode, newsize);
1243	if (ret)
1244		return ret;
1245
 
 
 
 
1246	inode_dio_wait(inode);
1247
 
 
 
 
1248	oldsize = inode->i_size;
1249	if (newsize >= oldsize)
1250		return do_grow(inode, newsize);
 
 
1251
1252	return do_shrink(inode, oldsize, newsize);
 
 
 
 
1253}
1254
1255int gfs2_truncatei_resume(struct gfs2_inode *ip)
1256{
1257	int error;
1258	error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
1259	if (!error)
1260		error = trunc_end(ip);
1261	return error;
1262}
1263
/*
 * Free every data and indirect block of @ip by deallocating down to
 * size zero.
 */
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	int ret;

	ret = trunc_dealloc(ip, 0);
	return ret;
}
1268
1269/**
1270 * gfs2_write_alloc_required - figure out if a write will require an allocation
1271 * @ip: the file being written to
1272 * @offset: the offset to write to
1273 * @len: the number of bytes being written
1274 *
1275 * Returns: 1 if an alloc is required, 0 otherwise
1276 */
1277
int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	/* Zero-length writes never allocate */
	if (!len)
		return 0;

	if (gfs2_is_stuffed(ip)) {
		/* A stuffed file needs unstuffing (an allocation) once the
		   write extends past the space inside the dinode block */
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	/* Writing past the current end of file always allocates */
	if (lblock_stop > end_of_file)
		return 1;

	/* Map the range extent by extent; any unmapped piece is a hole
	   that a write would have to fill */
	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while(size > 0);

	return 0;
}
1318