   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#include <linux/spinlock.h>
   8#include <linux/completion.h>
   9#include <linux/buffer_head.h>
  10#include <linux/blkdev.h>
  11#include <linux/gfs2_ondisk.h>
  12#include <linux/crc32.h>
  13#include <linux/iomap.h>
  14#include <linux/ktime.h>
  15
  16#include "gfs2.h"
  17#include "incore.h"
  18#include "bmap.h"
  19#include "glock.h"
  20#include "inode.h"
  21#include "meta_io.h"
  22#include "quota.h"
  23#include "rgrp.h"
  24#include "log.h"
  25#include "super.h"
  26#include "trans.h"
  27#include "dir.h"
  28#include "util.h"
  29#include "aops.h"
  30#include "trace_gfs2.h"
  31
  32/* This doesn't need to be that large: the maximum number of 64-bit
  33 * pointers in a 4k block is 512, so __u16 is fine for that. It saves
  34 * stack space to keep it small.
  35 */
  36struct metapath {
  37	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
  38	__u16 mp_list[GFS2_MAX_META_HEIGHT];
  39	int mp_fheight; /* find_metapath height */
  40	int mp_aheight; /* actual height (lookup height) */
  41};
  42
  43static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
  44
  45/**
  46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
  47 * @ip: the inode
  48 * @dibh: the dinode buffer
  49 * @block: the block number that was allocated
  50 * @page: The (optional) page. This is looked up if @page is NULL
  51 *
  52 * Returns: errno
  53 */
  54
  55static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
  56			       u64 block, struct page *page)
  57{
  58	struct inode *inode = &ip->i_inode;
  59	struct buffer_head *bh;
  60	int release = 0;
  61
  62	if (!page || page->index) {
  63		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
  64		if (!page)
  65			return -ENOMEM;
  66		release = 1;
  67	}
  68
  69	if (!PageUptodate(page)) {
  70		void *kaddr = kmap(page);
  71		u64 dsize = i_size_read(inode);
  72 
  73		if (dsize > gfs2_max_stuffed_size(ip))
  74			dsize = gfs2_max_stuffed_size(ip);
  75
  76		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
  77		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
  78		kunmap(page);
  79
  80		SetPageUptodate(page);
  81	}
  82
  83	if (!page_has_buffers(page))
  84		create_empty_buffers(page, BIT(inode->i_blkbits),
  85				     BIT(BH_Uptodate));
  86
  87	bh = page_buffers(page);
  88
  89	if (!buffer_mapped(bh))
  90		map_bh(bh, inode->i_sb, block);
  91
  92	set_buffer_uptodate(bh);
  93	if (gfs2_is_jdata(ip))
  94		gfs2_trans_add_data(ip->i_gl, bh);
  95	else {
  96		mark_buffer_dirty(bh);
  97		gfs2_ordered_add_inode(ip);
  98	}
  99
 100	if (release) {
 101		unlock_page(page);
 102		put_page(page);
 103	}
 104
 105	return 0;
 106}
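/*
 * Editor's note -- illustrative, not part of the original source: a
 * "stuffed" inode keeps its file data inline in the dinode block, right
 * after the on-disk header.  gfs2_unstuffer_page() copies that inline
 * data into page 0 of the page cache and maps it to the newly allocated
 * block.  The inline capacity is therefore bounded by roughly
 * (block size - sizeof(struct gfs2_dinode)), which is what
 * gfs2_max_stuffed_size() is assumed to return here.
 */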
 107
 108/**
 109 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 110 * @ip: The GFS2 inode to unstuff
 111 * @page: The (optional) page. This is looked up if the @page is NULL
 112 *
 113 * This routine unstuffs a dinode and returns it to a "normal" state such
 114 * that the height can be grown in the traditional way.
 115 *
 116 * Returns: errno
 117 */
 118
 119int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
 120{
 121	struct buffer_head *bh, *dibh;
 122	struct gfs2_dinode *di;
 123	u64 block = 0;
 124	int isdir = gfs2_is_dir(ip);
 125	int error;
 126
 127	down_write(&ip->i_rw_mutex);
 128
 129	error = gfs2_meta_inode_buffer(ip, &dibh);
 130	if (error)
 131		goto out;
 132
 133	if (i_size_read(&ip->i_inode)) {
 134		/* Get a free block, fill it with the stuffed data,
 135		   and write it out to disk */
 136
 137		unsigned int n = 1;
 138		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 139		if (error)
 140			goto out_brelse;
 141		if (isdir) {
 142			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
 143			error = gfs2_dir_get_new_buffer(ip, block, &bh);
 144			if (error)
 145				goto out_brelse;
 146			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
 147					      dibh, sizeof(struct gfs2_dinode));
 148			brelse(bh);
 149		} else {
 150			error = gfs2_unstuffer_page(ip, dibh, block, page);
 151			if (error)
 152				goto out_brelse;
 153		}
 154	}
 155
 156	/*  Set up the pointer to the new block  */
 157
 158	gfs2_trans_add_meta(ip->i_gl, dibh);
 159	di = (struct gfs2_dinode *)dibh->b_data;
 160	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 161
 162	if (i_size_read(&ip->i_inode)) {
 163		*(__be64 *)(di + 1) = cpu_to_be64(block);
 164		gfs2_add_inode_blocks(&ip->i_inode, 1);
 165		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 166	}
 167
 168	ip->i_height = 1;
 169	di->di_height = cpu_to_be16(1);
 170
 171out_brelse:
 172	brelse(dibh);
 173out:
 174	up_write(&ip->i_rw_mutex);
 175	return error;
 176}
 177
 178
 179/**
 180 * find_metapath - Find path through the metadata tree
 181 * @sdp: The superblock
 182 * @block: The disk block to look up
 183 * @mp: The metapath to return the result in
 184 * @height: The pre-calculated height of the metadata tree
 185 *
 186 *   This routine returns a struct metapath structure that defines a path
 187 *   through the metadata of inode "ip" to get to block "block".
 188 *
 189 *   Example:
 190 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
 191 *   filesystem with a blocksize of 4096.
 192 *
 193 *   find_metapath() would return a struct metapath structure set to:
 194 *   mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 195 *
 196 *   That means that in order to get to the block containing the byte at
 197 *   offset 101342453, we would load the indirect block pointed to by pointer
 198 *   0 in the dinode.  We would then load the indirect block pointed to by
 199 *   pointer 48 in that indirect block.  We would then load the data block
 200 *   pointed to by pointer 165 in that indirect block.
 201 *
 202 *             ----------------------------------------
 203 *             | Dinode |                             |
 204 *             |        |                            4|
 205 *             |        |0 1 2 3 4 5                 9|
 206 *             |        |                            6|
 207 *             ----------------------------------------
 208 *                       |
 209 *                       |
 210 *                       V
 211 *             ----------------------------------------
 212 *             | Indirect Block                       |
 213 *             |                                     5|
 214 *             |            4 4 4 4 4 5 5            1|
 215 *             |0           5 6 7 8 9 0 1            2|
 216 *             ----------------------------------------
 217 *                                |
 218 *                                |
 219 *                                V
 220 *             ----------------------------------------
 221 *             | Indirect Block                       |
 222 *             |                         1 1 1 1 1   5|
 223 *             |                         6 6 6 6 6   1|
 224 *             |0                        3 4 5 6 7   2|
 225 *             ----------------------------------------
 226 *                                           |
 227 *                                           |
 228 *                                           V
 229 *             ----------------------------------------
 230 *             | Data block containing offset         |
 231 *             |            101342453                 |
 232 *             |                                      |
 233 *             |                                      |
 234 *             ----------------------------------------
 235 *
 236 */
 237
 238static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 239			  struct metapath *mp, unsigned int height)
 240{
 241	unsigned int i;
 242
 243	mp->mp_fheight = height;
 244	for (i = height; i--;)
 245		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 246}
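/*
 * Editor's note -- worked example, not part of the original source: with
 * 4096-byte blocks each indirect block holds 512 pointers, so
 * sd_inptrs == 512.  For the offset used in the comment above,
 * block = 101342453 >> 12 = 24741, and the loop peels off one base-512
 * digit per level of height:
 *
 *	mp_list[2] = 24741 % 512         = 165
 *	mp_list[1] = (24741 / 512) % 512 = 48
 *	mp_list[0] = 24741 / 512 / 512   = 0
 *
 * matching the mp_list values in the example.
 */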
 247
 248static inline unsigned int metapath_branch_start(const struct metapath *mp)
 249{
 250	if (mp->mp_list[0] == 0)
 251		return 2;
 252	return 1;
 253}
 254
 255/**
 256 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 257 * @height: The metadata height (0 = dinode)
 258 * @mp: The metapath
 259 */
 260static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
 261{
 262	struct buffer_head *bh = mp->mp_bh[height];
 263	if (height == 0)
 264		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
 265	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
 266}
 267
 268/**
 269 * metapointer - Return pointer to start of metadata in a buffer
 270 * @height: The metadata height (0 = dinode)
 271 * @mp: The metapath
 272 *
 273 * Return a pointer to the block number of the next height of the metadata
 274 * tree given a buffer containing the pointer to the current height of the
 275 * metadata tree.
 276 */
 277
 278static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 279{
 280	__be64 *p = metaptr1(height, mp);
 281	return p + mp->mp_list[height];
 282}
 283
 284static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
 285{
 286	const struct buffer_head *bh = mp->mp_bh[height];
 287	return (const __be64 *)(bh->b_data + bh->b_size);
 288}
 289
 290static void clone_metapath(struct metapath *clone, struct metapath *mp)
 291{
 292	unsigned int hgt;
 293
 294	*clone = *mp;
 295	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
 296		get_bh(clone->mp_bh[hgt]);
 297}
 298
 299static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
 300{
 301	const __be64 *t;
 302
 303	for (t = start; t < end; t++) {
 304		struct buffer_head *rabh;
 305
 306		if (!*t)
 307			continue;
 308
 309		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
 310		if (trylock_buffer(rabh)) {
 311			if (!buffer_uptodate(rabh)) {
 312				rabh->b_end_io = end_buffer_read_sync;
 313				submit_bh(REQ_OP_READ,
 314					  REQ_RAHEAD | REQ_META | REQ_PRIO,
 315					  rabh);
 316				continue;
 317			}
 318			unlock_buffer(rabh);
 319		}
 320		brelse(rabh);
 321	}
 322}
 323
 324static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
 325			     unsigned int x, unsigned int h)
 326{
 327	for (; x < h; x++) {
 328		__be64 *ptr = metapointer(x, mp);
 329		u64 dblock = be64_to_cpu(*ptr);
 330		int ret;
 331
 332		if (!dblock)
 333			break;
 334		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
 335		if (ret)
 336			return ret;
 337	}
 338	mp->mp_aheight = x + 1;
 339	return 0;
 340}
 341
 342/**
 343 * lookup_metapath - Walk the metadata tree to a specific point
 344 * @ip: The inode
 345 * @mp: The metapath
 346 *
 347 * Assumes that the inode's buffer has already been looked up and
 348 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 349 * by find_metapath().
 350 *
 351 * If this function encounters part of the tree which has not been
 352 * allocated, it returns the current height of the tree at the point
 353 * at which it found the unallocated block. Blocks which are found are
 354 * added to the mp->mp_bh[] list.
 355 *
 356 * Returns: error
 357 */
 358
 359static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 360{
 361	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
 362}
 363
 364/**
 365 * fillup_metapath - fill up buffers for the metadata path to a specific height
 366 * @ip: The inode
 367 * @mp: The metapath
 368 * @h: The height to which it should be mapped
 369 *
 370 * Similar to lookup_metapath, but does lookups for a range of heights
 371 *
 372 * Returns: error or the number of buffers filled
 373 */
 374
 375static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
 376{
 377	unsigned int x = 0;
 378	int ret;
 379
 380	if (h) {
 381		/* find the first buffer we need to look up. */
 382		for (x = h - 1; x > 0; x--) {
 383			if (mp->mp_bh[x])
 384				break;
 385		}
 386	}
 387	ret = __fillup_metapath(ip, mp, x, h);
 388	if (ret)
 389		return ret;
 390	return mp->mp_aheight - x - 1;
 391}
 392
 393static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
 394{
 395	sector_t factor = 1, block = 0;
 396	int hgt;
 397
 398	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
 399		if (hgt < mp->mp_aheight)
 400			block += mp->mp_list[hgt] * factor;
 401		factor *= sdp->sd_inptrs;
 402	}
 403	return block;
 404}
 405
 406static void release_metapath(struct metapath *mp)
 407{
 408	int i;
 409
 410	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
 411		if (mp->mp_bh[i] == NULL)
 412			break;
 413		brelse(mp->mp_bh[i]);
 414		mp->mp_bh[i] = NULL;
 415	}
 416}
 417
 418/**
 419 * gfs2_extent_length - Returns length of an extent of blocks
 420 * @bh: The metadata block
 421 * @ptr: Current position in @bh
 422 * @limit: Max extent length to return
 423 * @eob: Set to 1 if we hit "end of block"
 424 *
 425 * Returns: The length of the extent (minimum of one block)
 426 */
 427
 428static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
 429{
 430	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
 431	const __be64 *first = ptr;
 432	u64 d = be64_to_cpu(*ptr);
 433
 434	*eob = 0;
 435	do {
 436		ptr++;
 437		if (ptr >= end)
 438			break;
 439		d++;
 440	} while(be64_to_cpu(*ptr) == d);
 441	if (ptr >= end)
 442		*eob = 1;
 443	return ptr - first;
 444}
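/*
 * Editor's note -- illustrative, not part of the original source: this
 * scan counts consecutive on-disk block numbers.  If the pointer array
 * in @bh holds { 1000, 1001, 1002, 1005, ... }, a call starting at the
 * first slot returns 3 with *eob == 0; if the run of consecutive blocks
 * extends to the end of the buffer, *eob is set to 1 so the caller knows
 * the next indirect block must be read before mapping can continue.
 */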
 445
 446enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
 447
 448/*
 449 * gfs2_metadata_walker - walk an indirect block
 450 * @mp: Metapath to indirect block
 451 * @ptrs: Number of pointers to look at
 452 *
 453 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
 454 * indirect block to follow.
 455 */
 456typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
 457						   unsigned int ptrs);
 458
 459/*
 460 * gfs2_walk_metadata - walk a tree of indirect blocks
 461 * @inode: The inode
 462 * @mp: Starting point of walk
 463 * @max_len: Maximum number of blocks to walk
 464 * @walker: Called during the walk
 465 *
 466 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
 467 * past the end of metadata, and a negative error code otherwise.
 468 */
 469
 470static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
 471		u64 max_len, gfs2_metadata_walker walker)
 472{
 473	struct gfs2_inode *ip = GFS2_I(inode);
 474	struct gfs2_sbd *sdp = GFS2_SB(inode);
 475	u64 factor = 1;
 476	unsigned int hgt;
 477	int ret;
 478
 479	/*
 480	 * The walk starts in the lowest allocated indirect block, which may be
 481	 * before the position indicated by @mp.  Adjust @max_len accordingly
 482	 * to avoid a short walk.
 483	 */
 484	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
 485		max_len += mp->mp_list[hgt] * factor;
 486		mp->mp_list[hgt] = 0;
 487		factor *= sdp->sd_inptrs;
 488	}
 489
 490	for (;;) {
 491		u16 start = mp->mp_list[hgt];
 492		enum walker_status status;
 493		unsigned int ptrs;
 494		u64 len;
 495
 496		/* Walk indirect block. */
 497		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
 498		len = ptrs * factor;
 499		if (len > max_len)
 500			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
 501		status = walker(mp, ptrs);
 502		switch (status) {
 503		case WALK_STOP:
 504			return 1;
 505		case WALK_FOLLOW:
 506			BUG_ON(mp->mp_aheight == mp->mp_fheight);
 507			ptrs = mp->mp_list[hgt] - start;
 508			len = ptrs * factor;
 509			break;
 510		case WALK_CONTINUE:
 511			break;
 512		}
 513		if (len >= max_len)
 514			break;
 515		max_len -= len;
 516		if (status == WALK_FOLLOW)
 517			goto fill_up_metapath;
 518
 519lower_metapath:
 520		/* Decrease height of metapath. */
 521		brelse(mp->mp_bh[hgt]);
 522		mp->mp_bh[hgt] = NULL;
 523		mp->mp_list[hgt] = 0;
 524		if (!hgt)
 525			break;
 526		hgt--;
 527		factor *= sdp->sd_inptrs;
 528
 529		/* Advance in metadata tree. */
 530		(mp->mp_list[hgt])++;
 531		if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
 532			if (!hgt)
 533				break;
 534			goto lower_metapath;
 535		}
 536
 537fill_up_metapath:
 538		/* Increase height of metapath. */
 539		ret = fillup_metapath(ip, mp, ip->i_height - 1);
 540		if (ret < 0)
 541			return ret;
 542		hgt += ret;
 543		for (; ret; ret--)
 544			do_div(factor, sdp->sd_inptrs);
 545		mp->mp_aheight = hgt + 1;
 546	}
 547	return 0;
 548}
 549
 550static enum walker_status gfs2_hole_walker(struct metapath *mp,
 551					   unsigned int ptrs)
 552{
 553	const __be64 *start, *ptr, *end;
 554	unsigned int hgt;
 555
 556	hgt = mp->mp_aheight - 1;
 557	start = metapointer(hgt, mp);
 558	end = start + ptrs;
 559
 560	for (ptr = start; ptr < end; ptr++) {
 561		if (*ptr) {
 562			mp->mp_list[hgt] += ptr - start;
 563			if (mp->mp_aheight == mp->mp_fheight)
 564				return WALK_STOP;
 565			return WALK_FOLLOW;
 566		}
 567	}
 568	return WALK_CONTINUE;
 569}
 570
 571/**
 572 * gfs2_hole_size - figure out the size of a hole
 573 * @inode: The inode
 574 * @lblock: The logical starting block number
 575 * @len: How far to look (in blocks)
 576 * @mp: The metapath at lblock
 577 * @iomap: The iomap to store the hole size in
 578 *
 579 * This function modifies @mp.
 580 *
 581 * Returns: errno on error
 582 */
 583static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
 584			  struct metapath *mp, struct iomap *iomap)
 585{
 586	struct metapath clone;
 587	u64 hole_size;
 588	int ret;
 589
 590	clone_metapath(&clone, mp);
 591	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
 592	if (ret < 0)
 593		goto out;
 594
 595	if (ret == 1)
 596		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
 597	else
 598		hole_size = len;
 599	iomap->length = hole_size << inode->i_blkbits;
 600	ret = 0;
 601
 602out:
 603	release_metapath(&clone);
 604	return ret;
 605}
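/*
 * Editor's note -- hypothetical sketch, not part of the original source:
 * gfs2_hole_size() above shows the usual pattern for gfs2_walk_metadata():
 * clone the metapath, pass a walker returning WALK_STOP / WALK_FOLLOW /
 * WALK_CONTINUE, and release the clone afterwards.  A walker that merely
 * detects whether any pointer in range is allocated could look like:
 */
#if 0
static enum walker_status first_alloc_walker(struct metapath *mp,
					     unsigned int ptrs)
{
	const __be64 *ptr = metapointer(mp->mp_aheight - 1, mp);
	unsigned int i;

	for (i = 0; i < ptrs; i++)
		if (ptr[i])			/* allocated block found */
			return WALK_STOP;
	return WALK_CONTINUE;			/* keep scanning */
}
#endif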
 606
 607static inline __be64 *gfs2_indirect_init(struct metapath *mp,
 608					 struct gfs2_glock *gl, unsigned int i,
 609					 unsigned offset, u64 bn)
 610{
 611	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
 612		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
 613				 sizeof(struct gfs2_dinode)));
 614	BUG_ON(i < 1);
 615	BUG_ON(mp->mp_bh[i] != NULL);
 616	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
 617	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
 618	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 619	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 620	ptr += offset;
 621	*ptr = cpu_to_be64(bn);
 622	return ptr;
 623}
 624
 625enum alloc_state {
 626	ALLOC_DATA = 0,
 627	ALLOC_GROW_DEPTH = 1,
 628	ALLOC_GROW_HEIGHT = 2,
 629	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
 630};
 631
 632/**
 633 * gfs2_iomap_alloc - Build a metadata tree of the requested height
 634 * @inode: The GFS2 inode
 635 * @iomap: The iomap structure
 636 * @mp: The metapath, with proper height information calculated
 637 *
 638 * In this routine we may have to alloc:
 639 *   i) Indirect blocks to grow the metadata tree height
 640 *  ii) Indirect blocks to fill in lower part of the metadata tree
 641 * iii) Data blocks
 642 *
 643 * This function is called after gfs2_iomap_get, which works out the
 644 * total number of blocks which we need via gfs2_alloc_size.
 645 *
 646 * We then do the actual allocation asking for an extent at a time (if
 647 * enough contiguous free blocks are available, there will only be one
 648 * allocation request per call) and use the state machine to initialise
 649 * the blocks in order.
 650 *
 651 * Right now, this function will allocate at most one indirect block
 652 * worth of data -- with a default block size of 4K, that's slightly
 653 * less than 2M.  If this limitation is ever removed to allow huge
 654 * allocations, we would probably still want to limit the iomap size we
 655 * return to avoid stalling other tasks during huge writes; the next
 656 * iomap iteration would then find the blocks already allocated.
 657 *
 658 * Returns: errno on error
 659 */
 660
 661static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 662			    struct metapath *mp)
 663{
 664	struct gfs2_inode *ip = GFS2_I(inode);
 665	struct gfs2_sbd *sdp = GFS2_SB(inode);
 666	struct buffer_head *dibh = mp->mp_bh[0];
 667	u64 bn;
 668	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
 669	size_t dblks = iomap->length >> inode->i_blkbits;
 670	const unsigned end_of_metadata = mp->mp_fheight - 1;
 671	int ret;
 672	enum alloc_state state;
 673	__be64 *ptr;
 674	__be64 zero_bn = 0;
 675
 676	BUG_ON(mp->mp_aheight < 1);
 677	BUG_ON(dibh == NULL);
 678	BUG_ON(dblks < 1);
 679
 680	gfs2_trans_add_meta(ip->i_gl, dibh);
 681
 682	down_write(&ip->i_rw_mutex);
 683
 684	if (mp->mp_fheight == mp->mp_aheight) {
 685		/* Bottom indirect block exists */
 686		state = ALLOC_DATA;
 687	} else {
 688		/* Need to allocate indirect blocks */
 689		if (mp->mp_fheight == ip->i_height) {
 690			/* Writing into existing tree, extend tree down */
 691			iblks = mp->mp_fheight - mp->mp_aheight;
 692			state = ALLOC_GROW_DEPTH;
 693		} else {
 694			/* Building up tree height */
 695			state = ALLOC_GROW_HEIGHT;
 696			iblks = mp->mp_fheight - ip->i_height;
 697			branch_start = metapath_branch_start(mp);
 698			iblks += (mp->mp_fheight - branch_start);
 699		}
 700	}
 701
 702	/* start of the second part of the function (state machine) */
 703
 704	blks = dblks + iblks;
 705	i = mp->mp_aheight;
 706	do {
 707		n = blks - alloced;
 708		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 709		if (ret)
 710			goto out;
 711		alloced += n;
 712		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
 713			gfs2_trans_remove_revoke(sdp, bn, n);
 714		switch (state) {
 715		/* Growing height of tree */
 716		case ALLOC_GROW_HEIGHT:
 717			if (i == 1) {
 718				ptr = (__be64 *)(dibh->b_data +
 719						 sizeof(struct gfs2_dinode));
 720				zero_bn = *ptr;
 721			}
 722			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
 723			     i++, n--)
 724				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
 725			if (i - 1 == mp->mp_fheight - ip->i_height) {
 726				i--;
 727				gfs2_buffer_copy_tail(mp->mp_bh[i],
 728						sizeof(struct gfs2_meta_header),
 729						dibh, sizeof(struct gfs2_dinode));
 730				gfs2_buffer_clear_tail(dibh,
 731						sizeof(struct gfs2_dinode) +
 732						sizeof(__be64));
 733				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
 734					sizeof(struct gfs2_meta_header));
 735				*ptr = zero_bn;
 736				state = ALLOC_GROW_DEPTH;
 737				for(i = branch_start; i < mp->mp_fheight; i++) {
 738					if (mp->mp_bh[i] == NULL)
 739						break;
 740					brelse(mp->mp_bh[i]);
 741					mp->mp_bh[i] = NULL;
 742				}
 743				i = branch_start;
 744			}
 745			if (n == 0)
 746				break;
 747		/* fall through - To branching from existing tree */
 748		case ALLOC_GROW_DEPTH:
 749			if (i > 1 && i < mp->mp_fheight)
 750				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
 751			for (; i < mp->mp_fheight && n > 0; i++, n--)
 752				gfs2_indirect_init(mp, ip->i_gl, i,
 753						   mp->mp_list[i-1], bn++);
 754			if (i == mp->mp_fheight)
 755				state = ALLOC_DATA;
 756			if (n == 0)
 757				break;
 758		/* fall through - To tree complete, adding data blocks */
 759		case ALLOC_DATA:
 760			BUG_ON(n > dblks);
 761			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
 762			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
 763			dblks = n;
 764			ptr = metapointer(end_of_metadata, mp);
 765			iomap->addr = bn << inode->i_blkbits;
 766			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
 767			while (n-- > 0)
 768				*ptr++ = cpu_to_be64(bn++);
 769			break;
 770		}
 771	} while (iomap->addr == IOMAP_NULL_ADDR);
 772
 773	iomap->type = IOMAP_MAPPED;
 774	iomap->length = (u64)dblks << inode->i_blkbits;
 775	ip->i_height = mp->mp_fheight;
 776	gfs2_add_inode_blocks(&ip->i_inode, alloced);
 777	gfs2_dinode_out(ip, dibh->b_data);
 778out:
 779	up_write(&ip->i_rw_mutex);
 780	return ret;
 781}
 782
 783#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
 784
 785/**
 786 * gfs2_alloc_size - Compute the maximum allocation size
 787 * @inode: The inode
 788 * @mp: The metapath
 789 * @size: Requested size in blocks
 790 *
 791 * Compute the maximum size of the next allocation at @mp.
 792 *
 793 * Returns: size in blocks
 794 */
 795static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
 796{
 797	struct gfs2_inode *ip = GFS2_I(inode);
 798	struct gfs2_sbd *sdp = GFS2_SB(inode);
 799	const __be64 *first, *ptr, *end;
 800
 801	/*
 802	 * For writes to stuffed files, this function is called twice via
 803	 * gfs2_iomap_get, before and after unstuffing. The size we return the
 804	 * first time needs to be large enough to get the reservation and
 805	 * allocation sizes right.  The size we return the second time must
 806	 * be exact or else gfs2_iomap_alloc won't do the right thing.
 807	 */
 808
 809	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
 810		unsigned int maxsize = mp->mp_fheight > 1 ?
 811			sdp->sd_inptrs : sdp->sd_diptrs;
 812		maxsize -= mp->mp_list[mp->mp_fheight - 1];
 813		if (size > maxsize)
 814			size = maxsize;
 815		return size;
 816	}
 817
 818	first = metapointer(ip->i_height - 1, mp);
 819	end = metaend(ip->i_height - 1, mp);
 820	if (end - first > size)
 821		end = first + size;
 822	for (ptr = first; ptr < end; ptr++) {
 823		if (*ptr)
 824			break;
 825	}
 826	return ptr - first;
 827}
 828
 829/**
 830 * gfs2_iomap_get - Map blocks from an inode to disk blocks
 831 * @inode: The inode
 832 * @pos: Starting position in bytes
 833 * @length: Length to map, in bytes
 834 * @flags: iomap flags
 835 * @iomap: The iomap structure
 836 * @mp: The metapath
 837 *
 838 * Returns: errno
 839 */
 840static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
 841			  unsigned flags, struct iomap *iomap,
 842			  struct metapath *mp)
 843{
 844	struct gfs2_inode *ip = GFS2_I(inode);
 845	struct gfs2_sbd *sdp = GFS2_SB(inode);
 846	loff_t size = i_size_read(inode);
 847	__be64 *ptr;
 848	sector_t lblock;
 849	sector_t lblock_stop;
 850	int ret;
 851	int eob;
 852	u64 len;
 853	struct buffer_head *dibh = NULL, *bh;
 854	u8 height;
 855
 856	if (!length)
 857		return -EINVAL;
 858
 859	down_read(&ip->i_rw_mutex);
 860
 861	ret = gfs2_meta_inode_buffer(ip, &dibh);
 862	if (ret)
 863		goto unlock;
 864	mp->mp_bh[0] = dibh;
 865
 866	if (gfs2_is_stuffed(ip)) {
 867		if (flags & IOMAP_WRITE) {
 868			loff_t max_size = gfs2_max_stuffed_size(ip);
 869
 870			if (pos + length > max_size)
 871				goto unstuff;
 872			iomap->length = max_size;
 873		} else {
 874			if (pos >= size) {
 875				if (flags & IOMAP_REPORT) {
 876					ret = -ENOENT;
 877					goto unlock;
 878				} else {
 879					/* report a hole */
 880					iomap->offset = pos;
 881					iomap->length = length;
 882					goto do_alloc;
 883				}
 884			}
 885			iomap->length = size;
 886		}
 887		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
 888			      sizeof(struct gfs2_dinode);
 889		iomap->type = IOMAP_INLINE;
 890		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
 891		goto out;
 892	}
 893
 894unstuff:
 895	lblock = pos >> inode->i_blkbits;
 896	iomap->offset = lblock << inode->i_blkbits;
 897	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
 898	len = lblock_stop - lblock + 1;
 899	iomap->length = len << inode->i_blkbits;
 900
 901	height = ip->i_height;
 902	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
 903		height++;
 904	find_metapath(sdp, lblock, mp, height);
 905	if (height > ip->i_height || gfs2_is_stuffed(ip))
 906		goto do_alloc;
 907
 908	ret = lookup_metapath(ip, mp);
 909	if (ret)
 910		goto unlock;
 911
 912	if (mp->mp_aheight != ip->i_height)
 913		goto do_alloc;
 914
 915	ptr = metapointer(ip->i_height - 1, mp);
 916	if (*ptr == 0)
 917		goto do_alloc;
 918
 919	bh = mp->mp_bh[ip->i_height - 1];
 920	len = gfs2_extent_length(bh, ptr, len, &eob);
 921
 922	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
 923	iomap->length = len << inode->i_blkbits;
 924	iomap->type = IOMAP_MAPPED;
 925	iomap->flags |= IOMAP_F_MERGED;
 926	if (eob)
 927		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
 928
 929out:
 930	iomap->bdev = inode->i_sb->s_bdev;
 931unlock:
 932	up_read(&ip->i_rw_mutex);
 933	return ret;
 934
 935do_alloc:
 936	iomap->addr = IOMAP_NULL_ADDR;
 937	iomap->type = IOMAP_HOLE;
 938	if (flags & IOMAP_REPORT) {
 939		if (pos >= size)
 940			ret = -ENOENT;
 941		else if (height == ip->i_height)
 942			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
 943		else
 944			iomap->length = size - pos;
 945	} else if (flags & IOMAP_WRITE) {
 946		u64 alloc_size;
 947
 948		if (flags & IOMAP_DIRECT)
 949			goto out;  /* (see gfs2_file_direct_write) */
 950
 951		len = gfs2_alloc_size(inode, mp, len);
 952		alloc_size = len << inode->i_blkbits;
 953		if (alloc_size < iomap->length)
 954			iomap->length = alloc_size;
 955	} else {
 956		if (pos < size && height == ip->i_height)
 957			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
 958	}
 959	goto out;
 960}
 961
 962/**
 963 * gfs2_lblk_to_dblk - convert logical block to disk block
 964 * @inode: the inode of the file we're mapping
 965 * @lblock: the block relative to the start of the file
 966 * @dblock: the returned dblock, if no error
 967 *
 968 * This function maps a single block from a file logical block (relative to
 969 * the start of the file) to a file system absolute block using iomap.
 970 *
 971 * Returns: the absolute file system block, or an error
 972 */
 973int gfs2_lblk_to_dblk(struct inode *inode, u32 lblock, u64 *dblock)
 974{
 975	struct iomap iomap = { };
 976	struct metapath mp = { .mp_aheight = 1, };
 977	loff_t pos = (loff_t)lblock << inode->i_blkbits;
 978	int ret;
 979
 980	ret = gfs2_iomap_get(inode, pos, i_blocksize(inode), 0, &iomap, &mp);
 981	release_metapath(&mp);
 982	if (ret == 0)
 983		*dblock = iomap.addr >> inode->i_blkbits;
 984
 985	return ret;
 986}
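/*
 * Editor's note -- hypothetical usage, not part of the original source:
 * mapping the first block of a file might look like this, with dblock
 * left untouched on error:
 */
#if 0
	u64 dblock;
	int ret = gfs2_lblk_to_dblk(inode, 0, &dblock);

	if (ret == 0)
		pr_info("block 0 lives at disk block %llu\n", dblock);
#endif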
 987
 988static int gfs2_write_lock(struct inode *inode)
 989{
 990	struct gfs2_inode *ip = GFS2_I(inode);
 991	struct gfs2_sbd *sdp = GFS2_SB(inode);
 992	int error;
 993
 994	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
 995	error = gfs2_glock_nq(&ip->i_gh);
 996	if (error)
 997		goto out_uninit;
 998	if (&ip->i_inode == sdp->sd_rindex) {
 999		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1000
1001		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
1002					   GL_NOCACHE, &m_ip->i_gh);
1003		if (error)
1004			goto out_unlock;
1005	}
1006	return 0;
1007
1008out_unlock:
1009	gfs2_glock_dq(&ip->i_gh);
1010out_uninit:
1011	gfs2_holder_uninit(&ip->i_gh);
1012	return error;
1013}
1014
1015static void gfs2_write_unlock(struct inode *inode)
1016{
1017	struct gfs2_inode *ip = GFS2_I(inode);
1018	struct gfs2_sbd *sdp = GFS2_SB(inode);
1019
1020	if (&ip->i_inode == sdp->sd_rindex) {
1021		struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
1022
1023		gfs2_glock_dq_uninit(&m_ip->i_gh);
1024	}
1025	gfs2_glock_dq_uninit(&ip->i_gh);
1026}
1027
1028static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
1029				   unsigned len, struct iomap *iomap)
1030{
1031	unsigned int blockmask = i_blocksize(inode) - 1;
1032	struct gfs2_sbd *sdp = GFS2_SB(inode);
1033	unsigned int blocks;
1034
1035	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
1036	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
1037}
1038
1039static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
1040				 unsigned copied, struct page *page,
1041				 struct iomap *iomap)
1042{
1043	struct gfs2_trans *tr = current->journal_info;
1044	struct gfs2_inode *ip = GFS2_I(inode);
1045	struct gfs2_sbd *sdp = GFS2_SB(inode);
1046
1047	if (page && !gfs2_is_stuffed(ip))
1048		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
1049
1050	if (tr->tr_num_buf_new)
1051		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1052
1053	gfs2_trans_end(sdp);
1054}
1055
1056static const struct iomap_page_ops gfs2_iomap_page_ops = {
1057	.page_prepare = gfs2_iomap_page_prepare,
1058	.page_done = gfs2_iomap_page_done,
1059};
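/*
 * Editor's note -- illustrative, not part of the original source: with
 * these page_ops installed, the generic iomap buffered-write path is
 * expected to bracket each page with a transaction:
 *
 *	->page_prepare()  ->  gfs2_trans_begin(sdp, RES_DINODE + blocks, 0)
 *	(data is copied into the page)
 *	->page_done()     ->  journal the data buffers, mark the inode
 *			      dirty if metadata changed, gfs2_trans_end()
 *
 * which is why gfs2_iomap_begin_write() below only installs them for
 * stuffed and jdata files.
 */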
1060
1061static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
1062				  loff_t length, unsigned flags,
1063				  struct iomap *iomap,
1064				  struct metapath *mp)
1065{
1066	struct gfs2_inode *ip = GFS2_I(inode);
1067	struct gfs2_sbd *sdp = GFS2_SB(inode);
1068	bool unstuff;
1069	int ret;
1070
1071	unstuff = gfs2_is_stuffed(ip) &&
1072		  pos + length > gfs2_max_stuffed_size(ip);
1073
1074	if (unstuff || iomap->type == IOMAP_HOLE) {
1075		unsigned int data_blocks, ind_blocks;
1076		struct gfs2_alloc_parms ap = {};
1077		unsigned int rblocks;
1078		struct gfs2_trans *tr;
1079
1080		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1081				       &ind_blocks);
1082		ap.target = data_blocks + ind_blocks;
1083		ret = gfs2_quota_lock_check(ip, &ap);
1084		if (ret)
1085			return ret;
1086
1087		ret = gfs2_inplace_reserve(ip, &ap);
1088		if (ret)
1089			goto out_qunlock;
1090
1091		rblocks = RES_DINODE + ind_blocks;
1092		if (gfs2_is_jdata(ip))
1093			rblocks += data_blocks;
1094		if (ind_blocks || data_blocks)
1095			rblocks += RES_STATFS + RES_QUOTA;
1096		if (inode == sdp->sd_rindex)
1097			rblocks += 2 * RES_STATFS;
1098		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1099
1100		ret = gfs2_trans_begin(sdp, rblocks,
1101				       iomap->length >> inode->i_blkbits);
1102		if (ret)
1103			goto out_trans_fail;
1104
1105		if (unstuff) {
1106			ret = gfs2_unstuff_dinode(ip, NULL);
1107			if (ret)
1108				goto out_trans_end;
1109			release_metapath(mp);
1110			ret = gfs2_iomap_get(inode, iomap->offset,
1111					     iomap->length, flags, iomap, mp);
1112			if (ret)
1113				goto out_trans_end;
1114		}
1115
1116		if (iomap->type == IOMAP_HOLE) {
1117			ret = gfs2_iomap_alloc(inode, iomap, mp);
1118			if (ret) {
1119				gfs2_trans_end(sdp);
1120				gfs2_inplace_release(ip);
1121				punch_hole(ip, iomap->offset, iomap->length);
1122				goto out_qunlock;
1123			}
1124		}
1125
1126		tr = current->journal_info;
1127		if (tr->tr_num_buf_new)
1128			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1129
1130		gfs2_trans_end(sdp);
1131	}
1132
1133	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1134		iomap->page_ops = &gfs2_iomap_page_ops;
1135	return 0;
1136
1137out_trans_end:
1138	gfs2_trans_end(sdp);
1139out_trans_fail:
1140	gfs2_inplace_release(ip);
1141out_qunlock:
1142	gfs2_quota_unlock(ip);
1143	return ret;
1144}
1145
1146static inline bool gfs2_iomap_need_write_lock(unsigned flags)
1147{
1148	return (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT);
1149}
1150
1151static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1152			    unsigned flags, struct iomap *iomap)
1153{
1154	struct gfs2_inode *ip = GFS2_I(inode);
1155	struct metapath mp = { .mp_aheight = 1, };
1156	int ret;
1157
1158	iomap->flags |= IOMAP_F_BUFFER_HEAD;
1159
1160	trace_gfs2_iomap_start(ip, pos, length, flags);
1161	if (gfs2_iomap_need_write_lock(flags)) {
1162		ret = gfs2_write_lock(inode);
1163		if (ret)
1164			goto out;
1165	}
1166
1167	ret = gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1168	if (ret)
1169		goto out_unlock;
1170
1171	switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1172	case IOMAP_WRITE:
1173		if (flags & IOMAP_DIRECT) {
1174			/*
1175			 * Silently fall back to buffered I/O for stuffed files
1176			 * or if we've got a hole (see gfs2_file_direct_write).
1177			 */
1178			if (iomap->type != IOMAP_MAPPED)
1179				ret = -ENOTBLK;
1180			goto out_unlock;
1181		}
1182		break;
1183	case IOMAP_ZERO:
1184		if (iomap->type == IOMAP_HOLE)
1185			goto out_unlock;
1186		break;
1187	default:
1188		goto out_unlock;
1189	}
1190
1191	ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1192
1193out_unlock:
1194	if (ret && gfs2_iomap_need_write_lock(flags))
1195		gfs2_write_unlock(inode);
1196	release_metapath(&mp);
1197out:
1198	trace_gfs2_iomap_end(ip, iomap, ret);
1199	return ret;
1200}
1201
1202static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1203			  ssize_t written, unsigned flags, struct iomap *iomap)
1204{
1205	struct gfs2_inode *ip = GFS2_I(inode);
1206	struct gfs2_sbd *sdp = GFS2_SB(inode);
1207
1208	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1209	case IOMAP_WRITE:
1210		if (flags & IOMAP_DIRECT)
1211			return 0;
1212		break;
1213	case IOMAP_ZERO:
 1214		if (iomap->type == IOMAP_HOLE)
 1215			return 0;
 1216		break;
 1217	default:
 1218		return 0;
1219	}
1220
1221	if (!gfs2_is_stuffed(ip))
1222		gfs2_ordered_add_inode(ip);
1223
1224	if (inode == sdp->sd_rindex)
1225		adjust_fs_space(inode);
1226
1227	gfs2_inplace_release(ip);
1228
1229	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1230		/* Deallocate blocks that were just allocated. */
1231		loff_t blockmask = i_blocksize(inode) - 1;
1232		loff_t end = (pos + length) & ~blockmask;
1233
1234		pos = (pos + written + blockmask) & ~blockmask;
1235		if (pos < end) {
1236			truncate_pagecache_range(inode, pos, end - 1);
1237			punch_hole(ip, pos, end - pos);
1238		}
1239	}
1240
1241	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1242		gfs2_quota_unlock(ip);
1243
1244	if (unlikely(!written))
1245		goto out_unlock;
1246
1247	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1248		mark_inode_dirty(inode);
1249	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1250
1251out_unlock:
1252	if (gfs2_iomap_need_write_lock(flags))
1253		gfs2_write_unlock(inode);
1254	return 0;
1255}
1256
1257const struct iomap_ops gfs2_iomap_ops = {
1258	.iomap_begin = gfs2_iomap_begin,
1259	.iomap_end = gfs2_iomap_end,
1260};
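/*
 * Editor's note -- illustrative, not part of the original source: this
 * ops table is how GFS2 plugs into the generic iomap helpers.  For
 * example, gfs2_block_zero_range() below forwards straight to
 * iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops), and the
 * buffered and direct I/O paths are assumed to hand &gfs2_iomap_ops to
 * the corresponding iomap entry points in the same way.
 */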
1261
1262/**
1263 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1264 * @inode: The inode
1265 * @lblock: The logical block number
1266 * @bh_map: The bh to be mapped
 1267 * @create: True if it's ok to alloc blocks to satisfy the request
1268 *
1269 * The size of the requested mapping is defined in bh_map->b_size.
1270 *
1271 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1272 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
1273 * bh_map->b_size to indicate the size of the mapping when @lblock and
1274 * successive blocks are mapped, up to the requested size.
1275 *
1276 * Sets buffer_boundary() if a read of metadata will be required
1277 * before the next block can be mapped. Sets buffer_new() if new
1278 * blocks were allocated.
1279 *
1280 * Returns: errno
1281 */
1282
1283int gfs2_block_map(struct inode *inode, sector_t lblock,
1284		   struct buffer_head *bh_map, int create)
1285{
1286	struct gfs2_inode *ip = GFS2_I(inode);
1287	loff_t pos = (loff_t)lblock << inode->i_blkbits;
1288	loff_t length = bh_map->b_size;
1289	struct metapath mp = { .mp_aheight = 1, };
1290	struct iomap iomap = { };
1291	int ret;
1292
1293	clear_buffer_mapped(bh_map);
1294	clear_buffer_new(bh_map);
1295	clear_buffer_boundary(bh_map);
1296	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1297
1298	if (create) {
1299		ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, &iomap, &mp);
1300		if (!ret && iomap.type == IOMAP_HOLE)
1301			ret = gfs2_iomap_alloc(inode, &iomap, &mp);
1302		release_metapath(&mp);
1303	} else {
1304		ret = gfs2_iomap_get(inode, pos, length, 0, &iomap, &mp);
1305		release_metapath(&mp);
1306	}
1307	if (ret)
1308		goto out;
1309
1310	if (iomap.length > bh_map->b_size) {
1311		iomap.length = bh_map->b_size;
1312		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1313	}
1314	if (iomap.addr != IOMAP_NULL_ADDR)
1315		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1316	bh_map->b_size = iomap.length;
1317	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1318		set_buffer_boundary(bh_map);
1319	if (iomap.flags & IOMAP_F_NEW)
1320		set_buffer_new(bh_map);
1321
1322out:
1323	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1324	return ret;
1325}
1326
1327/*
1328 * Deprecated: do not use in new code
1329 */
1330int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
1331{
1332	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
1333	int ret;
1334	int create = *new;
1335
1336	BUG_ON(!extlen);
1337	BUG_ON(!dblock);
1338	BUG_ON(!new);
1339
1340	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
1341	ret = gfs2_block_map(inode, lblock, &bh, create);
1342	*extlen = bh.b_size >> inode->i_blkbits;
1343	*dblock = bh.b_blocknr;
1344	if (buffer_new(&bh))
1345		*new = 1;
1346	else
1347		*new = 0;
1348	return ret;
1349}
1350
1351static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1352				 unsigned int length)
1353{
1354	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1355}
1356
1357#define GFS2_JTRUNC_REVOKES 8192
1358
1359/**
1360 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1361 * @inode: The inode being truncated
1362 * @oldsize: The original (larger) size
1363 * @newsize: The new smaller size
1364 *
1365 * With jdata files, we have to journal a revoke for each block which is
1366 * truncated. As a result, we need to split this into separate transactions
1367 * if the number of pages being truncated gets too large.
1368 */
1369
1370static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1371{
1372	struct gfs2_sbd *sdp = GFS2_SB(inode);
1373	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1374	u64 chunk;
1375	int error;
1376
1377	while (oldsize != newsize) {
1378		struct gfs2_trans *tr;
1379		unsigned int offs;
1380
1381		chunk = oldsize - newsize;
1382		if (chunk > max_chunk)
1383			chunk = max_chunk;
1384
1385		offs = oldsize & ~PAGE_MASK;
1386		if (offs && chunk > PAGE_SIZE)
1387			chunk = offs + ((chunk - offs) & PAGE_MASK);
1388
1389		truncate_pagecache(inode, oldsize - chunk);
1390		oldsize -= chunk;
1391
1392		tr = current->journal_info;
1393		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1394			continue;
1395
1396		gfs2_trans_end(sdp);
1397		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1398		if (error)
1399			return error;
1400	}
1401
1402	return 0;
1403}
1404
1405static int trunc_start(struct inode *inode, u64 newsize)
1406{
1407	struct gfs2_inode *ip = GFS2_I(inode);
1408	struct gfs2_sbd *sdp = GFS2_SB(inode);
1409	struct buffer_head *dibh = NULL;
1410	int journaled = gfs2_is_jdata(ip);
1411	u64 oldsize = inode->i_size;
1412	int error;
1413
1414	if (journaled)
1415		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1416	else
1417		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1418	if (error)
1419		return error;
1420
1421	error = gfs2_meta_inode_buffer(ip, &dibh);
1422	if (error)
1423		goto out;
1424
1425	gfs2_trans_add_meta(ip->i_gl, dibh);
1426
1427	if (gfs2_is_stuffed(ip)) {
1428		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1429	} else {
1430		unsigned int blocksize = i_blocksize(inode);
1431		unsigned int offs = newsize & (blocksize - 1);
1432		if (offs) {
1433			error = gfs2_block_zero_range(inode, newsize,
1434						      blocksize - offs);
1435			if (error)
1436				goto out;
1437		}
1438		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1439	}
1440
1441	i_size_write(inode, newsize);
1442	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1443	gfs2_dinode_out(ip, dibh->b_data);
1444
1445	if (journaled)
1446		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1447	else
1448		truncate_pagecache(inode, newsize);
1449
1450out:
1451	brelse(dibh);
1452	if (current->journal_info)
1453		gfs2_trans_end(sdp);
1454	return error;
1455}
1456
1457int gfs2_iomap_get_alloc(struct inode *inode, loff_t pos, loff_t length,
1458			 struct iomap *iomap)
1459{
1460	struct metapath mp = { .mp_aheight = 1, };
1461	int ret;
1462
1463	ret = gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1464	if (!ret && iomap->type == IOMAP_HOLE)
1465		ret = gfs2_iomap_alloc(inode, iomap, &mp);
1466	release_metapath(&mp);
1467	return ret;
1468}
1469
1470/**
1471 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1472 * @ip: inode
 1473 * @rd_gh: holder of resource group glock
1474 * @bh: buffer head to sweep
1475 * @start: starting point in bh
1476 * @end: end point in bh
1477 * @meta: true if bh points to metadata (rather than data)
1478 * @btotal: place to keep count of total blocks freed
1479 *
1480 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1481 * free, and free them all. However, we do it one rgrp at a time. If this
1482 * block has references to multiple rgrps, we break it into individual
1483 * transactions. This allows other processes to use the rgrps while we're
1484 * focused on a single one, for better concurrency / performance.
1485 * At every transaction boundary, we rewrite the inode into the journal.
1486 * That way the bitmaps are kept consistent with the inode and we can recover
1487 * if we're interrupted by power-outages.
1488 *
1489 * Returns: 0, or return code if an error occurred.
1490 *          *btotal has the total number of blocks freed
1491 */
1492static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1493			      struct buffer_head *bh, __be64 *start, __be64 *end,
1494			      bool meta, u32 *btotal)
1495{
1496	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1497	struct gfs2_rgrpd *rgd;
1498	struct gfs2_trans *tr;
1499	__be64 *p;
1500	int blks_outside_rgrp;
1501	u64 bn, bstart, isize_blks;
1502	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1503	int ret = 0;
1504	bool buf_in_tr = false; /* buffer was added to transaction */
1505
1506more_rgrps:
1507	rgd = NULL;
1508	if (gfs2_holder_initialized(rd_gh)) {
1509		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1510		gfs2_assert_withdraw(sdp,
1511			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1512	}
1513	blks_outside_rgrp = 0;
1514	bstart = 0;
1515	blen = 0;
1516
1517	for (p = start; p < end; p++) {
1518		if (!*p)
1519			continue;
1520		bn = be64_to_cpu(*p);
1521
1522		if (rgd) {
1523			if (!rgrp_contains_block(rgd, bn)) {
1524				blks_outside_rgrp++;
1525				continue;
1526			}
1527		} else {
1528			rgd = gfs2_blk2rgrpd(sdp, bn, true);
1529			if (unlikely(!rgd)) {
1530				ret = -EIO;
1531				goto out;
1532			}
1533			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1534						 0, rd_gh);
1535			if (ret)
1536				goto out;
1537
1538			/* Must be done with the rgrp glock held: */
1539			if (gfs2_rs_active(&ip->i_res) &&
1540			    rgd == ip->i_res.rs_rbm.rgd)
1541				gfs2_rs_deltree(&ip->i_res);
1542		}
1543
1544		/* The size of our transactions will be unknown until we
1545		   actually process all the metadata blocks that relate to
1546		   the rgrp. So we estimate. We know it can't be more than
1547		   the dinode's i_blocks and we don't want to exceed the
1548		   journal flush threshold, sd_log_thresh2. */
1549		if (current->journal_info == NULL) {
1550			unsigned int jblocks_rqsted, revokes;
1551
1552			jblocks_rqsted = rgd->rd_length + RES_DINODE +
1553				RES_INDIRECT;
1554			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1555			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1556				jblocks_rqsted +=
1557					atomic_read(&sdp->sd_log_thresh2);
1558			else
1559				jblocks_rqsted += isize_blks;
1560			revokes = jblocks_rqsted;
1561			if (meta)
1562				revokes += end - start;
1563			else if (ip->i_depth)
1564				revokes += sdp->sd_inptrs;
1565			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1566			if (ret)
1567				goto out_unlock;
1568			down_write(&ip->i_rw_mutex);
1569		}
1570		/* check if we will exceed the transaction blocks requested */
1571		tr = current->journal_info;
1572		if (tr->tr_num_buf_new + RES_STATFS +
1573		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1574			/* We set blks_outside_rgrp to ensure the loop will
1575			   be repeated for the same rgrp, but with a new
1576			   transaction. */
1577			blks_outside_rgrp++;
1578			/* This next part is tricky. If the buffer was added
1579			   to the transaction, we've already set some block
1580			   pointers to 0, so we better follow through and free
1581			   them, or we will introduce corruption (so break).
1582			   This may be impossible, or at least rare, but I
1583			   decided to cover the case regardless.
1584
1585			   If the buffer was not added to the transaction
1586			   (this call), doing so would exceed our transaction
1587			   size, so we need to end the transaction and start a
1588			   new one (so goto). */
1589
1590			if (buf_in_tr)
1591				break;
1592			goto out_unlock;
1593		}
1594
1595		gfs2_trans_add_meta(ip->i_gl, bh);
1596		buf_in_tr = true;
1597		*p = 0;
1598		if (bstart + blen == bn) {
1599			blen++;
1600			continue;
1601		}
1602		if (bstart) {
1603			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1604			(*btotal) += blen;
1605			gfs2_add_inode_blocks(&ip->i_inode, -blen);
1606		}
1607		bstart = bn;
1608		blen = 1;
1609	}
1610	if (bstart) {
1611		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1612		(*btotal) += blen;
1613		gfs2_add_inode_blocks(&ip->i_inode, -blen);
1614	}
1615out_unlock:
1616	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1617					    outside the rgrp we just processed,
1618					    do it all over again. */
1619		if (current->journal_info) {
1620			struct buffer_head *dibh;
1621
1622			ret = gfs2_meta_inode_buffer(ip, &dibh);
1623			if (ret)
1624				goto out;
1625
1626			/* Every transaction boundary, we rewrite the dinode
1627			   to keep its di_blocks current in case of failure. */
1628			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1629				current_time(&ip->i_inode);
1630			gfs2_trans_add_meta(ip->i_gl, dibh);
1631			gfs2_dinode_out(ip, dibh->b_data);
1632			brelse(dibh);
1633			up_write(&ip->i_rw_mutex);
1634			gfs2_trans_end(sdp);
1635			buf_in_tr = false;
1636		}
1637		gfs2_glock_dq_uninit(rd_gh);
1638		cond_resched();
1639		goto more_rgrps;
1640	}
1641out:
1642	return ret;
1643}
1644
1645static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1646{
1647	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1648		return false;
1649	return true;
1650}
1651
1652/**
1653 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1654 * @mp: starting metapath
1655 * @h: desired height to search
1656 *
1657 * Assumes the metapath is valid (with buffers) out to height h.
1658 * Returns: true if a non-null pointer was found in the metapath buffer
1659 *          false if all remaining pointers are NULL in the buffer
1660 */
1661static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1662			     unsigned int h,
1663			     __u16 *end_list, unsigned int end_aligned)
1664{
1665	struct buffer_head *bh = mp->mp_bh[h];
1666	__be64 *first, *ptr, *end;
1667
1668	first = metaptr1(h, mp);
1669	ptr = first + mp->mp_list[h];
1670	end = (__be64 *)(bh->b_data + bh->b_size);
1671	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1672		bool keep_end = h < end_aligned;
1673		end = first + end_list[h] + keep_end;
1674	}
1675
1676	while (ptr < end) {
1677		if (*ptr) { /* if we have a non-null pointer */
1678			mp->mp_list[h] = ptr - first;
1679			h++;
1680			if (h < GFS2_MAX_META_HEIGHT)
1681				mp->mp_list[h] = 0;
1682			return true;
1683		}
1684		ptr++;
1685	}
1686	return false;
1687}
1688
1689enum dealloc_states {
1690	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
1691	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
1692	DEALLOC_FILL_MP = 2,  /* Fill in the metapath to the given height. */
1693	DEALLOC_DONE = 3,       /* process complete */
1694};
1695
1696static inline void
1697metapointer_range(struct metapath *mp, int height,
1698		  __u16 *start_list, unsigned int start_aligned,
1699		  __u16 *end_list, unsigned int end_aligned,
1700		  __be64 **start, __be64 **end)
1701{
1702	struct buffer_head *bh = mp->mp_bh[height];
1703	__be64 *first;
1704
1705	first = metaptr1(height, mp);
1706	*start = first;
1707	if (mp_eq_to_hgt(mp, start_list, height)) {
1708		bool keep_start = height < start_aligned;
1709		*start = first + start_list[height] + keep_start;
1710	}
1711	*end = (__be64 *)(bh->b_data + bh->b_size);
1712	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1713		bool keep_end = height < end_aligned;
1714		*end = first + end_list[height] + keep_end;
1715	}
1716}
1717
1718static inline bool walk_done(struct gfs2_sbd *sdp,
1719			     struct metapath *mp, int height,
1720			     __u16 *end_list, unsigned int end_aligned)
1721{
1722	__u16 end;
1723
1724	if (end_list) {
1725		bool keep_end = height < end_aligned;
1726		if (!mp_eq_to_hgt(mp, end_list, height))
1727			return false;
1728		end = end_list[height] + keep_end;
1729	} else
1730		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1731	return mp->mp_list[height] >= end;
1732}
1733
1734/**
1735 * punch_hole - deallocate blocks in a file
1736 * @ip: inode to truncate
1737 * @offset: the start of the hole
1738 * @length: the size of the hole (or 0 for truncate)
1739 *
1740 * Punch a hole into a file or truncate a file at a given position.  This
1741 * function operates in whole blocks (@offset and @length are rounded
1742 * accordingly); partially filled blocks must be cleared otherwise.
1743 *
1744 * This function works from the bottom up, and from the right to the left. In
1745 * other words, it strips off the highest layer (data) before stripping any of
1746 * the metadata. Doing it this way is best in case the operation is interrupted
1747 * by power failure, etc.  The dinode is rewritten in every transaction to
1748 * guarantee integrity.
1749 */
1750static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1751{
1752	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1753	u64 maxsize = sdp->sd_heightsize[ip->i_height];
1754	struct metapath mp = {};
1755	struct buffer_head *dibh, *bh;
1756	struct gfs2_holder rd_gh;
1757	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1758	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1759	__u16 start_list[GFS2_MAX_META_HEIGHT];
1760	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1761	unsigned int start_aligned, uninitialized_var(end_aligned);
1762	unsigned int strip_h = ip->i_height - 1;
1763	u32 btotal = 0;
1764	int ret, state;
1765	int mp_h; /* metapath buffers are read in to this height */
1766	u64 prev_bnr = 0;
1767	__be64 *start, *end;
1768
1769	if (offset >= maxsize) {
1770		/*
1771		 * The starting point lies beyond the allocated meta-data;
 1772		 * there are no blocks to deallocate.
1773		 */
1774		return 0;
1775	}
1776
1777	/*
1778	 * The start position of the hole is defined by lblock, start_list, and
1779	 * start_aligned.  The end position of the hole is defined by lend,
1780	 * end_list, and end_aligned.
1781	 *
1782	 * start_aligned and end_aligned define down to which height the start
1783	 * and end positions are aligned to the metadata tree (i.e., the
1784	 * position is a multiple of the metadata granularity at the height
1785	 * above).  This determines at which heights additional meta pointers
 1786	 * need to be preserved for the remaining data.
1787	 */
1788
1789	if (length) {
1790		u64 end_offset = offset + length;
1791		u64 lend;
1792
1793		/*
1794		 * Clip the end at the maximum file size for the given height:
1795		 * that's how far the metadata goes; files bigger than that
1796		 * will have additional layers of indirection.
1797		 */
1798		if (end_offset > maxsize)
1799			end_offset = maxsize;
1800		lend = end_offset >> bsize_shift;
1801
1802		if (lblock >= lend)
1803			return 0;
1804
1805		find_metapath(sdp, lend, &mp, ip->i_height);
1806		end_list = __end_list;
1807		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1808
1809		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1810			if (end_list[mp_h])
1811				break;
1812		}
1813		end_aligned = mp_h;
1814	}
1815
1816	find_metapath(sdp, lblock, &mp, ip->i_height);
1817	memcpy(start_list, mp.mp_list, sizeof(start_list));
1818
1819	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1820		if (start_list[mp_h])
1821			break;
1822	}
1823	start_aligned = mp_h;
1824
1825	ret = gfs2_meta_inode_buffer(ip, &dibh);
1826	if (ret)
1827		return ret;
1828
1829	mp.mp_bh[0] = dibh;
1830	ret = lookup_metapath(ip, &mp);
1831	if (ret)
1832		goto out_metapath;
1833
1834	/* issue read-ahead on metadata */
1835	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1836		metapointer_range(&mp, mp_h, start_list, start_aligned,
1837				  end_list, end_aligned, &start, &end);
1838		gfs2_metapath_ra(ip->i_gl, start, end);
1839	}
1840
1841	if (mp.mp_aheight == ip->i_height)
1842		state = DEALLOC_MP_FULL; /* We have a complete metapath */
1843	else
1844		state = DEALLOC_FILL_MP; /* deal with partial metapath */
1845
1846	ret = gfs2_rindex_update(sdp);
1847	if (ret)
1848		goto out_metapath;
1849
1850	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1851	if (ret)
1852		goto out_metapath;
1853	gfs2_holder_mark_uninitialized(&rd_gh);
1854
1855	mp_h = strip_h;
1856
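	/*
	 * Overview of the state machine below (a summary, no new logic):
	 *
	 *   DEALLOC_MP_FULL:  sweep the metapointers at strip_h, then go to
	 *                     DEALLOC_MP_LOWER (or DEALLOC_DONE on error, or
	 *                     once the dinode itself has been swept)
	 *   DEALLOC_MP_LOWER: release the current buffer and step left/down;
	 *                     go to DEALLOC_FILL_MP when a new subtree is found
	 *   DEALLOC_FILL_MP:  read buffers in up to mp_h; back to
	 *                     DEALLOC_MP_FULL once the strip height is fully
	 *                     buffered, or to DEALLOC_MP_LOWER if no non-null
	 *                     pointer is found
	 */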
1857	while (state != DEALLOC_DONE) {
1858		switch (state) {
1859		/* Truncate a full metapath at the given strip height.
1860		 * Note that strip_h == mp_h in order to be in this state. */
1861		case DEALLOC_MP_FULL:
1862			bh = mp.mp_bh[mp_h];
1863			gfs2_assert_withdraw(sdp, bh);
1864			if (gfs2_assert_withdraw(sdp,
1865						 prev_bnr != bh->b_blocknr)) {
1866				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1867					 "s_h:%u, mp_h:%u\n",
1868				       (unsigned long long)ip->i_no_addr,
1869				       prev_bnr, ip->i_height, strip_h, mp_h);
1870			}
1871			prev_bnr = bh->b_blocknr;
1872
1873			if (gfs2_metatype_check(sdp, bh,
1874						(mp_h ? GFS2_METATYPE_IN :
1875							GFS2_METATYPE_DI))) {
1876				ret = -EIO;
1877				goto out;
1878			}
1879
1880			/*
1881			 * Below, passing end_aligned as 0 gives us the
1882			 * metapointer range excluding the end point: the end
1883			 * point is the first metapath we must not deallocate!
1884			 */
1885
1886			metapointer_range(&mp, mp_h, start_list, start_aligned,
1887					  end_list, 0 /* end_aligned */,
1888					  &start, &end);
1889			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1890						 start, end,
1891						 mp_h != ip->i_height - 1,
1892						 &btotal);
1893
1894			/* If we hit an error or have just swept the dinode
1895			   buffer, exit. */
1896			if (ret || !mp_h) {
1897				state = DEALLOC_DONE;
1898				break;
1899			}
1900			state = DEALLOC_MP_LOWER;
1901			break;
1902
1903		/* lower the metapath strip height */
1904		case DEALLOC_MP_LOWER:
1905			/* We're done with the current buffer, so release it,
1906			   unless it's the dinode buffer. Then back up to the
1907			   previous pointer. */
1908			if (mp_h) {
1909				brelse(mp.mp_bh[mp_h]);
1910				mp.mp_bh[mp_h] = NULL;
1911			}
1912			/* If we can't get any lower in height, we've stripped
1913			   off all we can. Next step is to back up and start
1914			   stripping the previous level of metadata. */
1915			if (mp_h == 0) {
1916				strip_h--;
1917				memcpy(mp.mp_list, start_list, sizeof(start_list));
1918				mp_h = strip_h;
1919				state = DEALLOC_FILL_MP;
1920				break;
1921			}
1922			mp.mp_list[mp_h] = 0;
1923			mp_h--; /* search one metadata height down */
1924			mp.mp_list[mp_h]++;
1925			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1926				break;
1927			/* Here we've found a part of the metapath that is not
1928			 * allocated. We need to search at that height for the
1929			 * next non-null pointer. */
1930			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1931				state = DEALLOC_FILL_MP;
1932				mp_h++;
1933			}
1934			/* No more non-null pointers at this height. Back up
1935			   to the previous height and try again. */
1936			break; /* loop around in the same state */
1937
1938		/* Fill the metapath with buffers to the given height. */
1939		case DEALLOC_FILL_MP:
1940			/* Fill the buffers out to the current height. */
1941			ret = fillup_metapath(ip, &mp, mp_h);
1942			if (ret < 0)
1943				goto out;
1944
1945			/* On the first pass, issue read-ahead on metadata. */
1946			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1947				unsigned int height = mp.mp_aheight - 1;
1948
1949				/* No read-ahead for data blocks. */
1950				if (mp.mp_aheight - 1 == strip_h)
1951					height--;
1952
1953				for (; height >= mp.mp_aheight - ret; height--) {
1954					metapointer_range(&mp, height,
1955							  start_list, start_aligned,
1956							  end_list, end_aligned,
1957							  &start, &end);
1958					gfs2_metapath_ra(ip->i_gl, start, end);
1959				}
1960			}
1961
1962			/* If buffers found for the entire strip height */
1963			if (mp.mp_aheight - 1 == strip_h) {
1964				state = DEALLOC_MP_FULL;
1965				break;
1966			}
1967			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1968				mp_h = mp.mp_aheight - 1;
1969
1970			/* If we find a non-null block pointer, crawl a bit
1971			   higher up in the metapath and try again, otherwise
1972			   we need to look lower for a new starting point. */
1973			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1974				mp_h++;
1975			else
1976				state = DEALLOC_MP_LOWER;
1977			break;
1978		}
1979	}
1980
1981	if (btotal) {
1982		if (current->journal_info == NULL) {
1983			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1984					       RES_QUOTA, 0);
1985			if (ret)
1986				goto out;
1987			down_write(&ip->i_rw_mutex);
1988		}
1989		gfs2_statfs_change(sdp, 0, +btotal, 0);
1990		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1991				  ip->i_inode.i_gid);
1992		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1993		gfs2_trans_add_meta(ip->i_gl, dibh);
1994		gfs2_dinode_out(ip, dibh->b_data);
1995		up_write(&ip->i_rw_mutex);
1996		gfs2_trans_end(sdp);
1997	}
1998
1999out:
2000	if (gfs2_holder_initialized(&rd_gh))
2001		gfs2_glock_dq_uninit(&rd_gh);
2002	if (current->journal_info) {
2003		up_write(&ip->i_rw_mutex);
2004		gfs2_trans_end(sdp);
2005		cond_resched();
2006	}
2007	gfs2_quota_unhold(ip);
2008out_metapath:
2009	release_metapath(&mp);
2010	return ret;
2011}
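
/*
 * Note: within this file, punch_hole() backs all of the deallocation
 * paths: do_shrink() and gfs2_truncatei_resume() pass length == 0 to
 * truncate, gfs2_file_dealloc() punches out the whole file, and
 * __gfs2_punch_hole() punches an arbitrary block-aligned range.
 */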
2012
2013static int trunc_end(struct gfs2_inode *ip)
2014{
2015	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2016	struct buffer_head *dibh;
2017	int error;
2018
2019	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2020	if (error)
2021		return error;
2022
2023	down_write(&ip->i_rw_mutex);
2024
2025	error = gfs2_meta_inode_buffer(ip, &dibh);
2026	if (error)
2027		goto out;
2028
2029	if (!i_size_read(&ip->i_inode)) {
2030		ip->i_height = 0;
2031		ip->i_goal = ip->i_no_addr;
2032		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
2033		gfs2_ordered_del_inode(ip);
2034	}
2035	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2036	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
2037
2038	gfs2_trans_add_meta(ip->i_gl, dibh);
2039	gfs2_dinode_out(ip, dibh->b_data);
2040	brelse(dibh);
2041
2042out:
2043	up_write(&ip->i_rw_mutex);
2044	gfs2_trans_end(sdp);
2045	return error;
2046}
2047
2048/**
2049 * do_shrink - make a file smaller
2050 * @inode: the inode
2051 * @newsize: the size to make the file
2052 *
2053 * Called with an exclusive lock on @inode. The @newsize must
2054 * be equal to or smaller than the current inode size.
2055 *
2056 * Returns: errno
2057 */
2058
2059static int do_shrink(struct inode *inode, u64 newsize)
2060{
2061	struct gfs2_inode *ip = GFS2_I(inode);
2062	int error;
2063
2064	error = trunc_start(inode, newsize);
2065	if (error < 0)
2066		return error;
2067	if (gfs2_is_stuffed(ip))
2068		return 0;
2069
2070	error = punch_hole(ip, newsize, 0);
2071	if (error == 0)
2072		error = trunc_end(ip);
2073
2074	return error;
2075}
2076
2077void gfs2_trim_blocks(struct inode *inode)
2078{
2079	int ret;
2080
2081	ret = do_shrink(inode, inode->i_size);
2082	WARN_ON(ret != 0);
2083}
2084
2085/**
2086 * do_grow - Touch and update inode size
2087 * @inode: The inode
2088 * @size: The new size
2089 *
2090 * This function updates the timestamps on the inode and
2091 * may also increase the size of the inode. This function
2092 * must not be called with @size any smaller than the current
2093 * inode size.
2094 *
2095 * Although it is not strictly required to unstuff files here,
2096 * earlier versions of GFS2 have a bug in the stuffed file reading
2097 * code which will result in a buffer overrun if the size is larger
2098 * than the max stuffed file size. In order to prevent this from
2099 * occurring, such files are unstuffed, but in other cases we can
2100 * just update the inode size directly.
2101 *
2102 * Returns: 0 on success, or -ve on error
2103 */
2104
2105static int do_grow(struct inode *inode, u64 size)
2106{
2107	struct gfs2_inode *ip = GFS2_I(inode);
2108	struct gfs2_sbd *sdp = GFS2_SB(inode);
2109	struct gfs2_alloc_parms ap = { .target = 1, };
2110	struct buffer_head *dibh;
2111	int error;
2112	int unstuff = 0;
2113
2114	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2115		error = gfs2_quota_lock_check(ip, &ap);
2116		if (error)
2117			return error;
2118
2119		error = gfs2_inplace_reserve(ip, &ap);
2120		if (error)
2121			goto do_grow_qunlock;
2122		unstuff = 1;
2123	}
2124
2125	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2126				 (unstuff &&
2127				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2128				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2129				  0 : RES_QUOTA), 0);
2130	if (error)
2131		goto do_grow_release;
2132
2133	if (unstuff) {
2134		error = gfs2_unstuff_dinode(ip, NULL);
2135		if (error)
2136			goto do_end_trans;
2137	}
2138
2139	error = gfs2_meta_inode_buffer(ip, &dibh);
2140	if (error)
2141		goto do_end_trans;
2142
2143	truncate_setsize(inode, size);
2144	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2145	gfs2_trans_add_meta(ip->i_gl, dibh);
2146	gfs2_dinode_out(ip, dibh->b_data);
2147	brelse(dibh);
2148
2149do_end_trans:
2150	gfs2_trans_end(sdp);
2151do_grow_release:
2152	if (unstuff) {
2153		gfs2_inplace_release(ip);
2154do_grow_qunlock:
2155		gfs2_quota_unlock(ip);
2156	}
2157	return error;
2158}
2159
2160/**
2161 * gfs2_setattr_size - make a file a given size
2162 * @inode: the inode
2163 * @newsize: the size to make the file
2164 *
2165 * The file size can grow, shrink, or stay the same size. This
2166 * is called holding i_rwsem and an exclusive glock on the inode
2167 * in question.
2168 *
2169 * Returns: errno
2170 */
2171
2172int gfs2_setattr_size(struct inode *inode, u64 newsize)
2173{
2174	struct gfs2_inode *ip = GFS2_I(inode);
2175	int ret;
2176
2177	BUG_ON(!S_ISREG(inode->i_mode));
2178
2179	ret = inode_newsize_ok(inode, newsize);
2180	if (ret)
2181		return ret;
2182
2183	inode_dio_wait(inode);
2184
2185	ret = gfs2_rsqa_alloc(ip);
2186	if (ret)
2187		goto out;
2188
2189	if (newsize >= inode->i_size) {
2190		ret = do_grow(inode, newsize);
2191		goto out;
2192	}
2193
2194	ret = do_shrink(inode, newsize);
2195out:
2196	gfs2_rsqa_delete(ip, NULL);
2197	return ret;
2198}
2199
2200int gfs2_truncatei_resume(struct gfs2_inode *ip)
2201{
2202	int error;
2203	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2204	if (!error)
2205		error = trunc_end(ip);
2206	return error;
2207}
2208
2209int gfs2_file_dealloc(struct gfs2_inode *ip)
2210{
2211	return punch_hole(ip, 0, 0);
2212}
2213
2214/**
2215 * gfs2_free_journal_extents - Free cached journal bmap info
2216 * @jd: The journal
2217 *
2218 */
2219
2220void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2221{
2222	struct gfs2_journal_extent *jext;
2223
2224	while (!list_empty(&jd->extent_list)) {
2225		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
2226		list_del(&jext->list);
2227		kfree(jext);
2228	}
2229}
2230
2231/**
2232 * gfs2_add_jextent - Add or merge a new extent to extent cache
2233 * @jd: The journal descriptor
2234 * @lblock: The logical block at start of new extent
2235 * @dblock: The physical block at start of new extent
2236 * @blocks: Size of extent in fs blocks
2237 *
2238 * Returns: 0 on success or -ENOMEM
2239 */
2240
2241static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2242{
2243	struct gfs2_journal_extent *jext;
2244
2245	if (!list_empty(&jd->extent_list)) {
2246		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
2247		if ((jext->dblock + jext->blocks) == dblock) {
2248			jext->blocks += blocks;
2249			return 0;
2250		}
2251	}
2252
2253	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2254	if (jext == NULL)
2255		return -ENOMEM;
2256	jext->dblock = dblock;
2257	jext->lblock = lblock;
2258	jext->blocks = blocks;
2259	list_add_tail(&jext->list, &jd->extent_list);
2260	jd->nr_extents++;
2261	return 0;
2262}
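
/*
 * Illustrative merge example (not from the original source): after
 * gfs2_add_jextent(jd, 0, 100, 8), the call gfs2_add_jextent(jd, 8, 108, 4)
 * sees that the last extent ends at dblock 108 and simply grows it to 12
 * blocks, whereas gfs2_add_jextent(jd, 12, 200, 4) allocates a second
 * extent, because dblock 200 is not physically contiguous with 100..111.
 */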
2263
2264/**
2265 * gfs2_map_journal_extents - Cache journal bmap info
2266 * @sdp: The super block
2267 * @jd: The journal to map
2268 *
2269 * Create a reusable "extent" mapping from all logical
2270 * blocks to all physical blocks for the given journal.  This will save
2271 * us time when writing journal blocks.  Most journals will have only one
2272 * extent that maps all their logical blocks.  That's because mkfs.gfs2
2273 * arranges the journal blocks sequentially to maximize performance.
2274 * So the extent would map the first block for the entire file length.
2275 * However, gfs2_jadd can happen while file activity is happening, so
2276 * those journals may not be sequential.  Less likely is the case where
2277 * the users created their own journals by mounting the metafs and
2278 * laying it out.  But it's still possible.  These journals might have
2279 * several extents.
2280 *
2281 * Returns: 0 on success, or error on failure
2282 */
2283
2284int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2285{
2286	u64 lblock = 0;
2287	u64 lblock_stop;
2288	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2289	struct buffer_head bh;
2290	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2291	u64 size;
2292	int rc;
2293	ktime_t start, end;
2294
2295	start = ktime_get();
2296	lblock_stop = i_size_read(jd->jd_inode) >> shift;
2297	size = (lblock_stop - lblock) << shift;
2298	jd->nr_extents = 0;
2299	WARN_ON(!list_empty(&jd->extent_list));
2300
2301	do {
2302		bh.b_state = 0;
2303		bh.b_blocknr = 0;
2304		bh.b_size = size;
2305		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2306		if (rc || !buffer_mapped(&bh))
2307			goto fail;
2308		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2309		if (rc)
2310			goto fail;
2311		size -= bh.b_size;
2312		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2313	} while (size > 0);
2314
2315	end = ktime_get();
2316	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2317		jd->nr_extents, ktime_ms_delta(end, start));
2318	return 0;
2319
2320fail:
2321	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2322		rc, jd->jd_jid,
2323		(unsigned long long)(i_size_read(jd->jd_inode) - size),
2324		jd->nr_extents);
2325	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2326		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2327		bh.b_state, (unsigned long long)bh.b_size);
2328	gfs2_free_journal_extents(jd);
2329	return rc;
2330}
2331
2332/**
2333 * gfs2_write_alloc_required - figure out if a write will require an allocation
2334 * @ip: the file being written to
2335 * @offset: the offset to write to
2336 * @len: the number of bytes being written
2337 *
2338 * Returns: 1 if an alloc is required, 0 otherwise
2339 */
2340
2341int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2342			      unsigned int len)
2343{
2344	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2345	struct buffer_head bh;
2346	unsigned int shift;
2347	u64 lblock, lblock_stop, size;
2348	u64 end_of_file;
2349
2350	if (!len)
2351		return 0;
2352
2353	if (gfs2_is_stuffed(ip)) {
2354		if (offset + len > gfs2_max_stuffed_size(ip))
2355			return 1;
2356		return 0;
2357	}
2358
2359	shift = sdp->sd_sb.sb_bsize_shift;
2360	BUG_ON(gfs2_is_dir(ip));
2361	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2362	lblock = offset >> shift;
2363	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2364	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2365		return 1;
2366
2367	size = (lblock_stop - lblock) << shift;
2368	do {
2369		bh.b_state = 0;
2370		bh.b_size = size;
2371		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2372		if (!buffer_mapped(&bh))
2373			return 1;
2374		size -= bh.b_size;
2375		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2376	} while (size > 0);
2377
2378	return 0;
2379}
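
/*
 * Worked example (illustrative): with 4KiB blocks (shift == 12), a write
 * of len = 100 bytes at offset = 8100 gives lblock = 8100 >> 12 = 1 and
 * lblock_stop = (8100 + 100 + 4095) >> 12 = 3, so blocks 1 and 2 are
 * checked; if either is unmapped, or the range extends past the end of
 * the file, the write requires an allocation.
 */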
2380
2381static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2382{
2383	struct gfs2_inode *ip = GFS2_I(inode);
2384	struct buffer_head *dibh;
2385	int error;
2386
2387	if (offset >= inode->i_size)
2388		return 0;
2389	if (offset + length > inode->i_size)
2390		length = inode->i_size - offset;
2391
2392	error = gfs2_meta_inode_buffer(ip, &dibh);
2393	if (error)
2394		return error;
2395	gfs2_trans_add_meta(ip->i_gl, dibh);
2396	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2397	       length);
2398	brelse(dibh);
2399	return 0;
2400}
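
/*
 * Example (illustrative): for a stuffed inode of size 100, the call
 * stuffed_zero_range(inode, 60, 1000) clamps the length to 40 and zeroes
 * bytes 60..99 of the data area directly following the on-disk dinode
 * header; an offset at or beyond i_size is a no-op.
 */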
2401
2402static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2403					 loff_t length)
2404{
2405	struct gfs2_sbd *sdp = GFS2_SB(inode);
2406	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2407	int error;
2408
2409	while (length) {
2410		struct gfs2_trans *tr;
2411		loff_t chunk;
2412		unsigned int offs;
2413
2414		chunk = length;
2415		if (chunk > max_chunk)
2416			chunk = max_chunk;
2417
2418		offs = offset & ~PAGE_MASK;
2419		if (offs && chunk > PAGE_SIZE)
2420			chunk = offs + ((chunk - offs) & PAGE_MASK);
2421
2422		truncate_pagecache_range(inode, offset, offset + chunk - 1);
2423		offset += chunk;
2424		length -= chunk;
2425
2426		tr = current->journal_info;
2427		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2428			continue;
2429
2430		gfs2_trans_end(sdp);
2431		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2432		if (error)
2433			return error;
2434	}
2435	return 0;
2436}
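
/*
 * Chunking example (illustrative, 4KiB pages): with offset = 1000 and an
 * initial chunk of 10000 bytes, offs = 1000 and the chunk is trimmed to
 * 1000 + ((10000 - 1000) & PAGE_MASK) = 1000 + 8192 = 9192, dropping the
 * trailing partial page; the transaction is renewed between chunks
 * whenever the current one has been dirtied.
 */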
2437
2438int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2439{
2440	struct inode *inode = file_inode(file);
2441	struct gfs2_inode *ip = GFS2_I(inode);
2442	struct gfs2_sbd *sdp = GFS2_SB(inode);
2443	int error;
2444
2445	if (gfs2_is_jdata(ip))
2446		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2447					 GFS2_JTRUNC_REVOKES);
2448	else
2449		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2450	if (error)
2451		return error;
2452
2453	if (gfs2_is_stuffed(ip)) {
2454		error = stuffed_zero_range(inode, offset, length);
2455		if (error)
2456			goto out;
2457	} else {
2458		unsigned int start_off, end_len, blocksize;
2459
2460		blocksize = i_blocksize(inode);
2461		start_off = offset & (blocksize - 1);
2462		end_len = (offset + length) & (blocksize - 1);
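		/*
		 * Worked example (illustrative, blocksize 4096): punching
		 * offset = 6000, length = 3000 gives start_off = 1904 and
		 * end_len = 9000 & 4095 = 808, so bytes 6000..8191 are
		 * zeroed in the first partial block and bytes 8192..8999 in
		 * the last; punch_hole() then finds no whole block to free.
		 */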
2463		if (start_off) {
2464			unsigned int len = length;
2465			if (length > blocksize - start_off)
2466				len = blocksize - start_off;
2467			error = gfs2_block_zero_range(inode, offset, len);
2468			if (error)
2469				goto out;
2470			if (start_off + length < blocksize)
2471				end_len = 0;
2472		}
2473		if (end_len) {
2474			error = gfs2_block_zero_range(inode,
2475				offset + length - end_len, end_len);
2476			if (error)
2477				goto out;
2478		}
2479	}
2480
2481	if (gfs2_is_jdata(ip)) {
2482		BUG_ON(!current->journal_info);
2483		gfs2_journaled_truncate_range(inode, offset, length);
2484	} else
2485		truncate_pagecache_range(inode, offset, offset + length - 1);
2486
2487	file_update_time(file);
2488	mark_inode_dirty(inode);
2489
2490	if (current->journal_info)
2491		gfs2_trans_end(sdp);
2492
2493	if (!gfs2_is_stuffed(ip))
2494		error = punch_hole(ip, offset, length);
2495
2496out:
2497	if (current->journal_info)
2498		gfs2_trans_end(sdp);
2499	return error;
2500}
v4.10.11
 
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/spinlock.h>
  11#include <linux/completion.h>
  12#include <linux/buffer_head.h>
  13#include <linux/blkdev.h>
  14#include <linux/gfs2_ondisk.h>
  15#include <linux/crc32.h>
 
 
  16
  17#include "gfs2.h"
  18#include "incore.h"
  19#include "bmap.h"
  20#include "glock.h"
  21#include "inode.h"
  22#include "meta_io.h"
  23#include "quota.h"
  24#include "rgrp.h"
  25#include "log.h"
  26#include "super.h"
  27#include "trans.h"
  28#include "dir.h"
  29#include "util.h"
 
  30#include "trace_gfs2.h"
  31
  32/* This doesn't need to be that large as max 64 bit pointers in a 4k
  33 * block is 512, so __u16 is fine for that. It saves stack space to
  34 * keep it small.
  35 */
  36struct metapath {
  37	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
  38	__u16 mp_list[GFS2_MAX_META_HEIGHT];
 
 
  39};
  40
  41struct strip_mine {
  42	int sm_first;
  43	unsigned int sm_height;
  44};
  45
  46/**
  47 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
  48 * @ip: the inode
  49 * @dibh: the dinode buffer
  50 * @block: the block number that was allocated
  51 * @page: The (optional) page. This is looked up if @page is NULL
  52 *
  53 * Returns: errno
  54 */
  55
  56static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
  57			       u64 block, struct page *page)
  58{
  59	struct inode *inode = &ip->i_inode;
  60	struct buffer_head *bh;
  61	int release = 0;
  62
  63	if (!page || page->index) {
  64		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
  65		if (!page)
  66			return -ENOMEM;
  67		release = 1;
  68	}
  69
  70	if (!PageUptodate(page)) {
  71		void *kaddr = kmap(page);
  72		u64 dsize = i_size_read(inode);
  73 
  74		if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
  75			dsize = dibh->b_size - sizeof(struct gfs2_dinode);
  76
  77		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
  78		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
  79		kunmap(page);
  80
  81		SetPageUptodate(page);
  82	}
  83
  84	if (!page_has_buffers(page))
  85		create_empty_buffers(page, BIT(inode->i_blkbits),
  86				     BIT(BH_Uptodate));
  87
  88	bh = page_buffers(page);
  89
  90	if (!buffer_mapped(bh))
  91		map_bh(bh, inode->i_sb, block);
  92
  93	set_buffer_uptodate(bh);
  94	if (!gfs2_is_jdata(ip))
 
 
  95		mark_buffer_dirty(bh);
  96	if (!gfs2_is_writeback(ip))
  97		gfs2_trans_add_data(ip->i_gl, bh);
  98
  99	if (release) {
 100		unlock_page(page);
 101		put_page(page);
 102	}
 103
 104	return 0;
 105}
 106
 107/**
 108 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 109 * @ip: The GFS2 inode to unstuff
 110 * @page: The (optional) page. This is looked up if the @page is NULL
 111 *
 112 * This routine unstuffs a dinode and returns it to a "normal" state such
 113 * that the height can be grown in the traditional way.
 114 *
 115 * Returns: errno
 116 */
 117
 118int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
 119{
 120	struct buffer_head *bh, *dibh;
 121	struct gfs2_dinode *di;
 122	u64 block = 0;
 123	int isdir = gfs2_is_dir(ip);
 124	int error;
 125
 126	down_write(&ip->i_rw_mutex);
 127
 128	error = gfs2_meta_inode_buffer(ip, &dibh);
 129	if (error)
 130		goto out;
 131
 132	if (i_size_read(&ip->i_inode)) {
 133		/* Get a free block, fill it with the stuffed data,
 134		   and write it out to disk */
 135
 136		unsigned int n = 1;
 137		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 138		if (error)
 139			goto out_brelse;
 140		if (isdir) {
 141			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
 142			error = gfs2_dir_get_new_buffer(ip, block, &bh);
 143			if (error)
 144				goto out_brelse;
 145			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
 146					      dibh, sizeof(struct gfs2_dinode));
 147			brelse(bh);
 148		} else {
 149			error = gfs2_unstuffer_page(ip, dibh, block, page);
 150			if (error)
 151				goto out_brelse;
 152		}
 153	}
 154
 155	/*  Set up the pointer to the new block  */
 156
 157	gfs2_trans_add_meta(ip->i_gl, dibh);
 158	di = (struct gfs2_dinode *)dibh->b_data;
 159	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 160
 161	if (i_size_read(&ip->i_inode)) {
 162		*(__be64 *)(di + 1) = cpu_to_be64(block);
 163		gfs2_add_inode_blocks(&ip->i_inode, 1);
 164		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 165	}
 166
 167	ip->i_height = 1;
 168	di->di_height = cpu_to_be16(1);
 169
 170out_brelse:
 171	brelse(dibh);
 172out:
 173	up_write(&ip->i_rw_mutex);
 174	return error;
 175}
 176
 177
 178/**
 179 * find_metapath - Find path through the metadata tree
 180 * @sdp: The superblock
 
 181 * @mp: The metapath to return the result in
 182 * @block: The disk block to look up
 183 * @height: The pre-calculated height of the metadata tree
 184 *
 185 *   This routine returns a struct metapath structure that defines a path
 186 *   through the metadata of inode "ip" to get to block "block".
 187 *
 188 *   Example:
 189 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
 190 *   filesystem with a blocksize of 4096.
 191 *
 192 *   find_metapath() would return a struct metapath structure set to:
 193 *   mp_offset = 101342453, mp_height = 3, mp_list[0] = 0, mp_list[1] = 48,
 194 *   and mp_list[2] = 165.
 195 *
 196 *   That means that in order to get to the block containing the byte at
 197 *   offset 101342453, we would load the indirect block pointed to by pointer
 198 *   0 in the dinode.  We would then load the indirect block pointed to by
 199 *   pointer 48 in that indirect block.  We would then load the data block
 200 *   pointed to by pointer 165 in that indirect block.
 201 *
 202 *             ----------------------------------------
 203 *             | Dinode |                             |
 204 *             |        |                            4|
 205 *             |        |0 1 2 3 4 5                 9|
 206 *             |        |                            6|
 207 *             ----------------------------------------
 208 *                       |
 209 *                       |
 210 *                       V
 211 *             ----------------------------------------
 212 *             | Indirect Block                       |
 213 *             |                                     5|
 214 *             |            4 4 4 4 4 5 5            1|
 215 *             |0           5 6 7 8 9 0 1            2|
 216 *             ----------------------------------------
 217 *                                |
 218 *                                |
 219 *                                V
 220 *             ----------------------------------------
 221 *             | Indirect Block                       |
 222 *             |                         1 1 1 1 1   5|
 223 *             |                         6 6 6 6 6   1|
 224 *             |0                        3 4 5 6 7   2|
 225 *             ----------------------------------------
 226 *                                           |
 227 *                                           |
 228 *                                           V
 229 *             ----------------------------------------
 230 *             | Data block containing offset         |
 231 *             |            101342453                 |
 232 *             |                                      |
 233 *             |                                      |
 234 *             ----------------------------------------
 235 *
 236 */
 237
 238static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 239			  struct metapath *mp, unsigned int height)
 240{
 241	unsigned int i;
 242
 
 243	for (i = height; i--;)
 244		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 245
 246}
 247
 248static inline unsigned int metapath_branch_start(const struct metapath *mp)
 249{
 250	if (mp->mp_list[0] == 0)
 251		return 2;
 252	return 1;
 253}
 254
 255/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 256 * metapointer - Return pointer to start of metadata in a buffer
 257 * @height: The metadata height (0 = dinode)
 258 * @mp: The metapath
 259 *
 260 * Return a pointer to the block number of the next height of the metadata
 261 * tree given a buffer containing the pointer to the current height of the
 262 * metadata tree.
 263 */
 264
 265static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 266{
 267	struct buffer_head *bh = mp->mp_bh[height];
 268	unsigned int head_size = (height > 0) ?
 269		sizeof(struct gfs2_meta_header) : sizeof(struct gfs2_dinode);
 270	return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height];
 
 
 
 
 
 
 
 
 
 
 
 
 
 271}
 272
 273static void gfs2_metapath_ra(struct gfs2_glock *gl,
 274			     const struct buffer_head *bh, const __be64 *pos)
 275{
 276	struct buffer_head *rabh;
 277	const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size);
 278	const __be64 *t;
 279
 280	for (t = pos; t < endp; t++) {
 
 
 281		if (!*t)
 282			continue;
 283
 284		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
 285		if (trylock_buffer(rabh)) {
 286			if (!buffer_uptodate(rabh)) {
 287				rabh->b_end_io = end_buffer_read_sync;
 288				submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META,
 289						rabh);
 
 290				continue;
 291			}
 292			unlock_buffer(rabh);
 293		}
 294		brelse(rabh);
 295	}
 296}
 297
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 298/**
 299 * lookup_metapath - Walk the metadata tree to a specific point
 300 * @ip: The inode
 301 * @mp: The metapath
 302 *
 303 * Assumes that the inode's buffer has already been looked up and
 304 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 305 * by find_metapath().
 306 *
 307 * If this function encounters part of the tree which has not been
 308 * allocated, it returns the current height of the tree at the point
 309 * at which it found the unallocated block. Blocks which are found are
 310 * added to the mp->mp_bh[] list.
 311 *
 312 * Returns: error or height of metadata tree
 313 */
 314
 315static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 316{
 317	unsigned int end_of_metadata = ip->i_height - 1;
 318	unsigned int x;
 319	__be64 *ptr;
 320	u64 dblock;
 
 
 
 
 
 
 
 
 
 
 
 
 
 321	int ret;
 322
 323	for (x = 0; x < end_of_metadata; x++) {
 324		ptr = metapointer(x, mp);
 325		dblock = be64_to_cpu(*ptr);
 326		if (!dblock)
 327			return x + 1;
 
 
 
 
 
 
 
 
 
 
 
 
 328
 329		ret = gfs2_meta_indirect_buffer(ip, x+1, dblock, &mp->mp_bh[x+1]);
 330		if (ret)
 331			return ret;
 
 332	}
 333
 334	return ip->i_height;
 335}
 336
 337static inline void release_metapath(struct metapath *mp)
 338{
 339	int i;
 340
 341	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
 342		if (mp->mp_bh[i] == NULL)
 343			break;
 344		brelse(mp->mp_bh[i]);
 
 345	}
 346}
 347
 348/**
 349 * gfs2_extent_length - Returns length of an extent of blocks
 350 * @start: Start of the buffer
 351 * @len: Length of the buffer in bytes
 352 * @ptr: Current position in the buffer
 353 * @limit: Max extent length to return (0 = unlimited)
 354 * @eob: Set to 1 if we hit "end of block"
 355 *
 356 * If the first block is zero (unallocated) it will return the number of
 357 * unallocated blocks in the extent, otherwise it will return the number
 358 * of contiguous blocks in the extent.
 359 *
 360 * Returns: The length of the extent (minimum of one block)
 361 */
 362
 363static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
 364{
 365	const __be64 *end = (start + len);
 366	const __be64 *first = ptr;
 367	u64 d = be64_to_cpu(*ptr);
 368
 369	*eob = 0;
 370	do {
 371		ptr++;
 372		if (ptr >= end)
 373			break;
 374		if (limit && --limit == 0)
 375			break;
 376		if (d)
 377			d++;
 378	} while(be64_to_cpu(*ptr) == d);
 379	if (ptr >= end)
 380		*eob = 1;
 381	return (ptr - first);
 382}
 383
 384static inline void bmap_lock(struct gfs2_inode *ip, int create)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 385{
 386	if (create)
 387		down_write(&ip->i_rw_mutex);
 388	else
 389		down_read(&ip->i_rw_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 390}
 391
 392static inline void bmap_unlock(struct gfs2_inode *ip, int create)
 
 
 
 
 
 
 
 
 
 
 
 
 
 393{
 394	if (create)
 395		up_write(&ip->i_rw_mutex);
 
 
 
 
 
 
 
 
 
 396	else
 397		up_read(&ip->i_rw_mutex);
 
 
 
 
 
 
 398}
 399
 400static inline __be64 *gfs2_indirect_init(struct metapath *mp,
 401					 struct gfs2_glock *gl, unsigned int i,
 402					 unsigned offset, u64 bn)
 403{
 404	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
 405		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
 406				 sizeof(struct gfs2_dinode)));
 407	BUG_ON(i < 1);
 408	BUG_ON(mp->mp_bh[i] != NULL);
 409	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
 410	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
 411	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 412	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 413	ptr += offset;
 414	*ptr = cpu_to_be64(bn);
 415	return ptr;
 416}
 417
 418enum alloc_state {
 419	ALLOC_DATA = 0,
 420	ALLOC_GROW_DEPTH = 1,
 421	ALLOC_GROW_HEIGHT = 2,
 422	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
 423};
 424
 425/**
 426 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 427 * @inode: The GFS2 inode
 428 * @lblock: The logical starting block of the extent
 429 * @bh_map: This is used to return the mapping details
 430 * @mp: The metapath
 431 * @sheight: The starting height (i.e. whats already mapped)
 432 * @height: The height to build to
 433 * @maxlen: The max number of data blocks to alloc
 434 *
 435 * In this routine we may have to alloc:
 436 *   i) Indirect blocks to grow the metadata tree height
 437 *  ii) Indirect blocks to fill in lower part of the metadata tree
 438 * iii) Data blocks
 439 *
 440 * The function is in two parts. The first part works out the total
 441 * number of blocks which we need. The second part does the actual
 442 * allocation asking for an extent at a time (if enough contiguous free
 443 * blocks are available, there will only be one request per bmap call)
 444 * and uses the state machine to initialise the blocks in order.
 
 
 
 
 
 
 
 
 
 445 *
 446 * Returns: errno on error
 447 */
 448
 449static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
 450			   struct buffer_head *bh_map, struct metapath *mp,
 451			   const unsigned int sheight,
 452			   const unsigned int height,
 453			   const size_t maxlen)
 454{
 455	struct gfs2_inode *ip = GFS2_I(inode);
 456	struct gfs2_sbd *sdp = GFS2_SB(inode);
 457	struct super_block *sb = sdp->sd_vfs;
 458	struct buffer_head *dibh = mp->mp_bh[0];
 459	u64 bn, dblock = 0;
 460	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
 461	unsigned dblks = 0;
 462	unsigned ptrs_per_blk;
 463	const unsigned end_of_metadata = height - 1;
 464	int ret;
 465	int eob = 0;
 466	enum alloc_state state;
 467	__be64 *ptr;
 468	__be64 zero_bn = 0;
 469
 470	BUG_ON(sheight < 1);
 471	BUG_ON(dibh == NULL);
 
 472
 473	gfs2_trans_add_meta(ip->i_gl, dibh);
 474
 475	if (height == sheight) {
 476		struct buffer_head *bh;
 477		/* Bottom indirect block exists, find unalloced extent size */
 478		ptr = metapointer(end_of_metadata, mp);
 479		bh = mp->mp_bh[end_of_metadata];
 480		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen,
 481					   &eob);
 482		BUG_ON(dblks < 1);
 483		state = ALLOC_DATA;
 484	} else {
 485		/* Need to allocate indirect blocks */
 486		ptrs_per_blk = height > 1 ? sdp->sd_inptrs : sdp->sd_diptrs;
 487		dblks = min(maxlen, (size_t)(ptrs_per_blk -
 488					     mp->mp_list[end_of_metadata]));
 489		if (height == ip->i_height) {
 490			/* Writing into existing tree, extend tree down */
 491			iblks = height - sheight;
 492			state = ALLOC_GROW_DEPTH;
 493		} else {
 494			/* Building up tree height */
 495			state = ALLOC_GROW_HEIGHT;
 496			iblks = height - ip->i_height;
 497			branch_start = metapath_branch_start(mp);
 498			iblks += (height - branch_start);
 499		}
 500	}
 501
 502	/* start of the second part of the function (state machine) */
 503
 504	blks = dblks + iblks;
 505	i = sheight;
 506	do {
 507		int error;
 508		n = blks - alloced;
 509		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 510		if (error)
 511			return error;
 512		alloced += n;
 513		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
 514			gfs2_trans_add_unrevoke(sdp, bn, n);
 515		switch (state) {
 516		/* Growing height of tree */
 517		case ALLOC_GROW_HEIGHT:
 518			if (i == 1) {
 519				ptr = (__be64 *)(dibh->b_data +
 520						 sizeof(struct gfs2_dinode));
 521				zero_bn = *ptr;
 522			}
 523			for (; i - 1 < height - ip->i_height && n > 0; i++, n--)
 
 524				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
 525			if (i - 1 == height - ip->i_height) {
 526				i--;
 527				gfs2_buffer_copy_tail(mp->mp_bh[i],
 528						sizeof(struct gfs2_meta_header),
 529						dibh, sizeof(struct gfs2_dinode));
 530				gfs2_buffer_clear_tail(dibh,
 531						sizeof(struct gfs2_dinode) +
 532						sizeof(__be64));
 533				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
 534					sizeof(struct gfs2_meta_header));
 535				*ptr = zero_bn;
 536				state = ALLOC_GROW_DEPTH;
 537				for(i = branch_start; i < height; i++) {
 538					if (mp->mp_bh[i] == NULL)
 539						break;
 540					brelse(mp->mp_bh[i]);
 541					mp->mp_bh[i] = NULL;
 542				}
 543				i = branch_start;
 544			}
 545			if (n == 0)
 546				break;
 547		/* Branching from existing tree */
 548		case ALLOC_GROW_DEPTH:
 549			if (i > 1 && i < height)
 550				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
 551			for (; i < height && n > 0; i++, n--)
 552				gfs2_indirect_init(mp, ip->i_gl, i,
 553						   mp->mp_list[i-1], bn++);
 554			if (i == height)
 555				state = ALLOC_DATA;
 556			if (n == 0)
 557				break;
 558		/* Tree complete, adding data blocks */
 559		case ALLOC_DATA:
 560			BUG_ON(n > dblks);
 561			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
 562			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
 563			dblks = n;
 564			ptr = metapointer(end_of_metadata, mp);
 565			dblock = bn;
 
 566			while (n-- > 0)
 567				*ptr++ = cpu_to_be64(bn++);
 568			if (buffer_zeronew(bh_map)) {
 569				ret = sb_issue_zeroout(sb, dblock, dblks,
 570						       GFP_NOFS);
 571				if (ret) {
 572					fs_err(sdp,
 573					       "Failed to zero data buffers\n");
 574					clear_buffer_zeronew(bh_map);
 575				}
 576			}
 577			break;
 578		}
 579	} while ((state != ALLOC_DATA) || !dblock);
 580
 581	ip->i_height = height;
 
 
 582	gfs2_add_inode_blocks(&ip->i_inode, alloced);
 583	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
 584	map_bh(bh_map, inode->i_sb, dblock);
 585	bh_map->b_size = dblks << inode->i_blkbits;
 586	set_buffer_new(bh_map);
 587	return 0;
 588}
 589
 
 
 590/**
 591 * gfs2_block_map - Map a block from an inode to a disk block
 592 * @inode: The inode
 593 * @lblock: The logical block number
 594 * @bh_map: The bh to be mapped
 595 * @create: True if its ok to alloc blocks to satify the request
 
 596 *
 597 * Sets buffer_mapped() if successful, sets buffer_boundary() if a
 598 * read of metadata will be required before the next block can be
 599 * mapped. Sets buffer_new() if new blocks were allocated.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 600 *
 601 * Returns: errno
 602 */
 603
 604int gfs2_block_map(struct inode *inode, sector_t lblock,
 605		   struct buffer_head *bh_map, int create)
 606{
 607	struct gfs2_inode *ip = GFS2_I(inode);
 608	struct gfs2_sbd *sdp = GFS2_SB(inode);
 609	unsigned int bsize = sdp->sd_sb.sb_bsize;
 610	const size_t maxlen = bh_map->b_size >> inode->i_blkbits;
 611	const u64 *arr = sdp->sd_heightsize;
 612	__be64 *ptr;
 613	u64 size;
 614	struct metapath mp;
 615	int ret;
 616	int eob;
 617	unsigned int len;
 618	struct buffer_head *bh;
 619	u8 height;
 620
 621	BUG_ON(maxlen == 0);
 
 622
 623	memset(mp.mp_bh, 0, sizeof(mp.mp_bh));
 624	bmap_lock(ip, create);
 625	clear_buffer_mapped(bh_map);
 626	clear_buffer_new(bh_map);
 627	clear_buffer_boundary(bh_map);
 628	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
 629	if (gfs2_is_dir(ip)) {
 630		bsize = sdp->sd_jbsize;
 631		arr = sdp->sd_jheightsize;
 632	}
 633
 634	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
 635	if (ret)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 636		goto out;
 
 
 
 
 
 
 
 
 637
 638	height = ip->i_height;
 639	size = (lblock + 1) * bsize;
 640	while (size > arr[height])
 641		height++;
 642	find_metapath(sdp, lblock, &mp, height);
 643	ret = 1;
 644	if (height > ip->i_height || gfs2_is_stuffed(ip))
 645		goto do_alloc;
 646	ret = lookup_metapath(ip, &mp);
 647	if (ret < 0)
 648		goto out;
 649	if (ret != ip->i_height)
 
 
 650		goto do_alloc;
 651	ptr = metapointer(ip->i_height - 1, &mp);
 
 652	if (*ptr == 0)
 653		goto do_alloc;
 654	map_bh(bh_map, inode->i_sb, be64_to_cpu(*ptr));
 655	bh = mp.mp_bh[ip->i_height - 1];
 656	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, maxlen, &eob);
 657	bh_map->b_size = (len << inode->i_blkbits);
 
 
 
 
 658	if (eob)
 659		set_buffer_boundary(bh_map);
 660	ret = 0;
 661out:
 662	release_metapath(&mp);
 663	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
 664	bmap_unlock(ip, create);
 665	return ret;
 666
 667do_alloc:
 668	/* All allocations are done here, firstly check create flag */
 669	if (!create) {
 670		BUG_ON(gfs2_is_stuffed(ip));
 671		ret = 0;
 672		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 673	}
 674
 675	/* At this point ret is the tree depth of already allocated blocks */
 676	ret = gfs2_bmap_alloc(inode, lblock, bh_map, &mp, ret, height, maxlen);
 677	goto out;
 678}
 679
 680/*
 681 * Deprecated: do not use in new code
 
 
 
 
 
 
 
 
 682 */
 683int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 684{
 685	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 
 
 686	int ret;
 687	int create = *new;
 688
 689	BUG_ON(!extlen);
 690	BUG_ON(!dblock);
 691	BUG_ON(!new);
 
 692
 693	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
 694	ret = gfs2_block_map(inode, lblock, &bh, create);
 695	*extlen = bh.b_size >> inode->i_blkbits;
 696	*dblock = bh.b_blocknr;
 697	if (buffer_new(&bh))
 698		*new = 1;
 699	else
 700		*new = 0;
 701	return ret;
 702}
 703
 704/**
 705 * do_strip - Look for a layer a particular layer of the file and strip it off
 706 * @ip: the inode
 707 * @dibh: the dinode buffer
 708 * @bh: A buffer of pointers
 709 * @top: The first pointer in the buffer
 710 * @bottom: One more than the last pointer
 711 * @height: the height this buffer is at
 712 * @sm: a pointer to a struct strip_mine
 713 *
 714 * Returns: errno
 715 */
 716
 717static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
 718		    struct buffer_head *bh, __be64 *top, __be64 *bottom,
 719		    unsigned int height, struct strip_mine *sm)
 720{
 721	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 722	struct gfs2_rgrp_list rlist;
 723	u64 bn, bstart;
 724	u32 blen, btotal;
 725	__be64 *p;
 726	unsigned int rg_blocks = 0;
 727	int metadata;
 728	unsigned int revokes = 0;
 729	int x;
 730	int error;
 731
 732	error = gfs2_rindex_update(sdp);
 
 733	if (error)
 734		return error;
 
 
 735
 736	if (!*top)
 737		sm->sm_first = 0;
 
 
 
 
 738
 739	if (height != sm->sm_height)
 740		return 0;
 
 
 
 
 741
 742	if (sm->sm_first) {
 743		top++;
 744		sm->sm_first = 0;
 745	}
 746
 747	metadata = (height != ip->i_height - 1);
 748	if (metadata)
 749		revokes = (height) ? sdp->sd_inptrs : sdp->sd_diptrs;
 750	else if (ip->i_depth)
 751		revokes = sdp->sd_inptrs;
 752
 753	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 754	bstart = 0;
 755	blen = 0;
 
 756
 757	for (p = top; p < bottom; p++) {
 758		if (!*p)
 759			continue;
 
 
 
 760
 761		bn = be64_to_cpu(*p);
 
 
 762
 763		if (bstart + blen == bn)
 764			blen++;
 765		else {
 766			if (bstart)
 767				gfs2_rlist_add(ip, &rlist, bstart);
 
 
 768
 769			bstart = bn;
 770			blen = 1;
 771		}
 772	}
 773
 774	if (bstart)
 775		gfs2_rlist_add(ip, &rlist, bstart);
 776	else
 777		goto out; /* Nothing to do */
 778
 779	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 
 780
 781	for (x = 0; x < rlist.rl_rgrps; x++) {
 782		struct gfs2_rgrpd *rgd;
 783		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
 784		rg_blocks += rgd->rd_length;
 785	}
 786
 787	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
 788	if (error)
 789		goto out_rlist;
 
 
 
 
 
 
 790
 791	if (gfs2_rs_active(&ip->i_res)) /* needs to be done with the rgrp glock held */
 792		gfs2_rs_deltree(&ip->i_res);
 793
 794	error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE +
 795				 RES_INDIRECT + RES_STATFS + RES_QUOTA,
 796				 revokes);
 797	if (error)
 798		goto out_rg_gunlock;
 
 
 
 
 
 
 
 799
 800	down_write(&ip->i_rw_mutex);
 
 
 801
 802	gfs2_trans_add_meta(ip->i_gl, dibh);
 803	gfs2_trans_add_meta(ip->i_gl, bh);
 
 
 
 
 
 
 804
 805	bstart = 0;
 806	blen = 0;
 807	btotal = 0;
 
 808
 809	for (p = top; p < bottom; p++) {
 810		if (!*p)
 811			continue;
 
 
 
 
 
 
 
 812
 813		bn = be64_to_cpu(*p);
 814
 815		if (bstart + blen == bn)
 816			blen++;
 817		else {
 818			if (bstart) {
 819				__gfs2_free_blocks(ip, bstart, blen, metadata);
 820				btotal += blen;
 821			}
 
 822
 823			bstart = bn;
 824			blen = 1;
 825		}
 826
 827		*p = 0;
 828		gfs2_add_inode_blocks(&ip->i_inode, -1);
 829	}
 830	if (bstart) {
 831		__gfs2_free_blocks(ip, bstart, blen, metadata);
 832		btotal += blen;
 833	}
 834
 835	gfs2_statfs_change(sdp, 0, +btotal, 0);
 836	gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
 837			  ip->i_inode.i_gid);
 838
 839	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
 840
 841	gfs2_dinode_out(ip, dibh->b_data);
 842
 843	up_write(&ip->i_rw_mutex);
 844
 
 845	gfs2_trans_end(sdp);
 
 
 
 
 
 
 846
 847out_rg_gunlock:
 848	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
 849out_rlist:
 850	gfs2_rlist_free(&rlist);
 851out:
 852	return error;
 853}
 854
 855/**
 856 * recursive_scan - recursively scan through the end of a file
 857 * @ip: the inode
 858 * @dibh: the dinode buffer
 859 * @mp: the path through the metadata to the point to start
 860 * @height: the height the recursion is at
 861 * @block: the indirect block to look at
 862 * @first: 1 if this is the first block
 863 * @sm: data opaque to this function to pass to @bc
 864 *
 865 * When this is first called @height and @block should be zero and
 866 * @first should be 1.
 867 *
 868 * Returns: errno
 869 */
 870
 871static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh,
 872			  struct metapath *mp, unsigned int height,
 873			  u64 block, int first, struct strip_mine *sm)
 874{
 875	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 876	struct buffer_head *bh = NULL;
 877	__be64 *top, *bottom;
 878	u64 bn;
 879	int error;
 880	int mh_size = sizeof(struct gfs2_meta_header);
 881
 882	if (!height) {
 883		error = gfs2_meta_inode_buffer(ip, &bh);
 884		if (error)
 885			return error;
 886		dibh = bh;
 887
 888		top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0];
 889		bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs;
 890	} else {
 891		error = gfs2_meta_indirect_buffer(ip, height, block, &bh);
 892		if (error)
 893			return error;
 894
 895		top = (__be64 *)(bh->b_data + mh_size) +
 896				  (first ? mp->mp_list[height] : 0);
 
 897
 898		bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 899	}
 900
 901	error = do_strip(ip, dibh, bh, top, bottom, height, sm);
 902	if (error)
 903		goto out;
 
 
 
 
 
 
 
 904
 905	if (height < ip->i_height - 1) {
 
 
 
 
 906
 907		gfs2_metapath_ra(ip->i_gl, bh, top);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 908
 909		for (; top < bottom; top++, first = 0) {
 910			if (!*top)
 911				continue;
 912
 913			bn = be64_to_cpu(*top);
 
 914
 915			error = recursive_scan(ip, dibh, mp, height + 1, bn,
 916					       first, sm);
 917			if (error)
 918				break;
 919		}
 920	}
 921out:
 922	brelse(bh);
 923	return error;
 924}
 925
 
 
 
 
 926
 927/**
 928 * gfs2_block_truncate_page - Deal with zeroing out data for truncate
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 929 *
 930 * This is partly borrowed from ext3.
 931 */
 932static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
 
 
 933{
 934	struct inode *inode = mapping->host;
 935	struct gfs2_inode *ip = GFS2_I(inode);
 936	unsigned long index = from >> PAGE_SHIFT;
 937	unsigned offset = from & (PAGE_SIZE-1);
 938	unsigned blocksize, iblock, length, pos;
 939	struct buffer_head *bh;
 940	struct page *page;
 941	int err;
 942
 943	page = find_or_create_page(mapping, index, GFP_NOFS);
 944	if (!page)
 945		return 0;
 
 946
 947	blocksize = inode->i_sb->s_blocksize;
 948	length = blocksize - (offset & (blocksize - 1));
 949	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 950
 951	if (!page_has_buffers(page))
 952		create_empty_buffers(page, blocksize, 0);
 953
 954	/* Find the buffer that contains "offset" */
 955	bh = page_buffers(page);
 956	pos = blocksize;
 957	while (offset >= pos) {
 958		bh = bh->b_this_page;
 959		iblock++;
 960		pos += blocksize;
 961	}
 
 
 962
 963	err = 0;
 
 
 
 
 
 
 
 
 
 
 964
 965	if (!buffer_mapped(bh)) {
 966		gfs2_block_map(inode, iblock, bh, 0);
 967		/* unmapped? It's a hole - nothing to do */
 968		if (!buffer_mapped(bh))
 969			goto unlock;
 970	}
 971
 972	/* Ok, it's mapped. Make sure it's up-to-date */
 973	if (PageUptodate(page))
 974		set_buffer_uptodate(bh);
 
 
 
 
 
 975
 976	if (!buffer_uptodate(bh)) {
 977		err = -EIO;
 978		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 979		wait_on_buffer(bh);
 980		/* Uhhuh. Read error. Complain and punt. */
 981		if (!buffer_uptodate(bh))
 982			goto unlock;
 983		err = 0;
 984	}
 985
 986	if (!gfs2_is_writeback(ip))
 987		gfs2_trans_add_data(ip->i_gl, bh);
 
 
 
 
 
 
 
 
 988
 989	zero_user(page, offset, length);
 990	mark_buffer_dirty(bh);
 991unlock:
 992	unlock_page(page);
 993	put_page(page);
 994	return err;
 995}
 996
 997#define GFS2_JTRUNC_REVOKES 8192
 998
 999/**
1000 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1001 * @inode: The inode being truncated
1002 * @oldsize: The original (larger) size
1003 * @newsize: The new smaller size
1004 *
1005 * With jdata files, we have to journal a revoke for each block which is
1006 * truncated. As a result, we need to split this into separate transactions
1007 * if the number of pages being truncated gets too large.
1008 */
1009
1010static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1011{
1012	struct gfs2_sbd *sdp = GFS2_SB(inode);
1013	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1014	u64 chunk;
1015	int error;
1016
1017	while (oldsize != newsize) {
 
 
 
1018		chunk = oldsize - newsize;
1019		if (chunk > max_chunk)
1020			chunk = max_chunk;
 
 
 
 
 
1021		truncate_pagecache(inode, oldsize - chunk);
1022		oldsize -= chunk;
 
 
 
 
 
1023		gfs2_trans_end(sdp);
1024		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1025		if (error)
1026			return error;
1027	}
1028
1029	return 0;
1030}
1031
1032static int trunc_start(struct inode *inode, u64 oldsize, u64 newsize)
1033{
1034	struct gfs2_inode *ip = GFS2_I(inode);
1035	struct gfs2_sbd *sdp = GFS2_SB(inode);
1036	struct address_space *mapping = inode->i_mapping;
1037	struct buffer_head *dibh;
1038	int journaled = gfs2_is_jdata(ip);
 
1039	int error;
1040
1041	if (journaled)
1042		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1043	else
1044		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1045	if (error)
1046		return error;
1047
1048	error = gfs2_meta_inode_buffer(ip, &dibh);
1049	if (error)
1050		goto out;
1051
1052	gfs2_trans_add_meta(ip->i_gl, dibh);
1053
1054	if (gfs2_is_stuffed(ip)) {
1055		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1056	} else {
1057		if (newsize & (u64)(sdp->sd_sb.sb_bsize - 1)) {
1058			error = gfs2_block_truncate_page(mapping, newsize);
 
 
 
1059			if (error)
1060				goto out_brelse;
1061		}
1062		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1063	}
1064
1065	i_size_write(inode, newsize);
1066	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1067	gfs2_dinode_out(ip, dibh->b_data);
1068
1069	if (journaled)
1070		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1071	else
1072		truncate_pagecache(inode, newsize);
1073
1074	if (error) {
1075		brelse(dibh);
1076		return error;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1077	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1078
1079out_brelse:
1080	brelse(dibh);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1081out:
1082	gfs2_trans_end(sdp);
1083	return error;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1084}
1085
1086static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1087{
1088	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1089	unsigned int height = ip->i_height;
1090	u64 lblock;
1091	struct metapath mp;
1092	int error;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1093
1094	if (!size)
1095		lblock = 0;
1096	else
1097		lblock = (size - 1) >> sdp->sd_sb.sb_bsize_shift;
 
 
 
 
 
 
 
 
 
1098
1099	find_metapath(sdp, lblock, &mp, ip->i_height);
1100	error = gfs2_rindex_update(sdp);
1101	if (error)
1102		return error;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1103
1104	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1105	if (error)
1106		return error;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1107
1108	while (height--) {
1109		struct strip_mine sm;
1110		sm.sm_first = !!size;
1111		sm.sm_height = height;
 
 
 
1112
1113		error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm);
1114		if (error)
 
 
 
 
 
1115			break;
 
1116	}
1117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1118	gfs2_quota_unhold(ip);
1119
1120	return error;
 
1121}
1122
1123static int trunc_end(struct gfs2_inode *ip)
1124{
1125	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1126	struct buffer_head *dibh;
1127	int error;
1128
1129	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1130	if (error)
1131		return error;
1132
1133	down_write(&ip->i_rw_mutex);
1134
1135	error = gfs2_meta_inode_buffer(ip, &dibh);
1136	if (error)
1137		goto out;
1138
1139	if (!i_size_read(&ip->i_inode)) {
1140		ip->i_height = 0;
1141		ip->i_goal = ip->i_no_addr;
1142		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1143		gfs2_ordered_del_inode(ip);
1144	}
1145	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1146	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
1147
1148	gfs2_trans_add_meta(ip->i_gl, dibh);
1149	gfs2_dinode_out(ip, dibh->b_data);
1150	brelse(dibh);
1151
1152out:
1153	up_write(&ip->i_rw_mutex);
1154	gfs2_trans_end(sdp);
1155	return error;
1156}
1157
1158/**
1159 * do_shrink - make a file smaller
1160 * @inode: the inode
1161 * @oldsize: the current inode size
1162 * @newsize: the size to make the file
1163 *
1164 * Called with an exclusive lock on @inode. The @size must
1165 * be equal to or smaller than the current inode size.
1166 *
1167 * Returns: errno
1168 */
1169
1170static int do_shrink(struct inode *inode, u64 oldsize, u64 newsize)
1171{
1172	struct gfs2_inode *ip = GFS2_I(inode);
1173	int error;
1174
1175	error = trunc_start(inode, oldsize, newsize);
1176	if (error < 0)
1177		return error;
1178	if (gfs2_is_stuffed(ip))
1179		return 0;
1180
1181	error = trunc_dealloc(ip, newsize);
1182	if (error == 0)
1183		error = trunc_end(ip);
1184
1185	return error;
1186}
1187
void gfs2_trim_blocks(struct inode *inode)
{
	u64 size = inode->i_size;
	int ret;

	ret = do_shrink(inode, size, size);
	WARN_ON(ret != 0);
}
/**
 * do_grow - Touch and update inode size
 * @inode: The inode
 * @size: The new size
 *
 * This function updates the timestamps on the inode and
 * may also increase the size of the inode. This function
 * must not be called with @size any smaller than the current
 * inode size.
 *
 * Although it is not strictly required to unstuff files here,
 * earlier versions of GFS2 had a bug in the stuffed file reading
 * code which would result in a buffer overrun if the size was larger
 * than the max stuffed file size. In order to prevent this from
 * occurring, such files are unstuffed, but in other cases we can
 * just update the inode size directly.
 *
 * Returns: 0 on success, or -ve on error
 */

static int do_grow(struct inode *inode, u64 size)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .target = 1, };
	struct buffer_head *dibh;
	int error;
	int unstuff = 0;

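	/* Growing a stuffed inode beyond what fits next to the dinode
	 * header means it has to be unstuffed, so reserve a block (and
	 * check quota) for that up front. */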
	if (gfs2_is_stuffed(ip) &&
	    (size > (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)))) {
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto do_grow_qunlock;
		unstuff = 1;
	}

	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
				  0 : RES_QUOTA), 0);
	if (error)
		goto do_grow_release;

	if (unstuff) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (error)
			goto do_end_trans;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		goto do_end_trans;

	i_size_write(inode, size);
	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
	gfs2_trans_add_meta(ip->i_gl, dibh);
	gfs2_dinode_out(ip, dibh->b_data);
	brelse(dibh);

do_end_trans:
	gfs2_trans_end(sdp);
do_grow_release:
	if (unstuff) {
		gfs2_inplace_release(ip);
do_grow_qunlock:
		gfs2_quota_unlock(ip);
	}
	return error;
}
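/*
 * Sketch of how this is typically reached (the actual caller lives in
 * inode.c): gfs2_setattr() hands off ATTR_SIZE changes, roughly as:
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		error = gfs2_setattr_size(inode, attr->ia_size);
 */
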
/**
 * gfs2_setattr_size - make a file a given size
 * @inode: the inode
 * @newsize: the size to make the file
 *
 * The file size can grow, shrink, or stay the same size. This
 * is called holding i_mutex and an exclusive glock on the inode
 * in question.
 *
 * Returns: errno
 */

int gfs2_setattr_size(struct inode *inode, u64 newsize)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;
	u64 oldsize;

	BUG_ON(!S_ISREG(inode->i_mode));

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	inode_dio_wait(inode);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	oldsize = inode->i_size;
	if (newsize >= oldsize) {
		ret = do_grow(inode, newsize);
		goto out;
	}

	ret = do_shrink(inode, oldsize, newsize);
out:
	gfs2_rsqa_delete(ip, NULL);
	return ret;
}
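/*
 * Resume a truncate that was interrupted, e.g. by a crash: inodes found
 * with GFS2_DIF_TRUNC_IN_PROG set on disk are finished off here by
 * redoing the deallocation from the current on-disk size and letting
 * trunc_end() clear the flag.
 */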
int gfs2_truncatei_resume(struct gfs2_inode *ip)
{
	int error;

	error = trunc_dealloc(ip, i_size_read(&ip->i_inode));
	if (!error)
		error = trunc_end(ip);
	return error;
}
int gfs2_file_dealloc(struct gfs2_inode *ip)
{
	return trunc_dealloc(ip, 0);
}

/**
 * gfs2_free_journal_extents - Free cached journal bmap info
 * @jd: The journal
 *
 */

void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
{
	struct gfs2_journal_extent *jext;

	while (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
		list_del(&jext->list);
		kfree(jext);
	}
}
/**
 * gfs2_add_jextent - Add or merge a new extent to extent cache
 * @jd: The journal descriptor
 * @lblock: The logical block at start of new extent
 * @dblock: The physical block at start of new extent
 * @blocks: Size of extent in fs blocks
 *
 * Returns: 0 on success or -ENOMEM
 */

static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
{
	struct gfs2_journal_extent *jext;

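	/* If the new range carries on physically from where the last
	 * cached extent ends, extend that extent rather than allocating
	 * a new one. */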
	if (!list_empty(&jd->extent_list)) {
		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
		if ((jext->dblock + jext->blocks) == dblock) {
			jext->blocks += blocks;
			return 0;
		}
	}

	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
	if (jext == NULL)
		return -ENOMEM;
	jext->dblock = dblock;
	jext->lblock = lblock;
	jext->blocks = blocks;
	list_add_tail(&jext->list, &jd->extent_list);
	jd->nr_extents++;
	return 0;
}
/**
 * gfs2_map_journal_extents - Cache journal bmap info
 * @sdp: The super block
 * @jd: The journal to map
 *
 * Create a reusable "extent" mapping from all logical
 * blocks to all physical blocks for the given journal.  This will save
 * us time when writing journal blocks.  Most journals will have only one
 * extent that maps all their logical blocks.  That's because mkfs.gfs2
 * arranges the journal blocks sequentially to maximize performance.
 * So a single extent would map the entire length of the file.
 * However, gfs2_jadd can add journals while the filesystem is in use,
 * so those journals may not be sequential.  Less likely is the case
 * where users created their own journals by mounting the metafs and
 * laying them out.  But it's still possible.  These journals might
 * have several extents.
 *
 * Returns: 0 on success, or error on failure
 */
int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
	u64 lblock = 0;
	u64 lblock_stop;
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct buffer_head bh;
	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
	u64 size;
	int rc;

	lblock_stop = i_size_read(jd->jd_inode) >> shift;
	size = (lblock_stop - lblock) << shift;
	jd->nr_extents = 0;
	WARN_ON(!list_empty(&jd->extent_list));

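	/* Map the journal in as few extents as possible: b_size is set
	 * to the whole remaining range, so gfs2_block_map() can return
	 * a single mapping that covers many contiguous blocks. */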
	do {
		bh.b_state = 0;
		bh.b_blocknr = 0;
		bh.b_size = size;
		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
		if (rc || !buffer_mapped(&bh))
			goto fail;
		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
		if (rc)
			goto fail;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while (size > 0);

	fs_info(sdp, "journal %u mapped with %u extents\n", jd->jd_jid,
		jd->nr_extents);
	return 0;

fail:
	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
		rc, jd->jd_jid,
		(unsigned long long)(i_size_read(jd->jd_inode) - size),
		jd->nr_extents);
	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
		bh.b_state, (unsigned long long)bh.b_size);
	gfs2_free_journal_extents(jd);
	return rc;
}
/**
 * gfs2_write_alloc_required - figure out if a write will require an allocation
 * @ip: the file being written to
 * @offset: the offset to write to
 * @len: the number of bytes being written
 *
 * Returns: 1 if an alloc is required, 0 otherwise
 */

int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
			      unsigned int len)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct buffer_head bh;
	unsigned int shift;
	u64 lblock, lblock_stop, size;
	u64 end_of_file;

	if (!len)
		return 0;

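	/* A stuffed inode has no mapped blocks: any write that no longer
	 * fits beside the dinode header will force an allocation when
	 * the inode is unstuffed. */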
	if (gfs2_is_stuffed(ip)) {
		if (offset + len >
		    sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
			return 1;
		return 0;
	}

	shift = sdp->sd_sb.sb_bsize_shift;
	BUG_ON(gfs2_is_dir(ip));
	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
	lblock = offset >> shift;
	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
	if (lblock_stop > end_of_file)
		return 1;

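	/* The range lies within the current file size, so walk it with
	 * bmap; any unmapped hole means an allocation is required. */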
	size = (lblock_stop - lblock) << shift;
	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
		if (!buffer_mapped(&bh))
			return 1;
		size -= bh.b_size;
		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
	} while (size > 0);

	return 0;
}
