   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   4 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   5 */
   6
   7#include <linux/spinlock.h>
   8#include <linux/completion.h>
   9#include <linux/buffer_head.h>
  10#include <linux/blkdev.h>
  11#include <linux/gfs2_ondisk.h>
  12#include <linux/crc32.h>
  13#include <linux/iomap.h>
  14#include <linux/ktime.h>
  15
  16#include "gfs2.h"
  17#include "incore.h"
  18#include "bmap.h"
  19#include "glock.h"
  20#include "inode.h"
  21#include "meta_io.h"
  22#include "quota.h"
  23#include "rgrp.h"
  24#include "log.h"
  25#include "super.h"
  26#include "trans.h"
  27#include "dir.h"
  28#include "util.h"
  29#include "aops.h"
  30#include "trace_gfs2.h"
  31
  32/* This doesn't need to be that large as the maximum number of 64-bit
  33 * pointers in a 4k block is 512, so __u16 is fine for that. It saves
  34 * stack space to keep it small.
  35 */
  36struct metapath {
  37	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
  38	__u16 mp_list[GFS2_MAX_META_HEIGHT];
  39	int mp_fheight; /* find_metapath height */
  40	int mp_aheight; /* actual height (lookup height) */
  41};
  42
  43static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length);
  44
  45/**
  46 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
  47 * @ip: the inode
  48 * @dibh: the dinode buffer
  49 * @block: the block number that was allocated
  50 * @page: The locked page to unstuff the inode data into
  51 *
  52 * Returns: errno
  53 */
  54
  55static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
  56			       u64 block, struct page *page)
  57{
  58	struct inode *inode = &ip->i_inode;
  59
  60	if (!PageUptodate(page)) {
  61		void *kaddr = kmap(page);
  62		u64 dsize = i_size_read(inode);
  63 
  64		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
  65		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
  66		kunmap(page);
  67
  68		SetPageUptodate(page);
  69	}
  70
  71	if (gfs2_is_jdata(ip)) {
  72		struct buffer_head *bh;
  73
  74		if (!page_has_buffers(page))
  75			create_empty_buffers(page, BIT(inode->i_blkbits),
  76					     BIT(BH_Uptodate));
  77
  78		bh = page_buffers(page);
  79		if (!buffer_mapped(bh))
  80			map_bh(bh, inode->i_sb, block);
  81
  82		set_buffer_uptodate(bh);
  83		gfs2_trans_add_data(ip->i_gl, bh);
  84	} else {
  85		set_page_dirty(page);
  86		gfs2_ordered_add_inode(ip);
  87	}
  88
  89	return 0;
  90}
  91
  92static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct page *page)
  93{
  94	struct buffer_head *bh, *dibh;
  95	struct gfs2_dinode *di;
  96	u64 block = 0;
  97	int isdir = gfs2_is_dir(ip);
  98	int error;
  99
 100	error = gfs2_meta_inode_buffer(ip, &dibh);
 101	if (error)
 102		return error;
 103
 104	if (i_size_read(&ip->i_inode)) {
 105		/* Get a free block, fill it with the stuffed data,
 106		   and write it out to disk */
 107
 108		unsigned int n = 1;
 109		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 110		if (error)
 111			goto out_brelse;
 112		if (isdir) {
 113			gfs2_trans_remove_revoke(GFS2_SB(&ip->i_inode), block, 1);
 114			error = gfs2_dir_get_new_buffer(ip, block, &bh);
 115			if (error)
 116				goto out_brelse;
 117			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
 118					      dibh, sizeof(struct gfs2_dinode));
 119			brelse(bh);
 120		} else {
 121			error = gfs2_unstuffer_page(ip, dibh, block, page);
 122			if (error)
 123				goto out_brelse;
 124		}
 125	}
 126
 127	/*  Set up the pointer to the new block  */
 128
 129	gfs2_trans_add_meta(ip->i_gl, dibh);
 130	di = (struct gfs2_dinode *)dibh->b_data;
 131	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 132
 133	if (i_size_read(&ip->i_inode)) {
 134		*(__be64 *)(di + 1) = cpu_to_be64(block);
 135		gfs2_add_inode_blocks(&ip->i_inode, 1);
 136		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 137	}
 138
 139	ip->i_height = 1;
 140	di->di_height = cpu_to_be16(1);
 141
 142out_brelse:
 143	brelse(dibh);
 144	return error;
 145}
 146
 147/**
 148 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 149 * @ip: The GFS2 inode to unstuff
 150 *
 151 * This routine unstuffs a dinode and returns it to a "normal" state such
 152 * that the height can be grown in the traditional way.
 153 *
 154 * Returns: errno
 155 */
 156
 157int gfs2_unstuff_dinode(struct gfs2_inode *ip)
 158{
 159	struct inode *inode = &ip->i_inode;
 160	struct page *page;
 161	int error;
 162
 163	down_write(&ip->i_rw_mutex);
 164	page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
 165	error = -ENOMEM;
 166	if (!page)
 167		goto out;
 168	error = __gfs2_unstuff_inode(ip, page);
 169	unlock_page(page);
 170	put_page(page);
 171out:
 172	up_write(&ip->i_rw_mutex);
 173	return error;
 174}
 175
 176/**
 177 * find_metapath - Find path through the metadata tree
 178 * @sdp: The superblock
 179 * @block: The disk block to look up
 180 * @mp: The metapath to return the result in
 181 * @height: The pre-calculated height of the metadata tree
 182 *
 183 *   This routine returns a struct metapath structure that defines a path
 184 *   through the metadata of inode "ip" to get to block "block".
 185 *
 186 *   Example:
 187 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
 188 *   filesystem with a blocksize of 4096.
 189 *
 190 *   find_metapath() would return a struct metapath structure set to:
 191 *   mp_fheight = 3, mp_list[0] = 0, mp_list[1] = 48, and mp_list[2] = 165.
 192 *
 193 *   That means that in order to get to the block containing the byte at
 194 *   offset 101342453, we would load the indirect block pointed to by pointer
 195 *   0 in the dinode.  We would then load the indirect block pointed to by
 196 *   pointer 48 in that indirect block.  We would then load the data block
 197 *   pointed to by pointer 165 in that indirect block.
 198 *
 199 *             ----------------------------------------
 200 *             | Dinode |                             |
 201 *             |        |                            4|
 202 *             |        |0 1 2 3 4 5                 9|
 203 *             |        |                            6|
 204 *             ----------------------------------------
 205 *                       |
 206 *                       |
 207 *                       V
 208 *             ----------------------------------------
 209 *             | Indirect Block                       |
 210 *             |                                     5|
 211 *             |            4 4 4 4 4 5 5            1|
 212 *             |0           5 6 7 8 9 0 1            2|
 213 *             ----------------------------------------
 214 *                                |
 215 *                                |
 216 *                                V
 217 *             ----------------------------------------
 218 *             | Indirect Block                       |
 219 *             |                         1 1 1 1 1   5|
 220 *             |                         6 6 6 6 6   1|
 221 *             |0                        3 4 5 6 7   2|
 222 *             ----------------------------------------
 223 *                                           |
 224 *                                           |
 225 *                                           V
 226 *             ----------------------------------------
 227 *             | Data block containing offset         |
 228 *             |            101342453                 |
 229 *             |                                      |
 230 *             |                                      |
 231 *             ----------------------------------------
 232 *
 233 */
 234
 235static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 236			  struct metapath *mp, unsigned int height)
 237{
 238	unsigned int i;
 239
 240	mp->mp_fheight = height;
 241	for (i = height; i--;)
 242		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 243}
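/*
 * A sketch of what the loop above computes, assuming the common 4 KiB
 * block size, where an indirect block holds
 * (4096 - sizeof(struct gfs2_meta_header)) / 8 = 509 pointers, and a
 * tree of height 3.  Each do_div() peels off one index, deepest level
 * first:
 *
 *	mp->mp_list[2] = block % 509;  block /= 509;
 *	mp->mp_list[1] = block % 509;  block /= 509;
 *	mp->mp_list[0] = block;
 *
 * 509 is only the 4 KiB case; the real per-block pointer count is
 * sdp->sd_inptrs.
 */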
 244
 245static inline unsigned int metapath_branch_start(const struct metapath *mp)
 246{
 247	if (mp->mp_list[0] == 0)
 248		return 2;
 249	return 1;
 250}
 251
 252/**
 253 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 254 * @height: The metadata height (0 = dinode)
 255 * @mp: The metapath
 256 */
 257static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
 258{
 259	struct buffer_head *bh = mp->mp_bh[height];
 260	if (height == 0)
 261		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
 262	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
 263}
 264
 265/**
 266 * metapointer - Return pointer to start of metadata in a buffer
 267 * @height: The metadata height (0 = dinode)
 268 * @mp: The metapath
 269 *
 270 * Return a pointer to the block number of the next height of the metadata
 271 * tree given a buffer containing the pointer to the current height of the
 272 * metadata tree.
 273 */
 274
 275static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 276{
 277	__be64 *p = metaptr1(height, mp);
 278	return p + mp->mp_list[height];
 279}
 280
 281static inline const __be64 *metaend(unsigned int height, const struct metapath *mp)
 282{
 283	const struct buffer_head *bh = mp->mp_bh[height];
 284	return (const __be64 *)(bh->b_data + bh->b_size);
 285}
 286
 287static void clone_metapath(struct metapath *clone, struct metapath *mp)
 288{
 289	unsigned int hgt;
 290
 291	*clone = *mp;
 292	for (hgt = 0; hgt < mp->mp_aheight; hgt++)
 293		get_bh(clone->mp_bh[hgt]);
 294}
 295
 296static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
 297{
 298	const __be64 *t;
 299
 300	for (t = start; t < end; t++) {
 301		struct buffer_head *rabh;
 302
 303		if (!*t)
 304			continue;
 305
 306		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
 307		if (trylock_buffer(rabh)) {
 308			if (!buffer_uptodate(rabh)) {
 309				rabh->b_end_io = end_buffer_read_sync;
 310				submit_bh(REQ_OP_READ | REQ_RAHEAD | REQ_META |
 311					  REQ_PRIO, rabh);
 312				continue;
 313			}
 314			unlock_buffer(rabh);
 315		}
 316		brelse(rabh);
 317	}
 318}
 319
 320static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
 321			     unsigned int x, unsigned int h)
 322{
 323	for (; x < h; x++) {
 324		__be64 *ptr = metapointer(x, mp);
 325		u64 dblock = be64_to_cpu(*ptr);
 326		int ret;
 327
 328		if (!dblock)
 329			break;
 330		ret = gfs2_meta_buffer(ip, GFS2_METATYPE_IN, dblock, &mp->mp_bh[x + 1]);
 331		if (ret)
 332			return ret;
 333	}
 334	mp->mp_aheight = x + 1;
 335	return 0;
 336}
 337
 338/**
 339 * lookup_metapath - Walk the metadata tree to a specific point
 340 * @ip: The inode
 341 * @mp: The metapath
 342 *
 343 * Assumes that the inode's buffer has already been looked up and
 344 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 345 * by find_metapath().
 346 *
 347 * If this function encounters part of the tree which has not been
 348 * allocated, it returns the current height of the tree at the point
 349 * at which it found the unallocated block. Blocks which are found are
 350 * added to the mp->mp_bh[] list.
 351 *
 352 * Returns: error
 353 */
 354
 355static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 356{
 357	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
 358}
 359
 360/**
 361 * fillup_metapath - fill up buffers for the metadata path to a specific height
 362 * @ip: The inode
 363 * @mp: The metapath
 364 * @h: The height to which it should be mapped
 365 *
 366 * Similar to lookup_metapath, but does lookups for a range of heights
 367 *
 368 * Returns: error or the number of buffers filled
 369 */
 370
 371static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
 372{
 373	unsigned int x = 0;
 374	int ret;
 375
 376	if (h) {
 377		/* find the first buffer we need to look up. */
 378		for (x = h - 1; x > 0; x--) {
 379			if (mp->mp_bh[x])
 380				break;
 381		}
 382	}
 383	ret = __fillup_metapath(ip, mp, x, h);
 384	if (ret)
 385		return ret;
 386	return mp->mp_aheight - x - 1;
 387}
 388
 389static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
 390{
 391	sector_t factor = 1, block = 0;
 392	int hgt;
 393
 394	for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
 395		if (hgt < mp->mp_aheight)
 396			block += mp->mp_list[hgt] * factor;
 397		factor *= sdp->sd_inptrs;
 398	}
 399	return block;
 400}
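/*
 * metapath_to_block() is the inverse of find_metapath(): it folds the
 * per-height indices back into a logical block number.  For a height-3
 * metapath this is equivalent to (with N = sdp->sd_inptrs):
 *
 *	block = (mp_list[0] * N + mp_list[1]) * N + mp_list[2];
 *
 * Indices at heights >= mp_aheight refer to levels that are not
 * allocated yet and are treated as zero.
 */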
 401
 402static void release_metapath(struct metapath *mp)
 403{
 404	int i;
 405
 406	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
 407		if (mp->mp_bh[i] == NULL)
 408			break;
 409		brelse(mp->mp_bh[i]);
 410		mp->mp_bh[i] = NULL;
 411	}
 412}
 413
 414/**
 415 * gfs2_extent_length - Returns length of an extent of blocks
 416 * @bh: The metadata block
 417 * @ptr: Current position in @bh
 418 * @limit: Max extent length to return
 419 * @eob: Set to 1 if we hit "end of block"
 420 *
 421 * Returns: The length of the extent (minimum of one block)
 422 */
 423
 424static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
 425{
 426	const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
 427	const __be64 *first = ptr;
 428	u64 d = be64_to_cpu(*ptr);
 429
 430	*eob = 0;
 431	do {
 432		ptr++;
 433		if (ptr >= end)
 434			break;
 435		d++;
 436	} while(be64_to_cpu(*ptr) == d);
 437	if (ptr >= end)
 438		*eob = 1;
 439	return ptr - first;
 440}
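/*
 * Example: if @ptr points at a run of pointers 1000, 1001, 1002, 1005,
 * the loop above stops at 1005 and the function returns 3, the number
 * of physically consecutive blocks.  *eob is only set when the run
 * reaches the very end of the indirect block, in which case the caller
 * must read the next branch of metadata before mapping further blocks.
 */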
 441
 442enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
 443
 444/*
 445 * gfs2_metadata_walker - walk an indirect block
 446 * @mp: Metapath to indirect block
 447 * @ptrs: Number of pointers to look at
 448 *
 449 * When returning WALK_FOLLOW, the walker must update @mp to point at the right
 450 * indirect block to follow.
 451 */
 452typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
 453						   unsigned int ptrs);
 454
 455/*
 456 * gfs2_walk_metadata - walk a tree of indirect blocks
 457 * @inode: The inode
 458 * @mp: Starting point of walk
 459 * @max_len: Maximum number of blocks to walk
 460 * @walker: Called during the walk
 461 *
 462 * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
 463 * past the end of metadata, and a negative error code otherwise.
 464 */
 465
 466static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
 467		u64 max_len, gfs2_metadata_walker walker)
 468{
 469	struct gfs2_inode *ip = GFS2_I(inode);
 470	struct gfs2_sbd *sdp = GFS2_SB(inode);
 471	u64 factor = 1;
 472	unsigned int hgt;
 473	int ret;
 474
 475	/*
 476	 * The walk starts in the lowest allocated indirect block, which may be
 477	 * before the position indicated by @mp.  Adjust @max_len accordingly
 478	 * to avoid a short walk.
 479	 */
 480	for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
 481		max_len += mp->mp_list[hgt] * factor;
 482		mp->mp_list[hgt] = 0;
 483		factor *= sdp->sd_inptrs;
 484	}
 485
 486	for (;;) {
 487		u16 start = mp->mp_list[hgt];
 488		enum walker_status status;
 489		unsigned int ptrs;
 490		u64 len;
 491
 492		/* Walk indirect block. */
 493		ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
 494		len = ptrs * factor;
 495		if (len > max_len)
 496			ptrs = DIV_ROUND_UP_ULL(max_len, factor);
 497		status = walker(mp, ptrs);
 498		switch (status) {
 499		case WALK_STOP:
 500			return 1;
 501		case WALK_FOLLOW:
 502			BUG_ON(mp->mp_aheight == mp->mp_fheight);
 503			ptrs = mp->mp_list[hgt] - start;
 504			len = ptrs * factor;
 505			break;
 506		case WALK_CONTINUE:
 507			break;
 508		}
 509		if (len >= max_len)
 510			break;
 511		max_len -= len;
 512		if (status == WALK_FOLLOW)
 513			goto fill_up_metapath;
 514
 515lower_metapath:
 516		/* Decrease height of metapath. */
 517		brelse(mp->mp_bh[hgt]);
 518		mp->mp_bh[hgt] = NULL;
 519		mp->mp_list[hgt] = 0;
 520		if (!hgt)
 521			break;
 522		hgt--;
 523		factor *= sdp->sd_inptrs;
 524
 525		/* Advance in metadata tree. */
 526		(mp->mp_list[hgt])++;
 527		if (hgt) {
 528			if (mp->mp_list[hgt] >= sdp->sd_inptrs)
 529				goto lower_metapath;
 530		} else {
 531			if (mp->mp_list[hgt] >= sdp->sd_diptrs)
 532				break;
 533		}
 534
 535fill_up_metapath:
 536		/* Increase height of metapath. */
 537		ret = fillup_metapath(ip, mp, ip->i_height - 1);
 538		if (ret < 0)
 539			return ret;
 540		hgt += ret;
 541		for (; ret; ret--)
 542			do_div(factor, sdp->sd_inptrs);
 543		mp->mp_aheight = hgt + 1;
 544	}
 545	return 0;
 546}
 547
 548static enum walker_status gfs2_hole_walker(struct metapath *mp,
 549					   unsigned int ptrs)
 550{
 551	const __be64 *start, *ptr, *end;
 552	unsigned int hgt;
 553
 554	hgt = mp->mp_aheight - 1;
 555	start = metapointer(hgt, mp);
 556	end = start + ptrs;
 557
 558	for (ptr = start; ptr < end; ptr++) {
 559		if (*ptr) {
 560			mp->mp_list[hgt] += ptr - start;
 561			if (mp->mp_aheight == mp->mp_fheight)
 562				return WALK_STOP;
 563			return WALK_FOLLOW;
 564		}
 565	}
 566	return WALK_CONTINUE;
 567}
 568
 569/**
 570 * gfs2_hole_size - figure out the size of a hole
 571 * @inode: The inode
 572 * @lblock: The logical starting block number
 573 * @len: How far to look (in blocks)
 574 * @mp: The metapath at lblock
 575 * @iomap: The iomap to store the hole size in
 576 *
 577 * This function modifies @mp.
 578 *
 579 * Returns: errno on error
 580 */
 581static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
 582			  struct metapath *mp, struct iomap *iomap)
 583{
 584	struct metapath clone;
 585	u64 hole_size;
 586	int ret;
 587
 588	clone_metapath(&clone, mp);
 589	ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
 590	if (ret < 0)
 591		goto out;
 592
 593	if (ret == 1)
 594		hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
 595	else
 596		hole_size = len;
 597	iomap->length = hole_size << inode->i_blkbits;
 598	ret = 0;
 599
 600out:
 601	release_metapath(&clone);
 602	return ret;
 603}
 604
 605static inline void gfs2_indirect_init(struct metapath *mp,
 606				      struct gfs2_glock *gl, unsigned int i,
 607				      unsigned offset, u64 bn)
 608{
 609	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
 610		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
 611				 sizeof(struct gfs2_dinode)));
 612	BUG_ON(i < 1);
 613	BUG_ON(mp->mp_bh[i] != NULL);
 614	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
 615	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
 616	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 617	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 618	ptr += offset;
 619	*ptr = cpu_to_be64(bn);
 620}
 621
 622enum alloc_state {
 623	ALLOC_DATA = 0,
 624	ALLOC_GROW_DEPTH = 1,
 625	ALLOC_GROW_HEIGHT = 2,
 626	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
 627};
 628
 629/**
 630 * __gfs2_iomap_alloc - Build a metadata tree of the requested height
 631 * @inode: The GFS2 inode
 632 * @iomap: The iomap structure
 633 * @mp: The metapath, with proper height information calculated
 634 *
 635 * In this routine we may have to alloc:
 636 *   i) Indirect blocks to grow the metadata tree height
 637 *  ii) Indirect blocks to fill in lower part of the metadata tree
 638 * iii) Data blocks
 639 *
 640 * This function is called after __gfs2_iomap_get, which works out the
 641 * total number of blocks which we need via gfs2_alloc_size.
 642 *
 643 * We then do the actual allocation asking for an extent at a time (if
 644 * enough contiguous free blocks are available, there will only be one
 645 * allocation request per call) and use the state machine to initialise
 646 * the blocks in order.
 647 *
 648 * Right now, this function will allocate at most one indirect block
 649 * worth of data -- with a default block size of 4K, that's slightly
 650 * less than 2M.  If this limitation is ever removed to allow huge
 651 * allocations, we would probably still want to limit the iomap size we
 652 * return to avoid stalling other tasks during huge writes; the next
 653 * iomap iteration would then find the blocks already allocated.
 654 *
 655 * Returns: errno on error
 656 */
 657
 658static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 659			      struct metapath *mp)
 660{
 661	struct gfs2_inode *ip = GFS2_I(inode);
 662	struct gfs2_sbd *sdp = GFS2_SB(inode);
 663	struct buffer_head *dibh = mp->mp_bh[0];
 664	u64 bn;
 665	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
 666	size_t dblks = iomap->length >> inode->i_blkbits;
 667	const unsigned end_of_metadata = mp->mp_fheight - 1;
 668	int ret;
 669	enum alloc_state state;
 670	__be64 *ptr;
 671	__be64 zero_bn = 0;
 672
 673	BUG_ON(mp->mp_aheight < 1);
 674	BUG_ON(dibh == NULL);
 675	BUG_ON(dblks < 1);
 676
 677	gfs2_trans_add_meta(ip->i_gl, dibh);
 678
 679	down_write(&ip->i_rw_mutex);
 680
 681	if (mp->mp_fheight == mp->mp_aheight) {
 682		/* Bottom indirect block exists */
 683		state = ALLOC_DATA;
 684	} else {
 685		/* Need to allocate indirect blocks */
 686		if (mp->mp_fheight == ip->i_height) {
 687			/* Writing into existing tree, extend tree down */
 688			iblks = mp->mp_fheight - mp->mp_aheight;
 689			state = ALLOC_GROW_DEPTH;
 690		} else {
 691			/* Building up tree height */
 692			state = ALLOC_GROW_HEIGHT;
 693			iblks = mp->mp_fheight - ip->i_height;
 694			branch_start = metapath_branch_start(mp);
 695			iblks += (mp->mp_fheight - branch_start);
 696		}
 697	}
 698
 699	/* start of the second part of the function (state machine) */
 700
 701	blks = dblks + iblks;
 702	i = mp->mp_aheight;
 703	do {
 704		n = blks - alloced;
 705		ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 706		if (ret)
 707			goto out;
 708		alloced += n;
 709		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
 710			gfs2_trans_remove_revoke(sdp, bn, n);
 711		switch (state) {
 712		/* Growing height of tree */
 713		case ALLOC_GROW_HEIGHT:
 714			if (i == 1) {
 715				ptr = (__be64 *)(dibh->b_data +
 716						 sizeof(struct gfs2_dinode));
 717				zero_bn = *ptr;
 718			}
 719			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
 720			     i++, n--)
 721				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
 722			if (i - 1 == mp->mp_fheight - ip->i_height) {
 723				i--;
 724				gfs2_buffer_copy_tail(mp->mp_bh[i],
 725						sizeof(struct gfs2_meta_header),
 726						dibh, sizeof(struct gfs2_dinode));
 727				gfs2_buffer_clear_tail(dibh,
 728						sizeof(struct gfs2_dinode) +
 729						sizeof(__be64));
 730				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
 731					sizeof(struct gfs2_meta_header));
 732				*ptr = zero_bn;
 733				state = ALLOC_GROW_DEPTH;
 734				for(i = branch_start; i < mp->mp_fheight; i++) {
 735					if (mp->mp_bh[i] == NULL)
 736						break;
 737					brelse(mp->mp_bh[i]);
 738					mp->mp_bh[i] = NULL;
 739				}
 740				i = branch_start;
 741			}
 742			if (n == 0)
 743				break;
 744			fallthrough;	/* To branching from existing tree */
 745		case ALLOC_GROW_DEPTH:
 746			if (i > 1 && i < mp->mp_fheight)
 747				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
 748			for (; i < mp->mp_fheight && n > 0; i++, n--)
 749				gfs2_indirect_init(mp, ip->i_gl, i,
 750						   mp->mp_list[i-1], bn++);
 751			if (i == mp->mp_fheight)
 752				state = ALLOC_DATA;
 753			if (n == 0)
 754				break;
 755			fallthrough;	/* To tree complete, adding data blocks */
 756		case ALLOC_DATA:
 757			BUG_ON(n > dblks);
 758			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
 759			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
 760			dblks = n;
 761			ptr = metapointer(end_of_metadata, mp);
 762			iomap->addr = bn << inode->i_blkbits;
 763			iomap->flags |= IOMAP_F_MERGED | IOMAP_F_NEW;
 764			while (n-- > 0)
 765				*ptr++ = cpu_to_be64(bn++);
 766			break;
 767		}
 768	} while (iomap->addr == IOMAP_NULL_ADDR);
 769
 770	iomap->type = IOMAP_MAPPED;
 771	iomap->length = (u64)dblks << inode->i_blkbits;
 772	ip->i_height = mp->mp_fheight;
 773	gfs2_add_inode_blocks(&ip->i_inode, alloced);
 774	gfs2_dinode_out(ip, dibh->b_data);
 775out:
 776	up_write(&ip->i_rw_mutex);
 777	return ret;
 778}
 779
 780#define IOMAP_F_GFS2_BOUNDARY IOMAP_F_PRIVATE
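/*
 * GFS2 repurposes the iomap private flag to mark a mapping that stops
 * at the end of an indirect block (see the eob handling in
 * __gfs2_iomap_get below); gfs2_block_map() turns it into
 * set_buffer_boundary() for buffer_head based callers.
 */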
 781
 782/**
 783 * gfs2_alloc_size - Compute the maximum allocation size
 784 * @inode: The inode
 785 * @mp: The metapath
 786 * @size: Requested size in blocks
 787 *
 788 * Compute the maximum size of the next allocation at @mp.
 789 *
 790 * Returns: size in blocks
 791 */
 792static u64 gfs2_alloc_size(struct inode *inode, struct metapath *mp, u64 size)
 793{
 794	struct gfs2_inode *ip = GFS2_I(inode);
 795	struct gfs2_sbd *sdp = GFS2_SB(inode);
 796	const __be64 *first, *ptr, *end;
 797
 798	/*
 799	 * For writes to stuffed files, this function is called twice via
 800	 * __gfs2_iomap_get, before and after unstuffing. The size we return the
 801	 * first time needs to be large enough to get the reservation and
 802	 * allocation sizes right.  The size we return the second time must
 803	 * be exact or else __gfs2_iomap_alloc won't do the right thing.
 804	 */
 805
 806	if (gfs2_is_stuffed(ip) || mp->mp_fheight != mp->mp_aheight) {
 807		unsigned int maxsize = mp->mp_fheight > 1 ?
 808			sdp->sd_inptrs : sdp->sd_diptrs;
 809		maxsize -= mp->mp_list[mp->mp_fheight - 1];
 810		if (size > maxsize)
 811			size = maxsize;
 812		return size;
 813	}
 814
 815	first = metapointer(ip->i_height - 1, mp);
 816	end = metaend(ip->i_height - 1, mp);
 817	if (end - first > size)
 818		end = first + size;
 819	for (ptr = first; ptr < end; ptr++) {
 820		if (*ptr)
 821			break;
 822	}
 823	return ptr - first;
 824}
 825
 826/**
 827 * __gfs2_iomap_get - Map blocks from an inode to disk blocks
 828 * @inode: The inode
 829 * @pos: Starting position in bytes
 830 * @length: Length to map, in bytes
 831 * @flags: iomap flags
 832 * @iomap: The iomap structure
 833 * @mp: The metapath
 834 *
 835 * Returns: errno
 836 */
 837static int __gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
 838			    unsigned flags, struct iomap *iomap,
 839			    struct metapath *mp)
 840{
 841	struct gfs2_inode *ip = GFS2_I(inode);
 842	struct gfs2_sbd *sdp = GFS2_SB(inode);
 843	loff_t size = i_size_read(inode);
 844	__be64 *ptr;
 845	sector_t lblock;
 846	sector_t lblock_stop;
 847	int ret;
 848	int eob;
 849	u64 len;
 850	struct buffer_head *dibh = NULL, *bh;
 851	u8 height;
 852
 853	if (!length)
 854		return -EINVAL;
 855
 856	down_read(&ip->i_rw_mutex);
 857
 858	ret = gfs2_meta_inode_buffer(ip, &dibh);
 859	if (ret)
 860		goto unlock;
 861	mp->mp_bh[0] = dibh;
 862
 863	if (gfs2_is_stuffed(ip)) {
 864		if (flags & IOMAP_WRITE) {
 865			loff_t max_size = gfs2_max_stuffed_size(ip);
 866
 867			if (pos + length > max_size)
 868				goto unstuff;
 869			iomap->length = max_size;
 870		} else {
 871			if (pos >= size) {
 872				if (flags & IOMAP_REPORT) {
 873					ret = -ENOENT;
 874					goto unlock;
 875				} else {
 876					iomap->offset = pos;
 877					iomap->length = length;
 878					goto hole_found;
 879				}
 880			}
 881			iomap->length = size;
 882		}
 883		iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
 884			      sizeof(struct gfs2_dinode);
 885		iomap->type = IOMAP_INLINE;
 886		iomap->inline_data = dibh->b_data + sizeof(struct gfs2_dinode);
 887		goto out;
 888	}
 889
 890unstuff:
 891	lblock = pos >> inode->i_blkbits;
 892	iomap->offset = lblock << inode->i_blkbits;
 893	lblock_stop = (pos + length - 1) >> inode->i_blkbits;
 894	len = lblock_stop - lblock + 1;
 895	iomap->length = len << inode->i_blkbits;
 896
 897	height = ip->i_height;
 898	while ((lblock + 1) * sdp->sd_sb.sb_bsize > sdp->sd_heightsize[height])
 899		height++;
 900	find_metapath(sdp, lblock, mp, height);
 901	if (height > ip->i_height || gfs2_is_stuffed(ip))
 902		goto do_alloc;
 903
 904	ret = lookup_metapath(ip, mp);
 905	if (ret)
 906		goto unlock;
 907
 908	if (mp->mp_aheight != ip->i_height)
 909		goto do_alloc;
 910
 911	ptr = metapointer(ip->i_height - 1, mp);
 912	if (*ptr == 0)
 913		goto do_alloc;
 914
 915	bh = mp->mp_bh[ip->i_height - 1];
 916	len = gfs2_extent_length(bh, ptr, len, &eob);
 917
 918	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
 919	iomap->length = len << inode->i_blkbits;
 920	iomap->type = IOMAP_MAPPED;
 921	iomap->flags |= IOMAP_F_MERGED;
 922	if (eob)
 923		iomap->flags |= IOMAP_F_GFS2_BOUNDARY;
 924
 925out:
 926	iomap->bdev = inode->i_sb->s_bdev;
 927unlock:
 928	up_read(&ip->i_rw_mutex);
 929	return ret;
 930
 931do_alloc:
 932	if (flags & IOMAP_REPORT) {
 933		if (pos >= size)
 934			ret = -ENOENT;
 935		else if (height == ip->i_height)
 936			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
 937		else
 938			iomap->length = size - iomap->offset;
 939	} else if (flags & IOMAP_WRITE) {
 940		u64 alloc_size;
 941
 942		if (flags & IOMAP_DIRECT)
 943			goto out;  /* (see gfs2_file_direct_write) */
 944
 945		len = gfs2_alloc_size(inode, mp, len);
 946		alloc_size = len << inode->i_blkbits;
 947		if (alloc_size < iomap->length)
 948			iomap->length = alloc_size;
 949	} else {
 950		if (pos < size && height == ip->i_height)
 951			ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
 952	}
 953hole_found:
 954	iomap->addr = IOMAP_NULL_ADDR;
 955	iomap->type = IOMAP_HOLE;
 956	goto out;
 957}
 958
 959static int gfs2_iomap_page_prepare(struct inode *inode, loff_t pos,
 960				   unsigned len)
 961{
 962	unsigned int blockmask = i_blocksize(inode) - 1;
 963	struct gfs2_sbd *sdp = GFS2_SB(inode);
 964	unsigned int blocks;
 965
 966	blocks = ((pos & blockmask) + len + blockmask) >> inode->i_blkbits;
 967	return gfs2_trans_begin(sdp, RES_DINODE + blocks, 0);
 968}
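/*
 * The blocks calculation above rounds the byte range [pos, pos + len)
 * out to whole filesystem blocks.  For example, with 4 KiB blocks,
 * pos = 4095 and len = 2 touches two blocks:
 *
 *	((4095 & 4095) + 2 + 4095) >> 12 == 2
 */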
 969
 970static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
 971				 unsigned copied, struct page *page)
 972{
 973	struct gfs2_trans *tr = current->journal_info;
 974	struct gfs2_inode *ip = GFS2_I(inode);
 975	struct gfs2_sbd *sdp = GFS2_SB(inode);
 976
 977	if (page && !gfs2_is_stuffed(ip))
 978		gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
 979
 980	if (tr->tr_num_buf_new)
 981		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 982
 983	gfs2_trans_end(sdp);
 984}
 985
 986static const struct iomap_page_ops gfs2_iomap_page_ops = {
 987	.page_prepare = gfs2_iomap_page_prepare,
 988	.page_done = gfs2_iomap_page_done,
 989};
 990
 991static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
 992				  loff_t length, unsigned flags,
 993				  struct iomap *iomap,
 994				  struct metapath *mp)
 995{
 996	struct gfs2_inode *ip = GFS2_I(inode);
 997	struct gfs2_sbd *sdp = GFS2_SB(inode);
 998	bool unstuff;
 999	int ret;
1000
1001	unstuff = gfs2_is_stuffed(ip) &&
1002		  pos + length > gfs2_max_stuffed_size(ip);
1003
1004	if (unstuff || iomap->type == IOMAP_HOLE) {
1005		unsigned int data_blocks, ind_blocks;
1006		struct gfs2_alloc_parms ap = {};
1007		unsigned int rblocks;
1008		struct gfs2_trans *tr;
1009
1010		gfs2_write_calc_reserv(ip, iomap->length, &data_blocks,
1011				       &ind_blocks);
1012		ap.target = data_blocks + ind_blocks;
1013		ret = gfs2_quota_lock_check(ip, &ap);
1014		if (ret)
1015			return ret;
1016
1017		ret = gfs2_inplace_reserve(ip, &ap);
1018		if (ret)
1019			goto out_qunlock;
1020
1021		rblocks = RES_DINODE + ind_blocks;
1022		if (gfs2_is_jdata(ip))
1023			rblocks += data_blocks;
1024		if (ind_blocks || data_blocks)
1025			rblocks += RES_STATFS + RES_QUOTA;
1026		if (inode == sdp->sd_rindex)
1027			rblocks += 2 * RES_STATFS;
1028		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
1029
1030		ret = gfs2_trans_begin(sdp, rblocks,
1031				       iomap->length >> inode->i_blkbits);
1032		if (ret)
1033			goto out_trans_fail;
1034
1035		if (unstuff) {
1036			ret = gfs2_unstuff_dinode(ip);
1037			if (ret)
1038				goto out_trans_end;
1039			release_metapath(mp);
1040			ret = __gfs2_iomap_get(inode, iomap->offset,
1041					       iomap->length, flags, iomap, mp);
1042			if (ret)
1043				goto out_trans_end;
1044		}
1045
1046		if (iomap->type == IOMAP_HOLE) {
1047			ret = __gfs2_iomap_alloc(inode, iomap, mp);
1048			if (ret) {
1049				gfs2_trans_end(sdp);
1050				gfs2_inplace_release(ip);
1051				punch_hole(ip, iomap->offset, iomap->length);
1052				goto out_qunlock;
1053			}
1054		}
1055
1056		tr = current->journal_info;
1057		if (tr->tr_num_buf_new)
1058			__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
1059
1060		gfs2_trans_end(sdp);
1061	}
1062
1063	if (gfs2_is_stuffed(ip) || gfs2_is_jdata(ip))
1064		iomap->page_ops = &gfs2_iomap_page_ops;
1065	return 0;
1066
1067out_trans_end:
1068	gfs2_trans_end(sdp);
1069out_trans_fail:
1070	gfs2_inplace_release(ip);
1071out_qunlock:
1072	gfs2_quota_unlock(ip);
1073	return ret;
1074}
1075
1076static int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
1077			    unsigned flags, struct iomap *iomap,
1078			    struct iomap *srcmap)
1079{
1080	struct gfs2_inode *ip = GFS2_I(inode);
1081	struct metapath mp = { .mp_aheight = 1, };
1082	int ret;
1083
1084	if (gfs2_is_jdata(ip))
1085		iomap->flags |= IOMAP_F_BUFFER_HEAD;
1086
1087	trace_gfs2_iomap_start(ip, pos, length, flags);
1088	ret = __gfs2_iomap_get(inode, pos, length, flags, iomap, &mp);
1089	if (ret)
1090		goto out_unlock;
1091
1092	switch(flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1093	case IOMAP_WRITE:
1094		if (flags & IOMAP_DIRECT) {
1095			/*
1096			 * Silently fall back to buffered I/O for stuffed files
1097			 * or if we've got a hole (see gfs2_file_direct_write).
1098			 */
1099			if (iomap->type != IOMAP_MAPPED)
1100				ret = -ENOTBLK;
1101			goto out_unlock;
1102		}
1103		break;
1104	case IOMAP_ZERO:
1105		if (iomap->type == IOMAP_HOLE)
1106			goto out_unlock;
1107		break;
1108	default:
1109		goto out_unlock;
1110	}
1111
1112	ret = gfs2_iomap_begin_write(inode, pos, length, flags, iomap, &mp);
1113
1114out_unlock:
1115	release_metapath(&mp);
1116	trace_gfs2_iomap_end(ip, iomap, ret);
1117	return ret;
1118}
1119
1120static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
1121			  ssize_t written, unsigned flags, struct iomap *iomap)
1122{
1123	struct gfs2_inode *ip = GFS2_I(inode);
1124	struct gfs2_sbd *sdp = GFS2_SB(inode);
1125
1126	switch (flags & (IOMAP_WRITE | IOMAP_ZERO)) {
1127	case IOMAP_WRITE:
1128		if (flags & IOMAP_DIRECT)
1129			return 0;
1130		break;
1131	case IOMAP_ZERO:
1132		 if (iomap->type == IOMAP_HOLE)
1133			 return 0;
1134		 break;
1135	default:
1136		 return 0;
1137	}
1138
1139	if (!gfs2_is_stuffed(ip))
1140		gfs2_ordered_add_inode(ip);
1141
1142	if (inode == sdp->sd_rindex)
1143		adjust_fs_space(inode);
1144
1145	gfs2_inplace_release(ip);
1146
1147	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
1148		gfs2_quota_unlock(ip);
1149
1150	if (length != written && (iomap->flags & IOMAP_F_NEW)) {
1151		/* Deallocate blocks that were just allocated. */
1152		loff_t hstart = round_up(pos + written, i_blocksize(inode));
1153		loff_t hend = iomap->offset + iomap->length;
1154
1155		if (hstart < hend) {
1156			truncate_pagecache_range(inode, hstart, hend - 1);
1157			punch_hole(ip, hstart, hend - hstart);
1158		}
1159	}
1160
1161	if (unlikely(!written))
1162		return 0;
1163
1164	if (iomap->flags & IOMAP_F_SIZE_CHANGED)
1165		mark_inode_dirty(inode);
1166	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
1167	return 0;
1168}
1169
1170const struct iomap_ops gfs2_iomap_ops = {
1171	.iomap_begin = gfs2_iomap_begin,
1172	.iomap_end = gfs2_iomap_end,
1173};
1174
1175/**
1176 * gfs2_block_map - Map one or more blocks of an inode to a disk block
1177 * @inode: The inode
1178 * @lblock: The logical block number
1179 * @bh_map: The bh to be mapped
1180 * @create: True if it's ok to allocate blocks to satisfy the request
1181 *
1182 * The size of the requested mapping is defined in bh_map->b_size.
1183 *
1184 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
1185 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
1186 * bh_map->b_size to indicate the size of the mapping when @lblock and
1187 * successive blocks are mapped, up to the requested size.
1188 *
1189 * Sets buffer_boundary() if a read of metadata will be required
1190 * before the next block can be mapped. Sets buffer_new() if new
1191 * blocks were allocated.
1192 *
1193 * Returns: errno
1194 */
1195
1196int gfs2_block_map(struct inode *inode, sector_t lblock,
1197		   struct buffer_head *bh_map, int create)
1198{
1199	struct gfs2_inode *ip = GFS2_I(inode);
1200	loff_t pos = (loff_t)lblock << inode->i_blkbits;
1201	loff_t length = bh_map->b_size;
1202	struct iomap iomap = { };
1203	int ret;
1204
1205	clear_buffer_mapped(bh_map);
1206	clear_buffer_new(bh_map);
1207	clear_buffer_boundary(bh_map);
1208	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
1209
1210	if (!create)
1211		ret = gfs2_iomap_get(inode, pos, length, &iomap);
1212	else
1213		ret = gfs2_iomap_alloc(inode, pos, length, &iomap);
1214	if (ret)
1215		goto out;
1216
1217	if (iomap.length > bh_map->b_size) {
1218		iomap.length = bh_map->b_size;
1219		iomap.flags &= ~IOMAP_F_GFS2_BOUNDARY;
1220	}
1221	if (iomap.addr != IOMAP_NULL_ADDR)
1222		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
1223	bh_map->b_size = iomap.length;
1224	if (iomap.flags & IOMAP_F_GFS2_BOUNDARY)
1225		set_buffer_boundary(bh_map);
1226	if (iomap.flags & IOMAP_F_NEW)
1227		set_buffer_new(bh_map);
1228
1229out:
1230	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
1231	return ret;
1232}
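/*
 * gfs2_block_map() follows the usual get_block_t calling convention;
 * a caller mapping a single block might look roughly like this
 * (illustrative sketch only):
 *
 *	struct buffer_head bh = { .b_size = i_blocksize(inode) };
 *	sector_t dblock = 0;
 *	int ret;
 *
 *	ret = gfs2_block_map(inode, lblock, &bh, 0);
 *	if (!ret && buffer_mapped(&bh))
 *		dblock = bh.b_blocknr;
 */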
1233
1234int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
1235		    unsigned int *extlen)
1236{
1237	unsigned int blkbits = inode->i_blkbits;
1238	struct iomap iomap = { };
1239	unsigned int len;
1240	int ret;
1241
1242	ret = gfs2_iomap_get(inode, lblock << blkbits, *extlen << blkbits,
1243			     &iomap);
1244	if (ret)
1245		return ret;
1246	if (iomap.type != IOMAP_MAPPED)
1247		return -EIO;
1248	*dblock = iomap.addr >> blkbits;
1249	len = iomap.length >> blkbits;
1250	if (len < *extlen)
1251		*extlen = len;
1252	return 0;
1253}
1254
1255int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
1256		      unsigned int *extlen, bool *new)
1257{
1258	unsigned int blkbits = inode->i_blkbits;
1259	struct iomap iomap = { };
1260	unsigned int len;
1261	int ret;
1262
1263	ret = gfs2_iomap_alloc(inode, lblock << blkbits, *extlen << blkbits,
1264			       &iomap);
1265	if (ret)
1266		return ret;
1267	if (iomap.type != IOMAP_MAPPED)
1268		return -EIO;
1269	*dblock = iomap.addr >> blkbits;
1270	len = iomap.length >> blkbits;
1271	if (len < *extlen)
1272		*extlen = len;
1273	*new = iomap.flags & IOMAP_F_NEW;
1274	return 0;
1275}
1276
1277/*
1278 * NOTE: Never call gfs2_block_zero_range with an open transaction because it
1279 * uses iomap write to perform its actions, which begin their own transactions
1280 * (iomap_begin, page_prepare, etc.)
1281 */
1282static int gfs2_block_zero_range(struct inode *inode, loff_t from,
1283				 unsigned int length)
1284{
1285	BUG_ON(current->journal_info);
1286	return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
1287}
1288
1289#define GFS2_JTRUNC_REVOKES 8192
1290
1291/**
1292 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
1293 * @inode: The inode being truncated
1294 * @oldsize: The original (larger) size
1295 * @newsize: The new smaller size
1296 *
1297 * With jdata files, we have to journal a revoke for each block which is
1298 * truncated. As a result, we need to split this into separate transactions
1299 * if the number of pages being truncated gets too large.
1300 */
1301
1302static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
1303{
1304	struct gfs2_sbd *sdp = GFS2_SB(inode);
1305	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1306	u64 chunk;
1307	int error;
1308
1309	while (oldsize != newsize) {
1310		struct gfs2_trans *tr;
1311		unsigned int offs;
1312
1313		chunk = oldsize - newsize;
1314		if (chunk > max_chunk)
1315			chunk = max_chunk;
1316
1317		offs = oldsize & ~PAGE_MASK;
1318		if (offs && chunk > PAGE_SIZE)
1319			chunk = offs + ((chunk - offs) & PAGE_MASK);
1320
1321		truncate_pagecache(inode, oldsize - chunk);
1322		oldsize -= chunk;
1323
1324		tr = current->journal_info;
1325		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
1326			continue;
1327
1328		gfs2_trans_end(sdp);
1329		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1330		if (error)
1331			return error;
1332	}
1333
1334	return 0;
1335}
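/*
 * With 4 KiB blocks, max_chunk above works out to
 * GFS2_JTRUNC_REVOKES * 4096 = 32 MiB of page cache dropped per
 * transaction, and the offs arithmetic keeps each intermediate
 * truncation point page aligned so that every step after the first
 * removes whole pages.
 */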
1336
1337static int trunc_start(struct inode *inode, u64 newsize)
1338{
1339	struct gfs2_inode *ip = GFS2_I(inode);
1340	struct gfs2_sbd *sdp = GFS2_SB(inode);
1341	struct buffer_head *dibh = NULL;
1342	int journaled = gfs2_is_jdata(ip);
1343	u64 oldsize = inode->i_size;
1344	int error;
1345
1346	if (!gfs2_is_stuffed(ip)) {
1347		unsigned int blocksize = i_blocksize(inode);
1348		unsigned int offs = newsize & (blocksize - 1);
1349		if (offs) {
1350			error = gfs2_block_zero_range(inode, newsize,
1351						      blocksize - offs);
1352			if (error)
1353				return error;
1354		}
1355	}
1356	if (journaled)
1357		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1358	else
1359		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1360	if (error)
1361		return error;
1362
1363	error = gfs2_meta_inode_buffer(ip, &dibh);
1364	if (error)
1365		goto out;
1366
1367	gfs2_trans_add_meta(ip->i_gl, dibh);
1368
1369	if (gfs2_is_stuffed(ip))
1370		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1371	else
1372		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1373
1374	i_size_write(inode, newsize);
1375	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1376	gfs2_dinode_out(ip, dibh->b_data);
1377
1378	if (journaled)
1379		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1380	else
1381		truncate_pagecache(inode, newsize);
1382
1383out:
1384	brelse(dibh);
1385	if (current->journal_info)
1386		gfs2_trans_end(sdp);
1387	return error;
1388}
1389
1390int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
1391		   struct iomap *iomap)
1392{
1393	struct metapath mp = { .mp_aheight = 1, };
1394	int ret;
1395
1396	ret = __gfs2_iomap_get(inode, pos, length, 0, iomap, &mp);
1397	release_metapath(&mp);
1398	return ret;
1399}
1400
1401int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
1402		     struct iomap *iomap)
1403{
1404	struct metapath mp = { .mp_aheight = 1, };
1405	int ret;
1406
1407	ret = __gfs2_iomap_get(inode, pos, length, IOMAP_WRITE, iomap, &mp);
1408	if (!ret && iomap->type == IOMAP_HOLE)
1409		ret = __gfs2_iomap_alloc(inode, iomap, &mp);
1410	release_metapath(&mp);
1411	return ret;
1412}
1413
1414/**
1415 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1416 * @ip: inode
1417 * @rd_gh: holder of resource group glock
1418 * @bh: buffer head to sweep
1419 * @start: starting point in bh
1420 * @end: end point in bh
1421 * @meta: true if bh points to metadata (rather than data)
1422 * @btotal: place to keep count of total blocks freed
1423 *
1424 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1425 * free, and free them all. However, we do it one rgrp at a time. If this
1426 * block has references to multiple rgrps, we break it into individual
1427 * transactions. This allows other processes to use the rgrps while we're
1428 * focused on a single one, for better concurrency / performance.
1429 * At every transaction boundary, we rewrite the inode into the journal.
1430 * That way the bitmaps are kept consistent with the inode and we can recover
1431 * if we're interrupted by power-outages.
1432 *
1433 * Returns: 0, or return code if an error occurred.
1434 *          *btotal has the total number of blocks freed
1435 */
1436static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1437			      struct buffer_head *bh, __be64 *start, __be64 *end,
1438			      bool meta, u32 *btotal)
1439{
1440	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1441	struct gfs2_rgrpd *rgd;
1442	struct gfs2_trans *tr;
1443	__be64 *p;
1444	int blks_outside_rgrp;
1445	u64 bn, bstart, isize_blks;
1446	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1447	int ret = 0;
1448	bool buf_in_tr = false; /* buffer was added to transaction */
1449
1450more_rgrps:
1451	rgd = NULL;
1452	if (gfs2_holder_initialized(rd_gh)) {
1453		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1454		gfs2_assert_withdraw(sdp,
1455			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1456	}
1457	blks_outside_rgrp = 0;
1458	bstart = 0;
1459	blen = 0;
1460
1461	for (p = start; p < end; p++) {
1462		if (!*p)
1463			continue;
1464		bn = be64_to_cpu(*p);
1465
1466		if (rgd) {
1467			if (!rgrp_contains_block(rgd, bn)) {
1468				blks_outside_rgrp++;
1469				continue;
1470			}
1471		} else {
1472			rgd = gfs2_blk2rgrpd(sdp, bn, true);
1473			if (unlikely(!rgd)) {
1474				ret = -EIO;
1475				goto out;
1476			}
1477			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1478						 LM_FLAG_NODE_SCOPE, rd_gh);
1479			if (ret)
1480				goto out;
1481
1482			/* Must be done with the rgrp glock held: */
1483			if (gfs2_rs_active(&ip->i_res) &&
1484			    rgd == ip->i_res.rs_rgd)
1485				gfs2_rs_deltree(&ip->i_res);
1486		}
1487
1488		/* The size of our transactions will be unknown until we
1489		   actually process all the metadata blocks that relate to
1490		   the rgrp. So we estimate. We know it can't be more than
1491		   the dinode's i_blocks and we don't want to exceed the
1492		   journal flush threshold, sd_log_thresh2. */
1493		if (current->journal_info == NULL) {
1494			unsigned int jblocks_rqsted, revokes;
1495
1496			jblocks_rqsted = rgd->rd_length + RES_DINODE +
1497				RES_INDIRECT;
1498			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1499			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1500				jblocks_rqsted +=
1501					atomic_read(&sdp->sd_log_thresh2);
1502			else
1503				jblocks_rqsted += isize_blks;
1504			revokes = jblocks_rqsted;
1505			if (meta)
1506				revokes += end - start;
1507			else if (ip->i_depth)
1508				revokes += sdp->sd_inptrs;
1509			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1510			if (ret)
1511				goto out_unlock;
1512			down_write(&ip->i_rw_mutex);
1513		}
1514		/* check if we will exceed the transaction blocks requested */
1515		tr = current->journal_info;
1516		if (tr->tr_num_buf_new + RES_STATFS +
1517		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1518			/* We set blks_outside_rgrp to ensure the loop will
1519			   be repeated for the same rgrp, but with a new
1520			   transaction. */
1521			blks_outside_rgrp++;
1522			/* This next part is tricky. If the buffer was added
1523			   to the transaction, we've already set some block
1524			   pointers to 0, so we better follow through and free
1525			   them, or we will introduce corruption (so break).
1526			   This may be impossible, or at least rare, but I
1527			   decided to cover the case regardless.
1528
1529			   If the buffer was not added to the transaction
1530			   (this call), doing so would exceed our transaction
1531			   size, so we need to end the transaction and start a
1532			   new one (so goto). */
1533
1534			if (buf_in_tr)
1535				break;
1536			goto out_unlock;
1537		}
1538
1539		gfs2_trans_add_meta(ip->i_gl, bh);
1540		buf_in_tr = true;
1541		*p = 0;
1542		if (bstart + blen == bn) {
1543			blen++;
1544			continue;
1545		}
1546		if (bstart) {
1547			__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1548			(*btotal) += blen;
1549			gfs2_add_inode_blocks(&ip->i_inode, -blen);
1550		}
1551		bstart = bn;
1552		blen = 1;
1553	}
1554	if (bstart) {
1555		__gfs2_free_blocks(ip, rgd, bstart, (u32)blen, meta);
1556		(*btotal) += blen;
1557		gfs2_add_inode_blocks(&ip->i_inode, -blen);
1558	}
1559out_unlock:
1560	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1561					    outside the rgrp we just processed,
1562					    do it all over again. */
1563		if (current->journal_info) {
1564			struct buffer_head *dibh;
1565
1566			ret = gfs2_meta_inode_buffer(ip, &dibh);
1567			if (ret)
1568				goto out;
1569
1570			/* Every transaction boundary, we rewrite the dinode
1571			   to keep its di_blocks current in case of failure. */
1572			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1573				current_time(&ip->i_inode);
1574			gfs2_trans_add_meta(ip->i_gl, dibh);
1575			gfs2_dinode_out(ip, dibh->b_data);
1576			brelse(dibh);
1577			up_write(&ip->i_rw_mutex);
1578			gfs2_trans_end(sdp);
1579			buf_in_tr = false;
1580		}
1581		gfs2_glock_dq_uninit(rd_gh);
1582		cond_resched();
1583		goto more_rgrps;
1584	}
1585out:
1586	return ret;
1587}
1588
1589static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1590{
1591	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1592		return false;
1593	return true;
1594}
1595
1596/**
1597 * find_nonnull_ptr - find a non-null pointer given a metapath and height
1598 * @sdp: The superblock
1599 * @mp: starting metapath
1600 * @h: desired height to search
1601 * @end_list: See punch_hole().
1602 * @end_aligned: See punch_hole().
1603 *
1604 * Assumes the metapath is valid (with buffers) out to height h.
1605 * Returns: true if a non-null pointer was found in the metapath buffer
1606 *          false if all remaining pointers are NULL in the buffer
1607 */
1608static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1609			     unsigned int h,
1610			     __u16 *end_list, unsigned int end_aligned)
1611{
1612	struct buffer_head *bh = mp->mp_bh[h];
1613	__be64 *first, *ptr, *end;
1614
1615	first = metaptr1(h, mp);
1616	ptr = first + mp->mp_list[h];
1617	end = (__be64 *)(bh->b_data + bh->b_size);
1618	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1619		bool keep_end = h < end_aligned;
1620		end = first + end_list[h] + keep_end;
1621	}
1622
1623	while (ptr < end) {
1624		if (*ptr) { /* if we have a non-null pointer */
1625			mp->mp_list[h] = ptr - first;
1626			h++;
1627			if (h < GFS2_MAX_META_HEIGHT)
1628				mp->mp_list[h] = 0;
1629			return true;
1630		}
1631		ptr++;
1632	}
1633	return false;
1634}
1635
1636enum dealloc_states {
1637	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
1638	DEALLOC_MP_LOWER = 1,   /* lower the metapath strip height */
1639	DEALLOC_FILL_MP = 2,  /* Fill in the metapath to the given height. */
1640	DEALLOC_DONE = 3,       /* process complete */
1641};
1642
1643static inline void
1644metapointer_range(struct metapath *mp, int height,
1645		  __u16 *start_list, unsigned int start_aligned,
1646		  __u16 *end_list, unsigned int end_aligned,
1647		  __be64 **start, __be64 **end)
1648{
1649	struct buffer_head *bh = mp->mp_bh[height];
1650	__be64 *first;
1651
1652	first = metaptr1(height, mp);
1653	*start = first;
1654	if (mp_eq_to_hgt(mp, start_list, height)) {
1655		bool keep_start = height < start_aligned;
1656		*start = first + start_list[height] + keep_start;
1657	}
1658	*end = (__be64 *)(bh->b_data + bh->b_size);
1659	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1660		bool keep_end = height < end_aligned;
1661		*end = first + end_list[height] + keep_end;
1662	}
1663}
1664
1665static inline bool walk_done(struct gfs2_sbd *sdp,
1666			     struct metapath *mp, int height,
1667			     __u16 *end_list, unsigned int end_aligned)
1668{
1669	__u16 end;
1670
1671	if (end_list) {
1672		bool keep_end = height < end_aligned;
1673		if (!mp_eq_to_hgt(mp, end_list, height))
1674			return false;
1675		end = end_list[height] + keep_end;
1676	} else
1677		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1678	return mp->mp_list[height] >= end;
1679}
1680
1681/**
1682 * punch_hole - deallocate blocks in a file
1683 * @ip: inode to truncate
1684 * @offset: the start of the hole
1685 * @length: the size of the hole (or 0 for truncate)
1686 *
1687 * Punch a hole into a file or truncate a file at a given position.  This
1688 * function operates in whole blocks (@offset and @length are rounded
1689 * accordingly); partially filled blocks must be cleared otherwise.
1690 *
1691 * This function works from the bottom up, and from the right to the left. In
1692 * other words, it strips off the highest layer (data) before stripping any of
1693 * the metadata. Doing it this way is best in case the operation is interrupted
1694 * by power failure, etc.  The dinode is rewritten in every transaction to
1695 * guarantee integrity.
1696 */
1697static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1698{
1699	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1700	u64 maxsize = sdp->sd_heightsize[ip->i_height];
1701	struct metapath mp = {};
1702	struct buffer_head *dibh, *bh;
1703	struct gfs2_holder rd_gh;
1704	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1705	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1706	__u16 start_list[GFS2_MAX_META_HEIGHT];
1707	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1708	unsigned int start_aligned, end_aligned;
1709	unsigned int strip_h = ip->i_height - 1;
1710	u32 btotal = 0;
1711	int ret, state;
1712	int mp_h; /* metapath buffers are read in to this height */
1713	u64 prev_bnr = 0;
1714	__be64 *start, *end;
1715
1716	if (offset >= maxsize) {
1717		/*
1718		 * The starting point lies beyond the allocated meta-data;
1719		 * there are no blocks to deallocate.
1720		 */
1721		return 0;
1722	}
1723
1724	/*
1725	 * The start position of the hole is defined by lblock, start_list, and
1726	 * start_aligned.  The end position of the hole is defined by lend,
1727	 * end_list, and end_aligned.
1728	 *
1729	 * start_aligned and end_aligned define down to which height the start
1730	 * and end positions are aligned to the metadata tree (i.e., the
1731	 * position is a multiple of the metadata granularity at the height
1732	 * above).  This determines at which heights additional meta pointers
1733	 * needs to be preserved for the remaining data.
1734	 */
1735
1736	if (length) {
1737		u64 end_offset = offset + length;
1738		u64 lend;
1739
1740		/*
1741		 * Clip the end at the maximum file size for the given height:
1742		 * that's how far the metadata goes; files bigger than that
1743		 * will have additional layers of indirection.
1744		 */
1745		if (end_offset > maxsize)
1746			end_offset = maxsize;
1747		lend = end_offset >> bsize_shift;
1748
1749		if (lblock >= lend)
1750			return 0;
1751
1752		find_metapath(sdp, lend, &mp, ip->i_height);
1753		end_list = __end_list;
1754		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1755
1756		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1757			if (end_list[mp_h])
1758				break;
1759		}
1760		end_aligned = mp_h;
1761	}
1762
1763	find_metapath(sdp, lblock, &mp, ip->i_height);
1764	memcpy(start_list, mp.mp_list, sizeof(start_list));
1765
1766	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1767		if (start_list[mp_h])
1768			break;
1769	}
1770	start_aligned = mp_h;
1771
1772	ret = gfs2_meta_inode_buffer(ip, &dibh);
1773	if (ret)
1774		return ret;
1775
1776	mp.mp_bh[0] = dibh;
1777	ret = lookup_metapath(ip, &mp);
1778	if (ret)
1779		goto out_metapath;
1780
1781	/* issue read-ahead on metadata */
1782	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1783		metapointer_range(&mp, mp_h, start_list, start_aligned,
1784				  end_list, end_aligned, &start, &end);
1785		gfs2_metapath_ra(ip->i_gl, start, end);
1786	}
1787
1788	if (mp.mp_aheight == ip->i_height)
1789		state = DEALLOC_MP_FULL; /* We have a complete metapath */
1790	else
1791		state = DEALLOC_FILL_MP; /* deal with partial metapath */
1792
1793	ret = gfs2_rindex_update(sdp);
1794	if (ret)
1795		goto out_metapath;
1796
1797	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1798	if (ret)
1799		goto out_metapath;
1800	gfs2_holder_mark_uninitialized(&rd_gh);
1801
1802	mp_h = strip_h;
1803
1804	while (state != DEALLOC_DONE) {
1805		switch (state) {
1806		/* Truncate a full metapath at the given strip height.
1807		 * Note that strip_h == mp_h in order to be in this state. */
1808		case DEALLOC_MP_FULL:
1809			bh = mp.mp_bh[mp_h];
1810			gfs2_assert_withdraw(sdp, bh);
1811			if (gfs2_assert_withdraw(sdp,
1812						 prev_bnr != bh->b_blocknr)) {
1813				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
1814					 "s_h:%u, mp_h:%u\n",
1815				       (unsigned long long)ip->i_no_addr,
1816				       prev_bnr, ip->i_height, strip_h, mp_h);
1817			}
1818			prev_bnr = bh->b_blocknr;
1819
1820			if (gfs2_metatype_check(sdp, bh,
1821						(mp_h ? GFS2_METATYPE_IN :
1822							GFS2_METATYPE_DI))) {
1823				ret = -EIO;
1824				goto out;
1825			}
1826
1827			/*
1828			 * Below, passing end_aligned as 0 gives us the
1829			 * metapointer range excluding the end point: the end
1830			 * point is the first metapath we must not deallocate!
1831			 */
1832
1833			metapointer_range(&mp, mp_h, start_list, start_aligned,
1834					  end_list, 0 /* end_aligned */,
1835					  &start, &end);
1836			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1837						 start, end,
1838						 mp_h != ip->i_height - 1,
1839						 &btotal);
1840
1841			/* If we hit an error or have just swept the dinode
1842			   buffer, exit. */
1843			if (ret || !mp_h) {
1844				state = DEALLOC_DONE;
1845				break;
1846			}
1847			state = DEALLOC_MP_LOWER;
1848			break;
1849
1850		/* lower the metapath strip height */
1851		case DEALLOC_MP_LOWER:
1852			/* We're done with the current buffer, so release it,
1853			   unless it's the dinode buffer. Then back up to the
1854			   previous pointer. */
1855			if (mp_h) {
1856				brelse(mp.mp_bh[mp_h]);
1857				mp.mp_bh[mp_h] = NULL;
1858			}
1859			/* If we can't get any lower in height, we've stripped
1860			   off all we can. Next step is to back up and start
1861			   stripping the previous level of metadata. */
1862			if (mp_h == 0) {
1863				strip_h--;
1864				memcpy(mp.mp_list, start_list, sizeof(start_list));
1865				mp_h = strip_h;
1866				state = DEALLOC_FILL_MP;
1867				break;
1868			}
1869			mp.mp_list[mp_h] = 0;
1870			mp_h--; /* search one metadata height down */
1871			mp.mp_list[mp_h]++;
1872			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1873				break;
1874			/* Here we've found a part of the metapath that is not
1875			 * allocated. We need to search at that height for the
1876			 * next non-null pointer. */
1877			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1878				state = DEALLOC_FILL_MP;
1879				mp_h++;
1880			}
1881			/* No more non-null pointers at this height. Back up
1882			   to the previous height and try again. */
1883			break; /* loop around in the same state */
1884
1885		/* Fill the metapath with buffers to the given height. */
1886		case DEALLOC_FILL_MP:
1887			/* Fill the buffers out to the current height. */
1888			ret = fillup_metapath(ip, &mp, mp_h);
1889			if (ret < 0)
1890				goto out;
1891
1892			/* On the first pass, issue read-ahead on metadata. */
1893			if (mp.mp_aheight > 1 && strip_h == ip->i_height - 1) {
1894				unsigned int height = mp.mp_aheight - 1;
1895
1896				/* No read-ahead for data blocks. */
1897				if (mp.mp_aheight - 1 == strip_h)
1898					height--;
1899
1900				for (; height >= mp.mp_aheight - ret; height--) {
1901					metapointer_range(&mp, height,
1902							  start_list, start_aligned,
1903							  end_list, end_aligned,
1904							  &start, &end);
1905					gfs2_metapath_ra(ip->i_gl, start, end);
1906				}
1907			}
1908
1909			/* If buffers were found for the entire strip height */
1910			if (mp.mp_aheight - 1 == strip_h) {
1911				state = DEALLOC_MP_FULL;
1912				break;
1913			}
1914			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1915				mp_h = mp.mp_aheight - 1;
1916
1917			/* If we find a non-null block pointer, crawl a bit
1918			   higher up in the metapath and try again, otherwise
1919			   we need to look lower for a new starting point. */
1920			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1921				mp_h++;
1922			else
1923				state = DEALLOC_MP_LOWER;
1924			break;
1925		}
1926	}
1927
1928	if (btotal) {
1929		if (current->journal_info == NULL) {
1930			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1931					       RES_QUOTA, 0);
1932			if (ret)
1933				goto out;
1934			down_write(&ip->i_rw_mutex);
1935		}
1936		gfs2_statfs_change(sdp, 0, +btotal, 0);
1937		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1938				  ip->i_inode.i_gid);
1939		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1940		gfs2_trans_add_meta(ip->i_gl, dibh);
1941		gfs2_dinode_out(ip, dibh->b_data);
1942		up_write(&ip->i_rw_mutex);
1943		gfs2_trans_end(sdp);
1944	}
1945
1946out:
1947	if (gfs2_holder_initialized(&rd_gh))
1948		gfs2_glock_dq_uninit(&rd_gh);
1949	if (current->journal_info) {
1950		up_write(&ip->i_rw_mutex);
1951		gfs2_trans_end(sdp);
1952		cond_resched();
1953	}
1954	gfs2_quota_unhold(ip);
1955out_metapath:
1956	release_metapath(&mp);
1957	return ret;
1958}
1959
1960static int trunc_end(struct gfs2_inode *ip)
1961{
1962	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1963	struct buffer_head *dibh;
1964	int error;
1965
1966	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1967	if (error)
1968		return error;
1969
1970	down_write(&ip->i_rw_mutex);
1971
1972	error = gfs2_meta_inode_buffer(ip, &dibh);
1973	if (error)
1974		goto out;
1975
1976	if (!i_size_read(&ip->i_inode)) {
1977		ip->i_height = 0;
1978		ip->i_goal = ip->i_no_addr;
1979		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1980		gfs2_ordered_del_inode(ip);
1981	}
1982	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1983	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
1984
1985	gfs2_trans_add_meta(ip->i_gl, dibh);
1986	gfs2_dinode_out(ip, dibh->b_data);
1987	brelse(dibh);
1988
1989out:
1990	up_write(&ip->i_rw_mutex);
1991	gfs2_trans_end(sdp);
1992	return error;
1993}
1994
1995/**
1996 * do_shrink - make a file smaller
1997 * @inode: the inode
1998 * @newsize: the size to make the file
1999 *
2000 * Called with an exclusive lock on @inode. The @newsize must
2001 * be equal to or smaller than the current inode size.
2002 *
2003 * Returns: errno
2004 */
2005
2006static int do_shrink(struct inode *inode, u64 newsize)
2007{
2008	struct gfs2_inode *ip = GFS2_I(inode);
2009	int error;
2010
2011	error = trunc_start(inode, newsize);
2012	if (error < 0)
2013		return error;
2014	if (gfs2_is_stuffed(ip))
2015		return 0;
2016
2017	error = punch_hole(ip, newsize, 0);
2018	if (error == 0)
2019		error = trunc_end(ip);
2020
2021	return error;
2022}
2023
2024void gfs2_trim_blocks(struct inode *inode)
2025{
2026	int ret;
2027
2028	ret = do_shrink(inode, inode->i_size);
2029	WARN_ON(ret != 0);
2030}
2031
2032/**
2033 * do_grow - Touch and update inode size
2034 * @inode: The inode
2035 * @size: The new size
2036 *
2037 * This function updates the timestamps on the inode and
2038 * may also increase the size of the inode. This function
2039 * must not be called with @size any smaller than the current
2040 * inode size.
2041 *
2042 * Although it is not strictly required to unstuff files here,
2043 * earlier versions of GFS2 had a bug in the stuffed file reading
2044 * code which would result in a buffer overrun if the size is larger
2045 * than the max stuffed file size. In order to prevent this from
2046 * occurring, such files are unstuffed, but in other cases we can
2047 * just update the inode size directly.
2048 *
2049 * Returns: 0 on success, or -ve on error
2050 */
2051
2052static int do_grow(struct inode *inode, u64 size)
2053{
2054	struct gfs2_inode *ip = GFS2_I(inode);
2055	struct gfs2_sbd *sdp = GFS2_SB(inode);
2056	struct gfs2_alloc_parms ap = { .target = 1, };
2057	struct buffer_head *dibh;
2058	int error;
2059	int unstuff = 0;
2060
2061	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
2062		error = gfs2_quota_lock_check(ip, &ap);
2063		if (error)
2064			return error;
2065
2066		error = gfs2_inplace_reserve(ip, &ap);
2067		if (error)
2068			goto do_grow_qunlock;
2069		unstuff = 1;
2070	}
2071
2072	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
2073				 (unstuff &&
2074				  gfs2_is_jdata(ip) ? RES_JDATA : 0) +
2075				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
2076				  0 : RES_QUOTA), 0);
2077	if (error)
2078		goto do_grow_release;
2079
2080	if (unstuff) {
2081		error = gfs2_unstuff_dinode(ip);
2082		if (error)
2083			goto do_end_trans;
2084	}
2085
2086	error = gfs2_meta_inode_buffer(ip, &dibh);
2087	if (error)
2088		goto do_end_trans;
2089
2090	truncate_setsize(inode, size);
2091	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2092	gfs2_trans_add_meta(ip->i_gl, dibh);
2093	gfs2_dinode_out(ip, dibh->b_data);
2094	brelse(dibh);
2095
2096do_end_trans:
2097	gfs2_trans_end(sdp);
2098do_grow_release:
2099	if (unstuff) {
2100		gfs2_inplace_release(ip);
2101do_grow_qunlock:
2102		gfs2_quota_unlock(ip);
2103	}
2104	return error;
2105}
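/*
 * Reservation example (editorial): growing a stuffed, non-jdata file past
 * gfs2_max_stuffed_size() with quotas enabled makes the gfs2_trans_begin()
 * call above reserve RES_DINODE + RES_STATFS + RES_RG_BIT + RES_QUOTA
 * journal blocks before the dinode is unstuffed.
 */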
2106
2107/**
2108 * gfs2_setattr_size - make a file a given size
2109 * @inode: the inode
2110 * @newsize: the size to make the file
2111 *
2112 * The file size can grow, shrink, or stay the same size. This
2113 * is called holding i_rwsem and an exclusive glock on the inode
2114 * in question.
2115 *
2116 * Returns: errno
2117 */
2118
2119int gfs2_setattr_size(struct inode *inode, u64 newsize)
2120{
2121	struct gfs2_inode *ip = GFS2_I(inode);
2122	int ret;
2123
2124	BUG_ON(!S_ISREG(inode->i_mode));
2125
2126	ret = inode_newsize_ok(inode, newsize);
2127	if (ret)
2128		return ret;
2129
2130	inode_dio_wait(inode);
2131
2132	ret = gfs2_qa_get(ip);
2133	if (ret)
2134		goto out;
2135
2136	if (newsize >= inode->i_size) {
2137		ret = do_grow(inode, newsize);
2138		goto out;
2139	}
2140
2141	ret = do_shrink(inode, newsize);
2142out:
2143	gfs2_rs_delete(ip);
2144	gfs2_qa_put(ip);
2145	return ret;
2146}
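/*
 * Illustrative caller sketch (editorial, not from this file): a setattr
 * path is expected to hold i_rwsem and an exclusive glock before calling
 * in, along the lines of:
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 *	if (!error) {
 *		error = gfs2_setattr_size(inode, attr->ia_size);
 *		gfs2_glock_dq_uninit(&gh);
 *	}
 */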
2147
2148int gfs2_truncatei_resume(struct gfs2_inode *ip)
2149{
2150	int error;
2151	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
2152	if (!error)
2153		error = trunc_end(ip);
2154	return error;
2155}
2156
2157int gfs2_file_dealloc(struct gfs2_inode *ip)
2158{
2159	return punch_hole(ip, 0, 0);
2160}
2161
2162/**
2163 * gfs2_free_journal_extents - Free cached journal bmap info
2164 * @jd: The journal
2165 *
2166 */
2167
2168void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
2169{
2170	struct gfs2_journal_extent *jext;
2171
2172	while(!list_empty(&jd->extent_list)) {
2173		jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2174		list_del(&jext->list);
2175		kfree(jext);
2176	}
2177}
2178
2179/**
2180 * gfs2_add_jextent - Add or merge a new extent to extent cache
2181 * @jd: The journal descriptor
2182 * @lblock: The logical block at start of new extent
2183 * @dblock: The physical block at start of new extent
2184 * @blocks: Size of extent in fs blocks
2185 *
2186 * Returns: 0 on success or -ENOMEM
2187 */
2188
2189static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
2190{
2191	struct gfs2_journal_extent *jext;
2192
2193	if (!list_empty(&jd->extent_list)) {
2194		jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
2195		if ((jext->dblock + jext->blocks) == dblock) {
2196			jext->blocks += blocks;
2197			return 0;
2198		}
2199	}
2200
2201	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
2202	if (jext == NULL)
2203		return -ENOMEM;
2204	jext->dblock = dblock;
2205	jext->lblock = lblock;
2206	jext->blocks = blocks;
2207	list_add_tail(&jext->list, &jd->extent_list);
2208	jd->nr_extents++;
2209	return 0;
2210}
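/*
 * Example (editorial): after gfs2_add_jextent(jd, 0, 100, 8), a second
 * call gfs2_add_jextent(jd, 8, 108, 4) sees that the cached extent ends
 * at dblock 108, so it is merged into one 12-block extent and
 * jd->nr_extents stays at 1.
 */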
2211
2212/**
2213 * gfs2_map_journal_extents - Cache journal bmap info
2214 * @sdp: The super block
2215 * @jd: The journal to map
2216 *
2217 * Create a reusable "extent" mapping from all logical
2218 * blocks to all physical blocks for the given journal.  This will save
2219 * us time when writing journal blocks.  Most journals will have only one
2220 * extent that maps all their logical blocks.  That's because mkfs.gfs2
2221 * arranges the journal blocks sequentially to maximize performance.
2222 * So the extent would map the first block for the entire file length.
2223 * However, gfs2_jadd can run while other file activity is happening, so
2224 * those journals may not be sequential.  Less likely is the case where
2225 * the users created their own journals by mounting the metafs and
2226 * laying it out.  But it's still possible.  These journals might have
2227 * several extents.
2228 *
2229 * Returns: 0 on success, or error on failure
2230 */
2231
2232int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
2233{
2234	u64 lblock = 0;
2235	u64 lblock_stop;
2236	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
2237	struct buffer_head bh;
2238	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
2239	u64 size;
2240	int rc;
2241	ktime_t start, end;
2242
2243	start = ktime_get();
2244	lblock_stop = i_size_read(jd->jd_inode) >> shift;
2245	size = (lblock_stop - lblock) << shift;
2246	jd->nr_extents = 0;
2247	WARN_ON(!list_empty(&jd->extent_list));
2248
2249	do {
2250		bh.b_state = 0;
2251		bh.b_blocknr = 0;
2252		bh.b_size = size;
2253		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
2254		if (rc || !buffer_mapped(&bh))
2255			goto fail;
2256		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
2257		if (rc)
2258			goto fail;
2259		size -= bh.b_size;
2260		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2261	} while(size > 0);
2262
2263	end = ktime_get();
2264	fs_info(sdp, "journal %d mapped with %u extents in %lldms\n", jd->jd_jid,
2265		jd->nr_extents, ktime_ms_delta(end, start));
2266	return 0;
2267
2268fail:
2269	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
2270		rc, jd->jd_jid,
2271		(unsigned long long)(i_size_read(jd->jd_inode) - size),
2272		jd->nr_extents);
2273	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
2274		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
2275		bh.b_state, (unsigned long long)bh.b_size);
2276	gfs2_free_journal_extents(jd);
2277	return rc;
2278}
2279
2280/**
2281 * gfs2_write_alloc_required - figure out if a write will require an allocation
2282 * @ip: the file being written to
2283 * @offset: the offset to write to
2284 * @len: the number of bytes being written
2285 *
2286 * Returns: 1 if an alloc is required, 0 otherwise
2287 */
2288
2289int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
2290			      unsigned int len)
2291{
2292	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2293	struct buffer_head bh;
2294	unsigned int shift;
2295	u64 lblock, lblock_stop, size;
2296	u64 end_of_file;
2297
2298	if (!len)
2299		return 0;
2300
2301	if (gfs2_is_stuffed(ip)) {
2302		if (offset + len > gfs2_max_stuffed_size(ip))
2303			return 1;
2304		return 0;
2305	}
2306
2307	shift = sdp->sd_sb.sb_bsize_shift;
2308	BUG_ON(gfs2_is_dir(ip));
2309	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
2310	lblock = offset >> shift;
2311	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
2312	if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
2313		return 1;
2314
2315	size = (lblock_stop - lblock) << shift;
2316	do {
2317		bh.b_state = 0;
2318		bh.b_size = size;
2319		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
2320		if (!buffer_mapped(&bh))
2321			return 1;
2322		size -= bh.b_size;
2323		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
2324	} while(size > 0);
2325
2326	return 0;
2327}
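/*
 * Example (editorial): with 4096-byte blocks, a 100-byte write at
 * offset 8100 spans lblock 1 through lblock 2; the loop above returns 1
 * as soon as gfs2_block_map() leaves one of those blocks unmapped, and
 * 0 if the whole range is already allocated.
 */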
2328
2329static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
2330{
2331	struct gfs2_inode *ip = GFS2_I(inode);
2332	struct buffer_head *dibh;
2333	int error;
2334
2335	if (offset >= inode->i_size)
2336		return 0;
2337	if (offset + length > inode->i_size)
2338		length = inode->i_size - offset;
2339
2340	error = gfs2_meta_inode_buffer(ip, &dibh);
2341	if (error)
2342		return error;
2343	gfs2_trans_add_meta(ip->i_gl, dibh);
2344	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
2345	       length);
2346	brelse(dibh);
2347	return 0;
2348}
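/*
 * Example (editorial): for a stuffed 200-byte file, punching offset 50,
 * length 500 clips the length to 150 and zeroes those bytes directly in
 * the dinode block, just past the struct gfs2_dinode header.
 */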
2349
2350static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
2351					 loff_t length)
2352{
2353	struct gfs2_sbd *sdp = GFS2_SB(inode);
2354	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
2355	int error;
2356
2357	while (length) {
2358		struct gfs2_trans *tr;
2359		loff_t chunk;
2360		unsigned int offs;
2361
2362		chunk = length;
2363		if (chunk > max_chunk)
2364			chunk = max_chunk;
2365
2366		offs = offset & ~PAGE_MASK;
2367		if (offs && chunk > PAGE_SIZE)
2368			chunk = offs + ((chunk - offs) & PAGE_MASK);
2369
2370		truncate_pagecache_range(inode, offset, chunk);
2371		offset += chunk;
2372		length -= chunk;
2373
2374		tr = current->journal_info;
2375		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2376			continue;
2377
2378		gfs2_trans_end(sdp);
2379		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2380		if (error)
2381			return error;
2382	}
2383	return 0;
2384}
2385
2386int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2387{
2388	struct inode *inode = file_inode(file);
2389	struct gfs2_inode *ip = GFS2_I(inode);
2390	struct gfs2_sbd *sdp = GFS2_SB(inode);
2391	unsigned int blocksize = i_blocksize(inode);
2392	loff_t start, end;
2393	int error;
2394
2395	if (!gfs2_is_stuffed(ip)) {
2396		unsigned int start_off, end_len;
2397
2398		start_off = offset & (blocksize - 1);
2399		end_len = (offset + length) & (blocksize - 1);
2400		if (start_off) {
2401			unsigned int len = length;
2402			if (length > blocksize - start_off)
2403				len = blocksize - start_off;
2404			error = gfs2_block_zero_range(inode, offset, len);
2405			if (error)
2406				goto out;
2407			if (start_off + length < blocksize)
2408				end_len = 0;
2409		}
2410		if (end_len) {
2411			error = gfs2_block_zero_range(inode,
2412				offset + length - end_len, end_len);
2413			if (error)
2414				goto out;
2415		}
2416	}
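	/*
	 * Example for the edge zeroing above (editorial): punching
	 * offset 1000, length 3000 with a 4096-byte block size gives
	 * start_off == 1000, so gfs2_block_zero_range() zeroes the byte
	 * range [1000, 4000); offset + length stays inside the first
	 * block, so end_len is reset to 0 and the second call is skipped.
	 */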
2417
2418	start = round_down(offset, blocksize);
2419	end = round_up(offset + length, blocksize) - 1;
2420	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2421	if (error)
2422		return error;
2423
2424	if (gfs2_is_jdata(ip))
2425		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2426					 GFS2_JTRUNC_REVOKES);
2427	else
2428		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2429	if (error)
2430		return error;
2431
2432	if (gfs2_is_stuffed(ip)) {
2433		error = stuffed_zero_range(inode, offset, length);
2434		if (error)
2435			goto out;
2436	}
2437
2438	if (gfs2_is_jdata(ip)) {
2439		BUG_ON(!current->journal_info);
2440		gfs2_journaled_truncate_range(inode, offset, length);
2441	} else
2442		truncate_pagecache_range(inode, offset, offset + length - 1);
2443
2444	file_update_time(file);
2445	mark_inode_dirty(inode);
2446
2447	if (current->journal_info)
2448		gfs2_trans_end(sdp);
2449
2450	if (!gfs2_is_stuffed(ip))
2451		error = punch_hole(ip, offset, length);
2452
2453out:
2454	if (current->journal_info)
2455		gfs2_trans_end(sdp);
2456	return error;
2457}
2458
2459static int gfs2_map_blocks(struct iomap_writepage_ctx *wpc, struct inode *inode,
2460		loff_t offset)
2461{
2462	int ret;
2463
2464	if (WARN_ON_ONCE(gfs2_is_stuffed(GFS2_I(inode))))
2465		return -EIO;
2466
2467	if (offset >= wpc->iomap.offset &&
2468	    offset < wpc->iomap.offset + wpc->iomap.length)
2469		return 0;
2470
2471	memset(&wpc->iomap, 0, sizeof(wpc->iomap));
2472	ret = gfs2_iomap_get(inode, offset, INT_MAX, &wpc->iomap);
2473	return ret;
2474}
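/*
 * Example (editorial): while writing back a contiguous dirty range, only
 * the first gfs2_map_blocks() call does a metadata lookup; later calls
 * for offsets that still fall inside wpc->iomap return 0 immediately and
 * reuse the cached mapping.
 */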
2475
2476const struct iomap_writeback_ops gfs2_writeback_ops = {
2477	.map_blocks		= gfs2_map_blocks,
2478};
v4.17
 
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/spinlock.h>
  11#include <linux/completion.h>
  12#include <linux/buffer_head.h>
  13#include <linux/blkdev.h>
  14#include <linux/gfs2_ondisk.h>
  15#include <linux/crc32.h>
  16#include <linux/iomap.h>
 
  17
  18#include "gfs2.h"
  19#include "incore.h"
  20#include "bmap.h"
  21#include "glock.h"
  22#include "inode.h"
  23#include "meta_io.h"
  24#include "quota.h"
  25#include "rgrp.h"
  26#include "log.h"
  27#include "super.h"
  28#include "trans.h"
  29#include "dir.h"
  30#include "util.h"
 
  31#include "trace_gfs2.h"
  32
  33/* This doesn't need to be that large as max 64 bit pointers in a 4k
  34 * block is 512, so __u16 is fine for that. It saves stack space to
  35 * keep it small.
  36 */
  37struct metapath {
  38	struct buffer_head *mp_bh[GFS2_MAX_META_HEIGHT];
  39	__u16 mp_list[GFS2_MAX_META_HEIGHT];
  40	int mp_fheight; /* find_metapath height */
  41	int mp_aheight; /* actual height (lookup height) */
  42};
  43
 
 
  44/**
  45 * gfs2_unstuffer_page - unstuff a stuffed inode into a block cached by a page
  46 * @ip: the inode
  47 * @dibh: the dinode buffer
  48 * @block: the block number that was allocated
  49 * @page: The (optional) page. This is looked up if @page is NULL
  50 *
  51 * Returns: errno
  52 */
  53
  54static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
  55			       u64 block, struct page *page)
  56{
  57	struct inode *inode = &ip->i_inode;
  58	struct buffer_head *bh;
  59	int release = 0;
  60
  61	if (!page || page->index) {
  62		page = find_or_create_page(inode->i_mapping, 0, GFP_NOFS);
  63		if (!page)
  64			return -ENOMEM;
  65		release = 1;
  66	}
  67
  68	if (!PageUptodate(page)) {
  69		void *kaddr = kmap(page);
  70		u64 dsize = i_size_read(inode);
  71 
  72		if (dsize > gfs2_max_stuffed_size(ip))
  73			dsize = gfs2_max_stuffed_size(ip);
  74
  75		memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
  76		memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
  77		kunmap(page);
  78
  79		SetPageUptodate(page);
  80	}
  81
  82	if (!page_has_buffers(page))
  83		create_empty_buffers(page, BIT(inode->i_blkbits),
  84				     BIT(BH_Uptodate));
  85
  86	bh = page_buffers(page);
  87
  88	if (!buffer_mapped(bh))
  89		map_bh(bh, inode->i_sb, block);
  90
  91	set_buffer_uptodate(bh);
  92	if (!gfs2_is_jdata(ip))
  93		mark_buffer_dirty(bh);
  94	if (!gfs2_is_writeback(ip))
  95		gfs2_trans_add_data(ip->i_gl, bh);
  96
  97	if (release) {
  98		unlock_page(page);
  99		put_page(page);
 100	}
 101
 102	return 0;
 103}
 104
 105/**
 106 * gfs2_unstuff_dinode - Unstuff a dinode when the data has grown too big
 107 * @ip: The GFS2 inode to unstuff
 108 * @page: The (optional) page. This is looked up if the @page is NULL
 109 *
 110 * This routine unstuffs a dinode and returns it to a "normal" state such
 111 * that the height can be grown in the traditional way.
 112 *
 113 * Returns: errno
 114 */
 115
 116int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page)
 117{
 118	struct buffer_head *bh, *dibh;
 119	struct gfs2_dinode *di;
 120	u64 block = 0;
 121	int isdir = gfs2_is_dir(ip);
 122	int error;
 123
 124	down_write(&ip->i_rw_mutex);
 125
 126	error = gfs2_meta_inode_buffer(ip, &dibh);
 127	if (error)
 128		goto out;
 129
 130	if (i_size_read(&ip->i_inode)) {
 131		/* Get a free block, fill it with the stuffed data,
 132		   and write it out to disk */
 133
 134		unsigned int n = 1;
 135		error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
 136		if (error)
 137			goto out_brelse;
 138		if (isdir) {
 139			gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
 140			error = gfs2_dir_get_new_buffer(ip, block, &bh);
 141			if (error)
 142				goto out_brelse;
 143			gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_meta_header),
 144					      dibh, sizeof(struct gfs2_dinode));
 145			brelse(bh);
 146		} else {
 147			error = gfs2_unstuffer_page(ip, dibh, block, page);
 148			if (error)
 149				goto out_brelse;
 150		}
 151	}
 152
 153	/*  Set up the pointer to the new block  */
 154
 155	gfs2_trans_add_meta(ip->i_gl, dibh);
 156	di = (struct gfs2_dinode *)dibh->b_data;
 157	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 158
 159	if (i_size_read(&ip->i_inode)) {
 160		*(__be64 *)(di + 1) = cpu_to_be64(block);
 161		gfs2_add_inode_blocks(&ip->i_inode, 1);
 162		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
 163	}
 164
 165	ip->i_height = 1;
 166	di->di_height = cpu_to_be16(1);
 167
 168out_brelse:
 169	brelse(dibh);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 170out:
 171	up_write(&ip->i_rw_mutex);
 172	return error;
 173}
 174
 175
 176/**
 177 * find_metapath - Find path through the metadata tree
 178 * @sdp: The superblock
 
 179 * @mp: The metapath to return the result in
 180 * @block: The disk block to look up
 181 * @height: The pre-calculated height of the metadata tree
 182 *
 183 *   This routine returns a struct metapath structure that defines a path
 184 *   through the metadata of inode "ip" to get to block "block".
 185 *
 186 *   Example:
 187 *   Given:  "ip" is a height 3 file, "offset" is 101342453, and this is a
 188 *   filesystem with a blocksize of 4096.
 189 *
 190 *   find_metapath() would return a struct metapath structure set to:
 191 *   mp_offset = 101342453, mp_height = 3, mp_list[0] = 0, mp_list[1] = 48,
 192 *   and mp_list[2] = 165.
 193 *
 194 *   That means that in order to get to the block containing the byte at
 195 *   offset 101342453, we would load the indirect block pointed to by pointer
 196 *   0 in the dinode.  We would then load the indirect block pointed to by
 197 *   pointer 48 in that indirect block.  We would then load the data block
 198 *   pointed to by pointer 165 in that indirect block.
 199 *
 200 *             ----------------------------------------
 201 *             | Dinode |                             |
 202 *             |        |                            4|
 203 *             |        |0 1 2 3 4 5                 9|
 204 *             |        |                            6|
 205 *             ----------------------------------------
 206 *                       |
 207 *                       |
 208 *                       V
 209 *             ----------------------------------------
 210 *             | Indirect Block                       |
 211 *             |                                     5|
 212 *             |            4 4 4 4 4 5 5            1|
 213 *             |0           5 6 7 8 9 0 1            2|
 214 *             ----------------------------------------
 215 *                                |
 216 *                                |
 217 *                                V
 218 *             ----------------------------------------
 219 *             | Indirect Block                       |
 220 *             |                         1 1 1 1 1   5|
 221 *             |                         6 6 6 6 6   1|
 222 *             |0                        3 4 5 6 7   2|
 223 *             ----------------------------------------
 224 *                                           |
 225 *                                           |
 226 *                                           V
 227 *             ----------------------------------------
 228 *             | Data block containing offset         |
 229 *             |            101342453                 |
 230 *             |                                      |
 231 *             |                                      |
 232 *             ----------------------------------------
 233 *
 234 */
 235
 236static void find_metapath(const struct gfs2_sbd *sdp, u64 block,
 237			  struct metapath *mp, unsigned int height)
 238{
 239	unsigned int i;
 240
 241	mp->mp_fheight = height;
 242	for (i = height; i--;)
 243		mp->mp_list[i] = do_div(block, sdp->sd_inptrs);
 244}
 245
 246static inline unsigned int metapath_branch_start(const struct metapath *mp)
 247{
 248	if (mp->mp_list[0] == 0)
 249		return 2;
 250	return 1;
 251}
 252
 253/**
 254 * metaptr1 - Return the first possible metadata pointer in a metapath buffer
 255 * @height: The metadata height (0 = dinode)
 256 * @mp: The metapath
 257 */
 258static inline __be64 *metaptr1(unsigned int height, const struct metapath *mp)
 259{
 260	struct buffer_head *bh = mp->mp_bh[height];
 261	if (height == 0)
 262		return ((__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)));
 263	return ((__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)));
 264}
 265
 266/**
 267 * metapointer - Return pointer to start of metadata in a buffer
 268 * @height: The metadata height (0 = dinode)
 269 * @mp: The metapath
 270 *
 271 * Return a pointer to the block number of the next height of the metadata
 272 * tree given a buffer containing the pointer to the current height of the
 273 * metadata tree.
 274 */
 275
 276static inline __be64 *metapointer(unsigned int height, const struct metapath *mp)
 277{
 278	__be64 *p = metaptr1(height, mp);
 279	return p + mp->mp_list[height];
 280}
 281
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 282static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
 283{
 284	const __be64 *t;
 285
 286	for (t = start; t < end; t++) {
 287		struct buffer_head *rabh;
 288
 289		if (!*t)
 290			continue;
 291
 292		rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE);
 293		if (trylock_buffer(rabh)) {
 294			if (!buffer_uptodate(rabh)) {
 295				rabh->b_end_io = end_buffer_read_sync;
 296				submit_bh(REQ_OP_READ,
 297					  REQ_RAHEAD | REQ_META | REQ_PRIO,
 298					  rabh);
 299				continue;
 300			}
 301			unlock_buffer(rabh);
 302		}
 303		brelse(rabh);
 304	}
 305}
 306
 307static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
 308			     unsigned int x, unsigned int h)
 309{
 310	for (; x < h; x++) {
 311		__be64 *ptr = metapointer(x, mp);
 312		u64 dblock = be64_to_cpu(*ptr);
 313		int ret;
 314
 315		if (!dblock)
 316			break;
 317		ret = gfs2_meta_indirect_buffer(ip, x + 1, dblock, &mp->mp_bh[x + 1]);
 318		if (ret)
 319			return ret;
 320	}
 321	mp->mp_aheight = x + 1;
 322	return 0;
 323}
 324
 325/**
 326 * lookup_metapath - Walk the metadata tree to a specific point
 327 * @ip: The inode
 328 * @mp: The metapath
 329 *
 330 * Assumes that the inode's buffer has already been looked up and
 331 * hooked onto mp->mp_bh[0] and that the metapath has been initialised
 332 * by find_metapath().
 333 *
 334 * If this function encounters part of the tree which has not been
 335 * allocated, it returns the current height of the tree at the point
 336 * at which it found the unallocated block. Blocks which are found are
 337 * added to the mp->mp_bh[] list.
 338 *
 339 * Returns: error
 340 */
 341
 342static int lookup_metapath(struct gfs2_inode *ip, struct metapath *mp)
 343{
 344	return __fillup_metapath(ip, mp, 0, ip->i_height - 1);
 345}
 346
 347/**
 348 * fillup_metapath - fill up buffers for the metadata path to a specific height
 349 * @ip: The inode
 350 * @mp: The metapath
 351 * @h: The height to which it should be mapped
 352 *
 353 * Similar to lookup_metapath, but does lookups for a range of heights
 354 *
 355 * Returns: error or the number of buffers filled
 356 */
 357
 358static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
 359{
 360	unsigned int x = 0;
 361	int ret;
 362
 363	if (h) {
 364		/* find the first buffer we need to look up. */
 365		for (x = h - 1; x > 0; x--) {
 366			if (mp->mp_bh[x])
 367				break;
 368		}
 369	}
 370	ret = __fillup_metapath(ip, mp, x, h);
 371	if (ret)
 372		return ret;
 373	return mp->mp_aheight - x - 1;
 374}
 375
 376static inline void release_metapath(struct metapath *mp)
 
 
 
 
 
 
 
 
 
 
 
 
 
 377{
 378	int i;
 379
 380	for (i = 0; i < GFS2_MAX_META_HEIGHT; i++) {
 381		if (mp->mp_bh[i] == NULL)
 382			break;
 383		brelse(mp->mp_bh[i]);
 
 384	}
 385}
 386
 387/**
 388 * gfs2_extent_length - Returns length of an extent of blocks
 389 * @start: Start of the buffer
 390 * @len: Length of the buffer in bytes
 391 * @ptr: Current position in the buffer
 392 * @limit: Max extent length to return (0 = unlimited)
 393 * @eob: Set to 1 if we hit "end of block"
 394 *
 395 * If the first block is zero (unallocated) it will return the number of
 396 * unallocated blocks in the extent, otherwise it will return the number
 397 * of contiguous blocks in the extent.
 398 *
 399 * Returns: The length of the extent (minimum of one block)
 400 */
 401
 402static inline unsigned int gfs2_extent_length(void *start, unsigned int len, __be64 *ptr, size_t limit, int *eob)
 403{
 404	const __be64 *end = (start + len);
 405	const __be64 *first = ptr;
 406	u64 d = be64_to_cpu(*ptr);
 407
 408	*eob = 0;
 409	do {
 410		ptr++;
 411		if (ptr >= end)
 412			break;
 413		if (limit && --limit == 0)
 414			break;
 415		if (d)
 416			d++;
 417	} while(be64_to_cpu(*ptr) == d);
 418	if (ptr >= end)
 419		*eob = 1;
 420	return (ptr - first);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 421}
 422
 423static inline void bmap_lock(struct gfs2_inode *ip, int create)
 
 424{
 425	if (create)
 426		down_write(&ip->i_rw_mutex);
 427	else
 428		down_read(&ip->i_rw_mutex);
 
 
 
 
 
 
 
 
 
 
 
 
 429}
 430
 431static inline void bmap_unlock(struct gfs2_inode *ip, int create)
 
 
 
 
 
 
 
 
 
 
 
 
 
 432{
 433	if (create)
 434		up_write(&ip->i_rw_mutex);
 
 
 
 
 
 
 
 
 
 435	else
 436		up_read(&ip->i_rw_mutex);
 
 
 
 
 
 
 437}
 438
 439static inline __be64 *gfs2_indirect_init(struct metapath *mp,
 440					 struct gfs2_glock *gl, unsigned int i,
 441					 unsigned offset, u64 bn)
 442{
 443	__be64 *ptr = (__be64 *)(mp->mp_bh[i - 1]->b_data +
 444		       ((i > 1) ? sizeof(struct gfs2_meta_header) :
 445				 sizeof(struct gfs2_dinode)));
 446	BUG_ON(i < 1);
 447	BUG_ON(mp->mp_bh[i] != NULL);
 448	mp->mp_bh[i] = gfs2_meta_new(gl, bn);
 449	gfs2_trans_add_meta(gl, mp->mp_bh[i]);
 450	gfs2_metatype_set(mp->mp_bh[i], GFS2_METATYPE_IN, GFS2_FORMAT_IN);
 451	gfs2_buffer_clear_tail(mp->mp_bh[i], sizeof(struct gfs2_meta_header));
 452	ptr += offset;
 453	*ptr = cpu_to_be64(bn);
 454	return ptr;
 455}
 456
 457enum alloc_state {
 458	ALLOC_DATA = 0,
 459	ALLOC_GROW_DEPTH = 1,
 460	ALLOC_GROW_HEIGHT = 2,
 461	/* ALLOC_UNSTUFF = 3,   TBD and rather complicated */
 462};
 463
 464/**
 465 * gfs2_bmap_alloc - Build a metadata tree of the requested height
 466 * @inode: The GFS2 inode
 467 * @lblock: The logical starting block of the extent
 468 * @bh_map: This is used to return the mapping details
 469 * @zero_new: True if newly allocated blocks should be zeroed
 470 * @mp: The metapath, with proper height information calculated
 471 * @maxlen: The max number of data blocks to alloc
 472 * @dblock: Pointer to return the resulting new block
 473 * @dblks: Pointer to return the number of blocks allocated
 474 *
 475 * In this routine we may have to alloc:
 476 *   i) Indirect blocks to grow the metadata tree height
 477 *  ii) Indirect blocks to fill in lower part of the metadata tree
 478 * iii) Data blocks
 479 *
 480 * The function is in two parts. The first part works out the total
 481 * number of blocks which we need. The second part does the actual
 482 * allocation asking for an extent at a time (if enough contiguous free
 483 * blocks are available, there will only be one request per bmap call)
 484 * and uses the state machine to initialise the blocks in order.
 
 
 
 
 
 
 
 
 
 485 *
 486 * Returns: errno on error
 487 */
 488
 489static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 490			    unsigned flags, struct metapath *mp)
 491{
 492	struct gfs2_inode *ip = GFS2_I(inode);
 493	struct gfs2_sbd *sdp = GFS2_SB(inode);
 494	struct buffer_head *dibh = mp->mp_bh[0];
 495	u64 bn;
 496	unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
 497	unsigned dblks = 0;
 498	unsigned ptrs_per_blk;
 499	const unsigned end_of_metadata = mp->mp_fheight - 1;
 
 500	enum alloc_state state;
 501	__be64 *ptr;
 502	__be64 zero_bn = 0;
 503	size_t maxlen = iomap->length >> inode->i_blkbits;
 504
 505	BUG_ON(mp->mp_aheight < 1);
 506	BUG_ON(dibh == NULL);
 
 507
 508	gfs2_trans_add_meta(ip->i_gl, dibh);
 509
 
 
 510	if (mp->mp_fheight == mp->mp_aheight) {
 511		struct buffer_head *bh;
 512		int eob;
 513
 514		/* Bottom indirect block exists, find unalloced extent size */
 515		ptr = metapointer(end_of_metadata, mp);
 516		bh = mp->mp_bh[end_of_metadata];
 517		dblks = gfs2_extent_length(bh->b_data, bh->b_size, ptr,
 518					   maxlen, &eob);
 519		BUG_ON(dblks < 1);
 520		state = ALLOC_DATA;
 521	} else {
 522		/* Need to allocate indirect blocks */
 523		ptrs_per_blk = mp->mp_fheight > 1 ? sdp->sd_inptrs :
 524			sdp->sd_diptrs;
 525		dblks = min(maxlen, (size_t)(ptrs_per_blk -
 526					     mp->mp_list[end_of_metadata]));
 527		if (mp->mp_fheight == ip->i_height) {
 528			/* Writing into existing tree, extend tree down */
 529			iblks = mp->mp_fheight - mp->mp_aheight;
 530			state = ALLOC_GROW_DEPTH;
 531		} else {
 532			/* Building up tree height */
 533			state = ALLOC_GROW_HEIGHT;
 534			iblks = mp->mp_fheight - ip->i_height;
 535			branch_start = metapath_branch_start(mp);
 536			iblks += (mp->mp_fheight - branch_start);
 537		}
 538	}
 539
 540	/* start of the second part of the function (state machine) */
 541
 542	blks = dblks + iblks;
 543	i = mp->mp_aheight;
 544	do {
 545		int error;
 546		n = blks - alloced;
 547		error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
 548		if (error)
 549			return error;
 550		alloced += n;
 551		if (state != ALLOC_DATA || gfs2_is_jdata(ip))
 552			gfs2_trans_add_unrevoke(sdp, bn, n);
 553		switch (state) {
 554		/* Growing height of tree */
 555		case ALLOC_GROW_HEIGHT:
 556			if (i == 1) {
 557				ptr = (__be64 *)(dibh->b_data +
 558						 sizeof(struct gfs2_dinode));
 559				zero_bn = *ptr;
 560			}
 561			for (; i - 1 < mp->mp_fheight - ip->i_height && n > 0;
 562			     i++, n--)
 563				gfs2_indirect_init(mp, ip->i_gl, i, 0, bn++);
 564			if (i - 1 == mp->mp_fheight - ip->i_height) {
 565				i--;
 566				gfs2_buffer_copy_tail(mp->mp_bh[i],
 567						sizeof(struct gfs2_meta_header),
 568						dibh, sizeof(struct gfs2_dinode));
 569				gfs2_buffer_clear_tail(dibh,
 570						sizeof(struct gfs2_dinode) +
 571						sizeof(__be64));
 572				ptr = (__be64 *)(mp->mp_bh[i]->b_data +
 573					sizeof(struct gfs2_meta_header));
 574				*ptr = zero_bn;
 575				state = ALLOC_GROW_DEPTH;
 576				for(i = branch_start; i < mp->mp_fheight; i++) {
 577					if (mp->mp_bh[i] == NULL)
 578						break;
 579					brelse(mp->mp_bh[i]);
 580					mp->mp_bh[i] = NULL;
 581				}
 582				i = branch_start;
 583			}
 584			if (n == 0)
 585				break;
 586		/* Branching from existing tree */
 587		case ALLOC_GROW_DEPTH:
 588			if (i > 1 && i < mp->mp_fheight)
 589				gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]);
 590			for (; i < mp->mp_fheight && n > 0; i++, n--)
 591				gfs2_indirect_init(mp, ip->i_gl, i,
 592						   mp->mp_list[i-1], bn++);
 593			if (i == mp->mp_fheight)
 594				state = ALLOC_DATA;
 595			if (n == 0)
 596				break;
 597		/* Tree complete, adding data blocks */
 598		case ALLOC_DATA:
 599			BUG_ON(n > dblks);
 600			BUG_ON(mp->mp_bh[end_of_metadata] == NULL);
 601			gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[end_of_metadata]);
 602			dblks = n;
 603			ptr = metapointer(end_of_metadata, mp);
 604			iomap->addr = bn << inode->i_blkbits;
 605			iomap->flags |= IOMAP_F_NEW;
 606			while (n-- > 0)
 607				*ptr++ = cpu_to_be64(bn++);
 608			break;
 609		}
 610	} while (iomap->addr == IOMAP_NULL_ADDR);
 611
 
 612	iomap->length = (u64)dblks << inode->i_blkbits;
 613	ip->i_height = mp->mp_fheight;
 614	gfs2_add_inode_blocks(&ip->i_inode, alloced);
 615	gfs2_dinode_out(ip, mp->mp_bh[0]->b_data);
 616	return 0;
 
 
 617}
 618
 
 
 619/**
 620 * hole_size - figure out the size of a hole
 621 * @inode: The inode
 622 * @lblock: The logical starting block number
 623 * @mp: The metapath
 
 624 *
 625 * Returns: The hole size in bytes
 626 *
 
 627 */
 628static u64 hole_size(struct inode *inode, sector_t lblock, struct metapath *mp)
 629{
 630	struct gfs2_inode *ip = GFS2_I(inode);
 631	struct gfs2_sbd *sdp = GFS2_SB(inode);
 632	struct metapath mp_eof;
 633	u64 factor = 1;
 634	int hgt;
 635	u64 holesz = 0;
 636	const __be64 *first, *end, *ptr;
 637	const struct buffer_head *bh;
 638	u64 lblock_stop = (i_size_read(inode) - 1) >> inode->i_blkbits;
 639	int zeroptrs;
 640	bool done = false;
 641
 642	/* Get another metapath, to the very last byte */
 643	find_metapath(sdp, lblock_stop, &mp_eof, ip->i_height);
 644	for (hgt = ip->i_height - 1; hgt >= 0 && !done; hgt--) {
 645		bh = mp->mp_bh[hgt];
 646		if (bh) {
 647			zeroptrs = 0;
 648			first = metapointer(hgt, mp);
 649			end = (const __be64 *)(bh->b_data + bh->b_size);
 650
 651			for (ptr = first; ptr < end; ptr++) {
 652				if (*ptr) {
 653					done = true;
 654					break;
 655				} else {
 656					zeroptrs++;
 657				}
 658			}
 659		} else {
 660			zeroptrs = sdp->sd_inptrs;
 661		}
 662		if (factor * zeroptrs >= lblock_stop - lblock + 1) {
 663			holesz = lblock_stop - lblock + 1;
 664			break;
 665		}
 666		holesz += factor * zeroptrs;
 667
 668		factor *= sdp->sd_inptrs;
 669		if (hgt && (mp->mp_list[hgt - 1] < mp_eof.mp_list[hgt - 1]))
 670			(mp->mp_list[hgt - 1])++;
 
 
 
 
 671	}
 672	return holesz << inode->i_blkbits;
 673}
 674
 675static void gfs2_stuffed_iomap(struct inode *inode, struct iomap *iomap)
 676{
 677	struct gfs2_inode *ip = GFS2_I(inode);
 678
 679	iomap->addr = (ip->i_no_addr << inode->i_blkbits) +
 680		      sizeof(struct gfs2_dinode);
 681	iomap->offset = 0;
 682	iomap->length = i_size_read(inode);
 683	iomap->type = IOMAP_MAPPED;
 684	iomap->flags = IOMAP_F_DATA_INLINE;
 685}
 686
 687/**
 688 * gfs2_iomap_begin - Map blocks from an inode to disk blocks
 689 * @inode: The inode
 690 * @pos: Starting position in bytes
 691 * @length: Length to map, in bytes
 692 * @flags: iomap flags
 693 * @iomap: The iomap structure
 
 694 *
 695 * Returns: errno
 696 */
 697int gfs2_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
 698		     unsigned flags, struct iomap *iomap)
 
 699{
 700	struct gfs2_inode *ip = GFS2_I(inode);
 701	struct gfs2_sbd *sdp = GFS2_SB(inode);
 702	struct metapath mp = { .mp_aheight = 1, };
 703	unsigned int factor = sdp->sd_sb.sb_bsize;
 704	const u64 *arr = sdp->sd_heightsize;
 705	__be64 *ptr;
 706	sector_t lblock;
 707	sector_t lend;
 708	int ret = 0;
 709	int eob;
 710	unsigned int len;
 711	struct buffer_head *bh;
 712	u8 height;
 713
 714	trace_gfs2_iomap_start(ip, pos, length, flags);
 715	if (!length) {
 716		ret = -EINVAL;
 717		goto out;
 718	}
 
 
 
 
 719
 720	if (gfs2_is_stuffed(ip)) {
 721		if (flags & IOMAP_REPORT) {
 722			gfs2_stuffed_iomap(inode, iomap);
 723			if (pos >= iomap->length)
 724				ret = -ENOENT;
 725			goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 726		}
 727		BUG_ON(!(flags & IOMAP_WRITE));
 
 
 
 
 728	}
 729
 
 730	lblock = pos >> inode->i_blkbits;
 731	lend = (pos + length + sdp->sd_sb.sb_bsize - 1) >> inode->i_blkbits;
 732
 733	iomap->offset = lblock << inode->i_blkbits;
 734	iomap->addr = IOMAP_NULL_ADDR;
 735	iomap->type = IOMAP_HOLE;
 736	iomap->length = (u64)(lend - lblock) << inode->i_blkbits;
 737	iomap->flags = IOMAP_F_MERGED;
 738	bmap_lock(ip, flags & IOMAP_WRITE);
 739
 740	/*
 741	 * Directory data blocks have a struct gfs2_meta_header header, so the
 742	 * remaining size is smaller than the filesystem block size.  Logical
 743	 * block numbers for directories are in units of this remaining size!
 744	 */
 745	if (gfs2_is_dir(ip)) {
 746		factor = sdp->sd_jbsize;
 747		arr = sdp->sd_jheightsize;
 748	}
 749
 750	ret = gfs2_meta_inode_buffer(ip, &mp.mp_bh[0]);
 751	if (ret)
 752		goto out_release;
 753
 754	height = ip->i_height;
 755	while ((lblock + 1) * factor > arr[height])
 756		height++;
 757	find_metapath(sdp, lblock, &mp, height);
 758	if (height > ip->i_height || gfs2_is_stuffed(ip))
 759		goto do_alloc;
 760
 761	ret = lookup_metapath(ip, &mp);
 762	if (ret)
 763		goto out_release;
 764
 765	if (mp.mp_aheight != ip->i_height)
 766		goto do_alloc;
 767
 768	ptr = metapointer(ip->i_height - 1, &mp);
 769	if (*ptr == 0)
 770		goto do_alloc;
 771
 
 
 
 
 
 772	iomap->type = IOMAP_MAPPED;
 773	iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
 774
 775	bh = mp.mp_bh[ip->i_height - 1];
 776	len = gfs2_extent_length(bh->b_data, bh->b_size, ptr, lend - lblock, &eob);
 777	if (eob)
 778		iomap->flags |= IOMAP_F_BOUNDARY;
 779	iomap->length = (u64)len << inode->i_blkbits;
 780
 781out_release:
 782	release_metapath(&mp);
 783	bmap_unlock(ip, flags & IOMAP_WRITE);
 784out:
 785	trace_gfs2_iomap_end(ip, iomap, ret);
 
 
 786	return ret;
 787
 788do_alloc:
 789	if (flags & IOMAP_WRITE) {
 790		ret = gfs2_iomap_alloc(inode, iomap, flags, &mp);
 791	} else if (flags & IOMAP_REPORT) {
 792		loff_t size = i_size_read(inode);
 793		if (pos >= size)
 794			ret = -ENOENT;
 795		else if (height <= ip->i_height)
 796			iomap->length = hole_size(inode, lblock, &mp);
 797		else
 798			iomap->length = size - pos;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 799	}
 800	goto out_release;
 
 
 
 
 
 
 
 801}
 802
 
 
 
 
 
 803/**
 804 * gfs2_block_map - Map one or more blocks of an inode to a disk block
 805 * @inode: The inode
 806 * @lblock: The logical block number
 807 * @bh_map: The bh to be mapped
 808 * @create: True if its ok to alloc blocks to satify the request
 809 *
 810 * The size of the requested mapping is defined in bh_map->b_size.
 811 *
 812 * Clears buffer_mapped(bh_map) and leaves bh_map->b_size unchanged
 813 * when @lblock is not mapped.  Sets buffer_mapped(bh_map) and
 814 * bh_map->b_size to indicate the size of the mapping when @lblock and
 815 * successive blocks are mapped, up to the requested size.
 816 *
 817 * Sets buffer_boundary() if a read of metadata will be required
 818 * before the next block can be mapped. Sets buffer_new() if new
 819 * blocks were allocated.
 820 *
 821 * Returns: errno
 822 */
 823
 824int gfs2_block_map(struct inode *inode, sector_t lblock,
 825		   struct buffer_head *bh_map, int create)
 826{
 827	struct gfs2_inode *ip = GFS2_I(inode);
 828	struct iomap iomap;
 829	int ret, flags = 0;
 
 
 830
 831	clear_buffer_mapped(bh_map);
 832	clear_buffer_new(bh_map);
 833	clear_buffer_boundary(bh_map);
 834	trace_gfs2_bmap(ip, bh_map, lblock, create, 1);
 835
 836	if (create)
 837		flags |= IOMAP_WRITE;
 838	ret = gfs2_iomap_begin(inode, (loff_t)lblock << inode->i_blkbits,
 839			       bh_map->b_size, flags, &iomap);
 840	if (ret) {
 841		if (!create && ret == -ENOENT) {
 842			/* Return unmapped buffer beyond the end of file.  */
 843			ret = 0;
 844		}
 845		goto out;
 846	}
 847
 848	if (iomap.length > bh_map->b_size) {
 849		iomap.length = bh_map->b_size;
 850		iomap.flags &= ~IOMAP_F_BOUNDARY;
 851	}
 852	if (iomap.addr != IOMAP_NULL_ADDR)
 853		map_bh(bh_map, inode->i_sb, iomap.addr >> inode->i_blkbits);
 854	bh_map->b_size = iomap.length;
 855	if (iomap.flags & IOMAP_F_BOUNDARY)
 856		set_buffer_boundary(bh_map);
 857	if (iomap.flags & IOMAP_F_NEW)
 858		set_buffer_new(bh_map);
 859
 860out:
 861	trace_gfs2_bmap(ip, bh_map, lblock, create, ret);
 862	return ret;
 863}
 864
 865/*
 866 * Deprecated: do not use in new code
 867 */
 868int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsigned *extlen)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 869{
 870	struct buffer_head bh = { .b_state = 0, .b_blocknr = 0 };
 
 
 871	int ret;
 872	int create = *new;
 873
 874	BUG_ON(!extlen);
 875	BUG_ON(!dblock);
 876	BUG_ON(!new);
 877
 878	bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
 879	ret = gfs2_block_map(inode, lblock, &bh, create);
 880	*extlen = bh.b_size >> inode->i_blkbits;
 881	*dblock = bh.b_blocknr;
 882	if (buffer_new(&bh))
 883		*new = 1;
 884	else
 885		*new = 0;
 886	return ret;
 887}
 888
 889/**
 890 * gfs2_block_zero_range - Deal with zeroing out data
 891 *
 892 * This is partly borrowed from ext3.
 893 */
 894static int gfs2_block_zero_range(struct inode *inode, loff_t from,
 895				 unsigned int length)
 896{
 897	struct address_space *mapping = inode->i_mapping;
 898	struct gfs2_inode *ip = GFS2_I(inode);
 899	unsigned long index = from >> PAGE_SHIFT;
 900	unsigned offset = from & (PAGE_SIZE-1);
 901	unsigned blocksize, iblock, pos;
 902	struct buffer_head *bh;
 903	struct page *page;
 904	int err;
 905
 906	page = find_or_create_page(mapping, index, GFP_NOFS);
 907	if (!page)
 908		return 0;
 909
 910	blocksize = inode->i_sb->s_blocksize;
 911	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
 912
 913	if (!page_has_buffers(page))
 914		create_empty_buffers(page, blocksize, 0);
 915
 916	/* Find the buffer that contains "offset" */
 917	bh = page_buffers(page);
 918	pos = blocksize;
 919	while (offset >= pos) {
 920		bh = bh->b_this_page;
 921		iblock++;
 922		pos += blocksize;
 923	}
 924
 925	err = 0;
 926
 927	if (!buffer_mapped(bh)) {
 928		gfs2_block_map(inode, iblock, bh, 0);
 929		/* unmapped? It's a hole - nothing to do */
 930		if (!buffer_mapped(bh))
 931			goto unlock;
 932	}
 933
 934	/* Ok, it's mapped. Make sure it's up-to-date */
 935	if (PageUptodate(page))
 936		set_buffer_uptodate(bh);
 937
 938	if (!buffer_uptodate(bh)) {
 939		err = -EIO;
 940		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
 941		wait_on_buffer(bh);
 942		/* Uhhuh. Read error. Complain and punt. */
 943		if (!buffer_uptodate(bh))
 944			goto unlock;
 945		err = 0;
 946	}
 947
 948	if (!gfs2_is_writeback(ip))
 949		gfs2_trans_add_data(ip->i_gl, bh);
 950
 951	zero_user(page, offset, length);
 952	mark_buffer_dirty(bh);
 953unlock:
 954	unlock_page(page);
 955	put_page(page);
 956	return err;
 957}
 958
 959#define GFS2_JTRUNC_REVOKES 8192
 960
 961/**
 962 * gfs2_journaled_truncate - Wrapper for truncate_pagecache for jdata files
 963 * @inode: The inode being truncated
 964 * @oldsize: The original (larger) size
 965 * @newsize: The new smaller size
 966 *
 967 * With jdata files, we have to journal a revoke for each block which is
 968 * truncated. As a result, we need to split this into separate transactions
 969 * if the number of pages being truncated gets too large.
 970 */
 971
 972static int gfs2_journaled_truncate(struct inode *inode, u64 oldsize, u64 newsize)
 973{
 974	struct gfs2_sbd *sdp = GFS2_SB(inode);
 975	u64 max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
 976	u64 chunk;
 977	int error;
 978
 979	while (oldsize != newsize) {
 980		struct gfs2_trans *tr;
 981		unsigned int offs;
 982
 983		chunk = oldsize - newsize;
 984		if (chunk > max_chunk)
 985			chunk = max_chunk;
 986
 987		offs = oldsize & ~PAGE_MASK;
 988		if (offs && chunk > PAGE_SIZE)
 989			chunk = offs + ((chunk - offs) & PAGE_MASK);
 990
 991		truncate_pagecache(inode, oldsize - chunk);
 992		oldsize -= chunk;
 993
 994		tr = current->journal_info;
 995		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
 996			continue;
 997
 998		gfs2_trans_end(sdp);
 999		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
1000		if (error)
1001			return error;
1002	}
1003
1004	return 0;
1005}
1006
1007static int trunc_start(struct inode *inode, u64 newsize)
1008{
1009	struct gfs2_inode *ip = GFS2_I(inode);
1010	struct gfs2_sbd *sdp = GFS2_SB(inode);
1011	struct buffer_head *dibh = NULL;
1012	int journaled = gfs2_is_jdata(ip);
1013	u64 oldsize = inode->i_size;
1014	int error;
1015
 
 
 
 
 
 
 
 
 
 
1016	if (journaled)
1017		error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
1018	else
1019		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1020	if (error)
1021		return error;
1022
1023	error = gfs2_meta_inode_buffer(ip, &dibh);
1024	if (error)
1025		goto out;
1026
1027	gfs2_trans_add_meta(ip->i_gl, dibh);
1028
1029	if (gfs2_is_stuffed(ip)) {
1030		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
1031	} else {
1032		unsigned int blocksize = i_blocksize(inode);
1033		unsigned int offs = newsize & (blocksize - 1);
1034		if (offs) {
1035			error = gfs2_block_zero_range(inode, newsize,
1036						      blocksize - offs);
1037			if (error)
1038				goto out;
1039		}
1040		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
1041	}
1042
1043	i_size_write(inode, newsize);
1044	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1045	gfs2_dinode_out(ip, dibh->b_data);
1046
1047	if (journaled)
1048		error = gfs2_journaled_truncate(inode, oldsize, newsize);
1049	else
1050		truncate_pagecache(inode, newsize);
1051
1052out:
1053	brelse(dibh);
1054	if (current->journal_info)
1055		gfs2_trans_end(sdp);
1056	return error;
1057}
1058
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1059/**
1060 * sweep_bh_for_rgrps - find an rgrp in a meta buffer and free blocks therein
1061 * @ip: inode
1062 * @rg_gh: holder of resource group glock
1063 * @bh: buffer head to sweep
1064 * @start: starting point in bh
1065 * @end: end point in bh
1066 * @meta: true if bh points to metadata (rather than data)
1067 * @btotal: place to keep count of total blocks freed
1068 *
1069 * We sweep a metadata buffer (provided by the metapath) for blocks we need to
1070 * free, and free them all. However, we do it one rgrp at a time. If this
1071 * block has references to multiple rgrps, we break it into individual
1072 * transactions. This allows other processes to use the rgrps while we're
1073 * focused on a single one, for better concurrency / performance.
1074 * At every transaction boundary, we rewrite the inode into the journal.
1075 * That way the bitmaps are kept consistent with the inode and we can recover
1076 * if we're interrupted by power-outages.
1077 *
1078 * Returns: 0, or return code if an error occurred.
1079 *          *btotal has the total number of blocks freed
1080 */
1081static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
1082			      struct buffer_head *bh, __be64 *start, __be64 *end,
1083			      bool meta, u32 *btotal)
1084{
1085	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1086	struct gfs2_rgrpd *rgd;
1087	struct gfs2_trans *tr;
1088	__be64 *p;
1089	int blks_outside_rgrp;
1090	u64 bn, bstart, isize_blks;
1091	s64 blen; /* needs to be s64 or gfs2_add_inode_blocks breaks */
1092	int ret = 0;
1093	bool buf_in_tr = false; /* buffer was added to transaction */
1094
1095more_rgrps:
1096	rgd = NULL;
1097	if (gfs2_holder_initialized(rd_gh)) {
1098		rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
1099		gfs2_assert_withdraw(sdp,
1100			     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
1101	}
1102	blks_outside_rgrp = 0;
1103	bstart = 0;
1104	blen = 0;
1105
1106	for (p = start; p < end; p++) {
1107		if (!*p)
1108			continue;
1109		bn = be64_to_cpu(*p);
1110
1111		if (rgd) {
1112			if (!rgrp_contains_block(rgd, bn)) {
1113				blks_outside_rgrp++;
1114				continue;
1115			}
1116		} else {
1117			rgd = gfs2_blk2rgrpd(sdp, bn, true);
1118			if (unlikely(!rgd)) {
1119				ret = -EIO;
1120				goto out;
1121			}
1122			ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
1123						 0, rd_gh);
1124			if (ret)
1125				goto out;
1126
1127			/* Must be done with the rgrp glock held: */
1128			if (gfs2_rs_active(&ip->i_res) &&
1129			    rgd == ip->i_res.rs_rbm.rgd)
1130				gfs2_rs_deltree(&ip->i_res);
1131		}
1132
1133		/* The size of our transactions will be unknown until we
1134		   actually process all the metadata blocks that relate to
1135		   the rgrp. So we estimate. We know it can't be more than
1136		   the dinode's i_blocks and we don't want to exceed the
1137		   journal flush threshold, sd_log_thresh2. */
1138		if (current->journal_info == NULL) {
1139			unsigned int jblocks_rqsted, revokes;
1140
1141			jblocks_rqsted = rgd->rd_length + RES_DINODE +
1142				RES_INDIRECT;
1143			isize_blks = gfs2_get_inode_blocks(&ip->i_inode);
1144			if (isize_blks > atomic_read(&sdp->sd_log_thresh2))
1145				jblocks_rqsted +=
1146					atomic_read(&sdp->sd_log_thresh2);
1147			else
1148				jblocks_rqsted += isize_blks;
1149			revokes = jblocks_rqsted;
1150			if (meta)
1151				revokes += end - start;
1152			else if (ip->i_depth)
1153				revokes += sdp->sd_inptrs;
1154			ret = gfs2_trans_begin(sdp, jblocks_rqsted, revokes);
1155			if (ret)
1156				goto out_unlock;
1157			down_write(&ip->i_rw_mutex);
1158		}
1159		/* check if we will exceed the transaction blocks requested */
1160		tr = current->journal_info;
1161		if (tr->tr_num_buf_new + RES_STATFS +
1162		    RES_QUOTA >= atomic_read(&sdp->sd_log_thresh2)) {
1163			/* We set blks_outside_rgrp to ensure the loop will
1164			   be repeated for the same rgrp, but with a new
1165			   transaction. */
1166			blks_outside_rgrp++;
1167			/* This next part is tricky. If the buffer was added
1168			   to the transaction, we've already set some block
1169			   pointers to 0, so we better follow through and free
1170			   them, or we will introduce corruption (so break).
1171			   This may be impossible, or at least rare, but I
1172			   decided to cover the case regardless.
1173
1174			   If the buffer was not added to the transaction
1175			   (this call), doing so would exceed our transaction
1176			   size, so we need to end the transaction and start a
1177			   new one (so goto). */
1178
1179			if (buf_in_tr)
1180				break;
1181			goto out_unlock;
1182		}
1183
1184		gfs2_trans_add_meta(ip->i_gl, bh);
1185		buf_in_tr = true;
1186		*p = 0;
1187		if (bstart + blen == bn) {
1188			blen++;
1189			continue;
1190		}
1191		if (bstart) {
1192			__gfs2_free_blocks(ip, bstart, (u32)blen, meta);
1193			(*btotal) += blen;
1194			gfs2_add_inode_blocks(&ip->i_inode, -blen);
1195		}
1196		bstart = bn;
1197		blen = 1;
1198	}
1199	if (bstart) {
1200		__gfs2_free_blocks(ip, bstart, (u32)blen, meta);
1201		(*btotal) += blen;
1202		gfs2_add_inode_blocks(&ip->i_inode, -blen);
1203	}
1204out_unlock:
1205	if (!ret && blks_outside_rgrp) { /* If buffer still has non-zero blocks
1206					    outside the rgrp we just processed,
1207					    do it all over again. */
1208		if (current->journal_info) {
1209			struct buffer_head *dibh;
1210
1211			ret = gfs2_meta_inode_buffer(ip, &dibh);
1212			if (ret)
1213				goto out;
1214
1215			/* Every transaction boundary, we rewrite the dinode
1216			   to keep its di_blocks current in case of failure. */
1217			ip->i_inode.i_mtime = ip->i_inode.i_ctime =
1218				current_time(&ip->i_inode);
1219			gfs2_trans_add_meta(ip->i_gl, dibh);
1220			gfs2_dinode_out(ip, dibh->b_data);
1221			brelse(dibh);
1222			up_write(&ip->i_rw_mutex);
1223			gfs2_trans_end(sdp);
1224		}
1225		gfs2_glock_dq_uninit(rd_gh);
1226		cond_resched();
1227		goto more_rgrps;
1228	}
1229out:
1230	return ret;
1231}
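
    /*
     * Worked example of the journal-block estimate above (numbers are
     * illustrative only): for a resource group whose bitmaps span
     * rgd->rd_length == 2 blocks and an inode of 100 blocks, with 100
     * below sd_log_thresh2, gfs2_trans_begin() would be asked for
     *
     *	jblocks_rqsted = 2 + RES_DINODE + RES_INDIRECT + 100
     *
     * journal blocks, plus matching revokes.  Were the inode's block
     * count above sd_log_thresh2, the estimate would be capped at
     * sd_log_thresh2, so no single transaction can outgrow the journal
     * flush threshold.
     */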
1232
1233static bool mp_eq_to_hgt(struct metapath *mp, __u16 *list, unsigned int h)
1234{
1235	if (memcmp(mp->mp_list, list, h * sizeof(mp->mp_list[0])))
1236		return false;
1237	return true;
1238}
1239
1240/**
1241 * find_nonnull_ptr - find a non-null pointer given a metapath and height
     * @sdp: The superblock
1242 * @mp: starting metapath
1243 * @h: desired height to search
     * @end_list: mp_list limiting the end of the scan, or NULL
     * @end_aligned: height up to which the end of the range is aligned
1244 *
1245 * Assumes the metapath is valid (with buffers) out to height h.
1246 * Returns: true if a non-null pointer was found in the metapath buffer
1247 *          false if all remaining pointers are NULL in the buffer
1248 */
1249static bool find_nonnull_ptr(struct gfs2_sbd *sdp, struct metapath *mp,
1250			     unsigned int h,
1251			     __u16 *end_list, unsigned int end_aligned)
1252{
1253	struct buffer_head *bh = mp->mp_bh[h];
1254	__be64 *first, *ptr, *end;
1255
1256	first = metaptr1(h, mp);
1257	ptr = first + mp->mp_list[h];
1258	end = (__be64 *)(bh->b_data + bh->b_size);
1259	if (end_list && mp_eq_to_hgt(mp, end_list, h)) {
1260		bool keep_end = h < end_aligned;
1261		end = first + end_list[h] + keep_end;
1262	}
1263
1264	while (ptr < end) {
1265		if (*ptr) { /* if we have a non-null pointer */
1266			mp->mp_list[h] = ptr - first;
1267			h++;
1268			if (h < GFS2_MAX_META_HEIGHT)
1269				mp->mp_list[h] = 0;
1270			return true;
1271		}
1272		ptr++;
1273	}
1274	return false;
1275}
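
    /*
     * Scanning example (hypothetical buffer contents): with
     * mp_list[h] == 3 and pointer slots { 0, 0, 0, 0, X, ... } where X
     * is non-null, the loop above starts at slot 3, finds X at slot 4,
     * records mp_list[h] = 4, and zeroes the index at the next height
     * so the caller descends into that subtree from its first pointer.
     */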
1276
1277enum dealloc_states {
1278	DEALLOC_MP_FULL = 0,    /* Strip a metapath with all buffers read in */
1279	DEALLOC_MP_LOWER = 1,   /* Lower the metapath strip height */
1280	DEALLOC_FILL_MP = 2,    /* Fill in the metapath to the given height */
1281	DEALLOC_DONE = 3,       /* Process complete */
1282};
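
    /*
     * Rough sketch of how punch_hole() below moves through these
     * states (derived from its switch statement; for orientation only):
     *
     *	DEALLOC_FILL_MP  --(buffers read to strip height)--> DEALLOC_MP_FULL
     *	DEALLOC_MP_FULL  --(buffer swept, not dinode)------> DEALLOC_MP_LOWER
     *	DEALLOC_MP_LOWER --(next non-null pointer found)---> DEALLOC_FILL_MP
     *	DEALLOC_MP_FULL  --(error, or dinode swept)--------> DEALLOC_DONE
     */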
1283
1284static inline void
1285metapointer_range(struct metapath *mp, int height,
1286		  __u16 *start_list, unsigned int start_aligned,
1287		  __u16 *end_list, unsigned int end_aligned,
1288		  __be64 **start, __be64 **end)
1289{
1290	struct buffer_head *bh = mp->mp_bh[height];
1291	__be64 *first;
1292
1293	first = metaptr1(height, mp);
1294	*start = first;
1295	if (mp_eq_to_hgt(mp, start_list, height)) {
1296		bool keep_start = height < start_aligned;
1297		*start = first + start_list[height] + keep_start;
1298	}
1299	*end = (__be64 *)(bh->b_data + bh->b_size);
1300	if (end_list && mp_eq_to_hgt(mp, end_list, height)) {
1301		bool keep_end = height < end_aligned;
1302		*end = first + end_list[height] + keep_end;
1303	}
1304}
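
    /*
     * For instance (hypothetical values): if the metapath matches
     * start_list at height h, *start is clamped to the start_list[h]
     * slot, plus one slot when h < start_aligned so that the pointer
     * still covering data in front of the hole is preserved; otherwise
     * the range spans the whole buffer, from metaptr1() to
     * b_data + b_size.  *end is clamped the same way against end_list.
     */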
1305
1306static inline bool walk_done(struct gfs2_sbd *sdp,
1307			     struct metapath *mp, int height,
1308			     __u16 *end_list, unsigned int end_aligned)
1309{
1310	__u16 end;
1311
1312	if (end_list) {
1313		bool keep_end = height < end_aligned;
1314		if (!mp_eq_to_hgt(mp, end_list, height))
1315			return false;
1316		end = end_list[height] + keep_end;
1317	} else
1318		end = (height > 0) ? sdp->sd_inptrs : sdp->sd_diptrs;
1319	return mp->mp_list[height] >= end;
1320}
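
    /*
     * Example: with no end_list (a truncate rather than a bounded
     * hole), the walk at height 0 is done once mp_list[0] reaches
     * sd_diptrs (the number of pointers a dinode holds), and at any
     * greater height once it reaches sd_inptrs (the number of pointers
     * an indirect block holds).
     */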
1321
1322/**
1323 * punch_hole - deallocate blocks in a file
1324 * @ip: inode to truncate
1325 * @offset: the start of the hole
1326 * @length: the size of the hole (or 0 for truncate)
1327 *
1328 * Punch a hole into a file or truncate a file at a given position.  This
1329 * function operates in whole blocks (@offset and @length are rounded
1330 * accordingly); partially filled blocks at the edges of the hole must
     * be zeroed by the caller.
1331 *
1332 * This function works from the bottom up, and from the right to the left. In
1333 * other words, it strips off the highest layer (data) before stripping any of
1334 * the metadata. Doing it this way is best in case the operation is interrupted
1335 * by power failure, etc.  The dinode is rewritten in every transaction to
1336 * guarantee integrity.
1337 */
1338static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
1339{
1340	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1341	u64 maxsize = sdp->sd_heightsize[ip->i_height];
1342	struct metapath mp = {};
1343	struct buffer_head *dibh, *bh;
1344	struct gfs2_holder rd_gh;
1345	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
1346	u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift;
1347	__u16 start_list[GFS2_MAX_META_HEIGHT];
1348	__u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL;
1349	unsigned int start_aligned, uninitialized_var(end_aligned);
1350	unsigned int strip_h = ip->i_height - 1;
1351	u32 btotal = 0;
1352	int ret, state;
1353	int mp_h; /* metapath buffers are read in to this height */
1354	u64 prev_bnr = 0;
1355	__be64 *start, *end;
1356
1357	if (offset >= maxsize) {
1358		/*
1359		 * The starting point lies beyond the allocated metadata;
1360		 * there are no blocks to deallocate.
1361		 */
1362		return 0;
1363	}
1364
1365	/*
1366	 * The start position of the hole is defined by lblock, start_list, and
1367	 * start_aligned.  The end position of the hole is defined by lend,
1368	 * end_list, and end_aligned.
1369	 *
1370	 * start_aligned and end_aligned define down to which height the start
1371	 * and end positions are aligned to the metadata tree (i.e., the
1372	 * position is a multiple of the metadata granularity at the height
1373	 * above).  This determines at which heights additional meta pointers
1374	 * need to be preserved for the remaining data.
1375	 */
1376
1377	if (length) {
1378		u64 end_offset = offset + length;
1379		u64 lend;
1380
1381		/*
1382		 * Clip the end at the maximum file size for the given height:
1383		 * that's how far the metadata goes; files bigger than that
1384		 * will have additional layers of indirection.
1385		 */
1386		if (end_offset > maxsize)
1387			end_offset = maxsize;
1388		lend = end_offset >> bsize_shift;
1389
1390		if (lblock >= lend)
1391			return 0;
1392
1393		find_metapath(sdp, lend, &mp, ip->i_height);
1394		end_list = __end_list;
1395		memcpy(end_list, mp.mp_list, sizeof(mp.mp_list));
1396
1397		for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1398			if (end_list[mp_h])
1399				break;
1400		}
1401		end_aligned = mp_h;
1402	}
1403
1404	find_metapath(sdp, lblock, &mp, ip->i_height);
1405	memcpy(start_list, mp.mp_list, sizeof(start_list));
1406
1407	for (mp_h = ip->i_height - 1; mp_h > 0; mp_h--) {
1408		if (start_list[mp_h])
1409			break;
1410	}
1411	start_aligned = mp_h;
1412
1413	ret = gfs2_meta_inode_buffer(ip, &dibh);
1414	if (ret)
1415		return ret;
1416
1417	mp.mp_bh[0] = dibh;
1418	ret = lookup_metapath(ip, &mp);
1419	if (ret)
1420		goto out_metapath;
1421
1422	/* issue read-ahead on metadata */
1423	for (mp_h = 0; mp_h < mp.mp_aheight - 1; mp_h++) {
1424		metapointer_range(&mp, mp_h, start_list, start_aligned,
1425				  end_list, end_aligned, &start, &end);
1426		gfs2_metapath_ra(ip->i_gl, start, end);
1427	}
1428
1429	if (mp.mp_aheight == ip->i_height)
1430		state = DEALLOC_MP_FULL; /* We have a complete metapath */
1431	else
1432		state = DEALLOC_FILL_MP; /* deal with partial metapath */
1433
1434	ret = gfs2_rindex_update(sdp);
1435	if (ret)
1436		goto out_metapath;
1437
1438	ret = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
1439	if (ret)
1440		goto out_metapath;
1441	gfs2_holder_mark_uninitialized(&rd_gh);
1442
1443	mp_h = strip_h;
1444
1445	while (state != DEALLOC_DONE) {
1446		switch (state) {
1447		/* Truncate a full metapath at the given strip height.
1448		 * Note that strip_h == mp_h in order to be in this state. */
1449		case DEALLOC_MP_FULL:
1450			bh = mp.mp_bh[mp_h];
1451			gfs2_assert_withdraw(sdp, bh);
1452			if (gfs2_assert_withdraw(sdp,
1453						 prev_bnr != bh->b_blocknr)) {
1454				printk(KERN_EMERG "GFS2: fsid=%s:inode %llu, "
1455				       "block:%llu, i_h:%u, s_h:%u, mp_h:%u\n",
1456				       sdp->sd_fsname,
1457				       (unsigned long long)ip->i_no_addr,
1458				       prev_bnr, ip->i_height, strip_h, mp_h);
1459			}
1460			prev_bnr = bh->b_blocknr;
1461
1462			if (gfs2_metatype_check(sdp, bh,
1463						(mp_h ? GFS2_METATYPE_IN :
1464							GFS2_METATYPE_DI))) {
1465				ret = -EIO;
1466				goto out;
1467			}
1468
1469			/*
1470			 * Below, passing end_aligned as 0 gives us the
1471			 * metapointer range excluding the end point: the end
1472			 * point is the first metapath we must not deallocate!
1473			 */
1474
1475			metapointer_range(&mp, mp_h, start_list, start_aligned,
1476					  end_list, 0 /* end_aligned */,
1477					  &start, &end);
1478			ret = sweep_bh_for_rgrps(ip, &rd_gh, mp.mp_bh[mp_h],
1479						 start, end,
1480						 mp_h != ip->i_height - 1,
1481						 &btotal);
1482
1483			/* If we hit an error or have just swept the dinode
1484			   buffer, exit. */
1485			if (ret || !mp_h) {
1486				state = DEALLOC_DONE;
1487				break;
1488			}
1489			state = DEALLOC_MP_LOWER;
1490			break;
1491
1492		/* lower the metapath strip height */
1493		case DEALLOC_MP_LOWER:
1494			/* We're done with the current buffer, so release it,
1495			   unless it's the dinode buffer. Then back up to the
1496			   previous pointer. */
1497			if (mp_h) {
1498				brelse(mp.mp_bh[mp_h]);
1499				mp.mp_bh[mp_h] = NULL;
1500			}
1501			/* If we can't get any lower in height, we've stripped
1502			   off all we can. Next step is to back up and start
1503			   stripping the previous level of metadata. */
1504			if (mp_h == 0) {
1505				strip_h--;
1506				memcpy(mp.mp_list, start_list, sizeof(start_list));
1507				mp_h = strip_h;
1508				state = DEALLOC_FILL_MP;
1509				break;
1510			}
1511			mp.mp_list[mp_h] = 0;
1512			mp_h--; /* search one metadata height down */
1513			mp.mp_list[mp_h]++;
1514			if (walk_done(sdp, &mp, mp_h, end_list, end_aligned))
1515				break;
1516			/* Here we've found a part of the metapath that is not
1517			 * allocated. We need to search at that height for the
1518			 * next non-null pointer. */
1519			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned)) {
1520				state = DEALLOC_FILL_MP;
1521				mp_h++;
1522			}
1523			/* No more non-null pointers at this height. Back up
1524			   to the previous height and try again. */
1525			break; /* loop around in the same state */
1526
1527		/* Fill the metapath with buffers to the given height. */
1528		case DEALLOC_FILL_MP:
1529			/* Fill the buffers out to the current height. */
1530			ret = fillup_metapath(ip, &mp, mp_h);
1531			if (ret < 0)
1532				goto out;
1533
1534			/* issue read-ahead on metadata */
1535			if (mp.mp_aheight > 1) {
1536				for (; ret > 1; ret--) {
1537					metapointer_range(&mp, mp.mp_aheight - ret,
1538							  start_list, start_aligned,
1539							  end_list, end_aligned,
1540							  &start, &end);
1541					gfs2_metapath_ra(ip->i_gl, start, end);
1542				}
1543			}
1544
1545			/* If buffers found for the entire strip height */
1546			if (mp.mp_aheight - 1 == strip_h) {
1547				state = DEALLOC_MP_FULL;
1548				break;
1549			}
1550			if (mp.mp_aheight < ip->i_height) /* We have a partial height */
1551				mp_h = mp.mp_aheight - 1;
1552
1553			/* If we find a non-null block pointer, crawl a bit
1554			   higher up in the metapath and try again, otherwise
1555			   we need to look lower for a new starting point. */
1556			if (find_nonnull_ptr(sdp, &mp, mp_h, end_list, end_aligned))
1557				mp_h++;
1558			else
1559				state = DEALLOC_MP_LOWER;
1560			break;
1561		}
1562	}
1563
1564	if (btotal) {
1565		if (current->journal_info == NULL) {
1566			ret = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS +
1567					       RES_QUOTA, 0);
1568			if (ret)
1569				goto out;
1570			down_write(&ip->i_rw_mutex);
1571		}
1572		gfs2_statfs_change(sdp, 0, +btotal, 0);
1573		gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid,
1574				  ip->i_inode.i_gid);
1575		ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1576		gfs2_trans_add_meta(ip->i_gl, dibh);
1577		gfs2_dinode_out(ip, dibh->b_data);
1578		up_write(&ip->i_rw_mutex);
1579		gfs2_trans_end(sdp);
1580	}
1581
1582out:
1583	if (gfs2_holder_initialized(&rd_gh))
1584		gfs2_glock_dq_uninit(&rd_gh);
1585	if (current->journal_info) {
1586		up_write(&ip->i_rw_mutex);
1587		gfs2_trans_end(sdp);
1588		cond_resched();
1589	}
1590	gfs2_quota_unhold(ip);
1591out_metapath:
1592	release_metapath(&mp);
1593	return ret;
1594}
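
    /*
     * A worked example of the rounding at the top of punch_hole(),
     * assuming a 4KiB block size (bsize_shift == 12): for
     * offset == 5000 and length == 10000, lblock == (5000 + 4095) >> 12
     * == 2 (the first block lying wholly inside the hole) and
     * lend == 15000 >> 12 == 3, so only block 2 is deallocated.  The
     * partial blocks at either edge are left for the caller to zero
     * (see __gfs2_punch_hole() below).
     */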
1595
1596static int trunc_end(struct gfs2_inode *ip)
1597{
1598	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1599	struct buffer_head *dibh;
1600	int error;
1601
1602	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
1603	if (error)
1604		return error;
1605
1606	down_write(&ip->i_rw_mutex);
1607
1608	error = gfs2_meta_inode_buffer(ip, &dibh);
1609	if (error)
1610		goto out;
1611
1612	if (!i_size_read(&ip->i_inode)) {
1613		ip->i_height = 0;
1614		ip->i_goal = ip->i_no_addr;
1615		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
1616		gfs2_ordered_del_inode(ip);
1617	}
1618	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1619	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
1620
1621	gfs2_trans_add_meta(ip->i_gl, dibh);
1622	gfs2_dinode_out(ip, dibh->b_data);
1623	brelse(dibh);
1624
1625out:
1626	up_write(&ip->i_rw_mutex);
1627	gfs2_trans_end(sdp);
1628	return error;
1629}
1630
1631/**
1632 * do_shrink - make a file smaller
1633 * @inode: the inode
1634 * @newsize: the size to make the file
1635 *
1636 * Called with an exclusive lock on @inode. @newsize must
1637 * be equal to or smaller than the current inode size.
1638 *
1639 * Returns: errno
1640 */
1641
1642static int do_shrink(struct inode *inode, u64 newsize)
1643{
1644	struct gfs2_inode *ip = GFS2_I(inode);
1645	int error;
1646
1647	error = trunc_start(inode, newsize);
1648	if (error < 0)
1649		return error;
1650	if (gfs2_is_stuffed(ip))
1651		return 0;
1652
1653	error = punch_hole(ip, newsize, 0);
1654	if (error == 0)
1655		error = trunc_end(ip);
1656
1657	return error;
1658}
1659
1660void gfs2_trim_blocks(struct inode *inode)
1661{
1662	int ret;
1663
1664	ret = do_shrink(inode, inode->i_size);
1665	WARN_ON(ret != 0);
1666}
1667
1668/**
1669 * do_grow - Touch and update inode size
1670 * @inode: The inode
1671 * @size: The new size
1672 *
1673 * This function updates the timestamps on the inode and
1674 * may also increase the size of the inode. This function
1675 * must not be called with @size any smaller than the current
1676 * inode size.
1677 *
1678 * Although it is not strictly required to unstuff files here,
1679 * earlier versions of GFS2 have a bug in the stuffed file reading
1680 * code which will result in a buffer overrun if the size is larger
1681 * than the max stuffed file size. In order to prevent this from
1682 * occurring, such files are unstuffed, but in other cases we can
1683 * just update the inode size directly.
1684 *
1685 * Returns: 0 on success, or -ve on error
1686 */
1687
1688static int do_grow(struct inode *inode, u64 size)
1689{
1690	struct gfs2_inode *ip = GFS2_I(inode);
1691	struct gfs2_sbd *sdp = GFS2_SB(inode);
1692	struct gfs2_alloc_parms ap = { .target = 1, };
1693	struct buffer_head *dibh;
1694	int error;
1695	int unstuff = 0;
1696
1697	if (gfs2_is_stuffed(ip) && size > gfs2_max_stuffed_size(ip)) {
1698		error = gfs2_quota_lock_check(ip, &ap);
1699		if (error)
1700			return error;
1701
1702		error = gfs2_inplace_reserve(ip, &ap);
1703		if (error)
1704			goto do_grow_qunlock;
1705		unstuff = 1;
1706	}
1707
1708	error = gfs2_trans_begin(sdp, RES_DINODE + RES_STATFS + RES_RG_BIT +
1709				 (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ?
1710				  0 : RES_QUOTA), 0);
1711	if (error)
1712		goto do_grow_release;
1713
1714	if (unstuff) {
1715		error = gfs2_unstuff_dinode(ip, NULL);
1716		if (error)
1717			goto do_end_trans;
1718	}
1719
1720	error = gfs2_meta_inode_buffer(ip, &dibh);
1721	if (error)
1722		goto do_end_trans;
1723
1724	i_size_write(inode, size);
1725	ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
1726	gfs2_trans_add_meta(ip->i_gl, dibh);
1727	gfs2_dinode_out(ip, dibh->b_data);
1728	brelse(dibh);
1729
1730do_end_trans:
1731	gfs2_trans_end(sdp);
1732do_grow_release:
1733	if (unstuff) {
1734		gfs2_inplace_release(ip);
1735do_grow_qunlock:
1736		gfs2_quota_unlock(ip);
1737	}
1738	return error;
1739}
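
    /*
     * Example of the unstuff threshold above: gfs2_max_stuffed_size()
     * is the filesystem block size less the on-disk dinode header,
     * e.g. 4096 - sizeof(struct gfs2_dinode) bytes for a 4KiB block.
     * Growing a stuffed file past that point takes the unstuff path:
     * a block is reserved and quota-checked, the inline data is
     * migrated, and only then is i_size raised.
     */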
1740
1741/**
1742 * gfs2_setattr_size - make a file a given size
1743 * @inode: the inode
1744 * @newsize: the size to make the file
1745 *
1746 * The file size can grow, shrink, or stay the same size. This
1747 * is called holding i_rwsem and an exclusive glock on the inode
1748 * in question.
1749 *
1750 * Returns: errno
1751 */
1752
1753int gfs2_setattr_size(struct inode *inode, u64 newsize)
1754{
1755	struct gfs2_inode *ip = GFS2_I(inode);
1756	int ret;
1757
1758	BUG_ON(!S_ISREG(inode->i_mode));
1759
1760	ret = inode_newsize_ok(inode, newsize);
1761	if (ret)
1762		return ret;
1763
1764	inode_dio_wait(inode);
1765
1766	ret = gfs2_rsqa_alloc(ip);
1767	if (ret)
1768		goto out;
1769
1770	if (newsize >= inode->i_size) {
1771		ret = do_grow(inode, newsize);
1772		goto out;
1773	}
1774
1775	ret = do_shrink(inode, newsize);
1776out:
1777	gfs2_rsqa_delete(ip, NULL);
1778	return ret;
1779}
1780
1781int gfs2_truncatei_resume(struct gfs2_inode *ip)
1782{
1783	int error;
1784	error = punch_hole(ip, i_size_read(&ip->i_inode), 0);
1785	if (!error)
1786		error = trunc_end(ip);
1787	return error;
1788}
1789
1790int gfs2_file_dealloc(struct gfs2_inode *ip)
1791{
1792	return punch_hole(ip, 0, 0);
1793}
1794
1795/**
1796 * gfs2_free_journal_extents - Free cached journal bmap info
1797 * @jd: The journal
1798 *
1799 */
1800
1801void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
1802{
1803	struct gfs2_journal_extent *jext;
1804
1805	while (!list_empty(&jd->extent_list)) {
1806		jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list);
1807		list_del(&jext->list);
1808		kfree(jext);
1809	}
1810}
1811
1812/**
1813 * gfs2_add_jextent - Add or merge a new extent to extent cache
1814 * @jd: The journal descriptor
1815 * @lblock: The logical block at start of new extent
1816 * @dblock: The physical block at start of new extent
1817 * @blocks: Size of extent in fs blocks
1818 *
1819 * Returns: 0 on success or -ENOMEM
1820 */
1821
1822static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 blocks)
1823{
1824	struct gfs2_journal_extent *jext;
1825
1826	if (!list_empty(&jd->extent_list)) {
1827		jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list);
1828		if ((jext->dblock + jext->blocks) == dblock) {
1829			jext->blocks += blocks;
1830			return 0;
1831		}
1832	}
1833
1834	jext = kzalloc(sizeof(struct gfs2_journal_extent), GFP_NOFS);
1835	if (jext == NULL)
1836		return -ENOMEM;
1837	jext->dblock = dblock;
1838	jext->lblock = lblock;
1839	jext->blocks = blocks;
1840	list_add_tail(&jext->list, &jd->extent_list);
1841	jd->nr_extents++;
1842	return 0;
1843}
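
    /*
     * Merging example (block numbers made up for illustration): if the
     * cache ends with an extent { dblock = 100, blocks = 8 } and the
     * next mapping starts at dblock 108, the two are physically
     * contiguous, so the existing extent simply grows by the new block
     * count instead of a second list entry being allocated.
     */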
1844
1845/**
1846 * gfs2_map_journal_extents - Cache journal bmap info
1847 * @sdp: The super block
1848 * @jd: The journal to map
1849 *
1850 * Create a reusable "extent" mapping from all logical
1851 * blocks to all physical blocks for the given journal.  This will save
1852 * us time when writing journal blocks.  Most journals will have only one
1853 * extent that maps all their logical blocks.  That's because mkfs.gfs2
1854 * arranges the journal blocks sequentially to maximize performance.
1855 * So a single extent maps the entire file length.
1856 * However, gfs2_jadd can happen while file activity is happening, so
1857 * those journals may not be sequential.  Less likely is the case where
1858 * the users created their own journals by mounting the metafs and
1859 * laying it out.  But it's still possible.  These journals might have
1860 * several extents.
1861 *
1862 * Returns: 0 on success, or error on failure
1863 */
1864
1865int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
1866{
1867	u64 lblock = 0;
1868	u64 lblock_stop;
1869	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
1870	struct buffer_head bh;
1871	unsigned int shift = sdp->sd_sb.sb_bsize_shift;
1872	u64 size;
1873	int rc;
1874
1875	lblock_stop = i_size_read(jd->jd_inode) >> shift;
1876	size = (lblock_stop - lblock) << shift;
1877	jd->nr_extents = 0;
1878	WARN_ON(!list_empty(&jd->extent_list));
1879
1880	do {
1881		bh.b_state = 0;
1882		bh.b_blocknr = 0;
1883		bh.b_size = size;
1884		rc = gfs2_block_map(jd->jd_inode, lblock, &bh, 0);
1885		if (rc || !buffer_mapped(&bh))
1886			goto fail;
1887		rc = gfs2_add_jextent(jd, lblock, bh.b_blocknr, bh.b_size >> shift);
1888		if (rc)
1889			goto fail;
1890		size -= bh.b_size;
1891		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1892	} while (size > 0);
1893
1894	fs_info(sdp, "journal %d mapped with %u extents\n", jd->jd_jid,
1895		jd->nr_extents);
1896	return 0;
1897
1898fail:
1899	fs_warn(sdp, "error %d mapping journal %u at offset %llu (extent %u)\n",
1900		rc, jd->jd_jid,
1901		(unsigned long long)(i_size_read(jd->jd_inode) - size),
1902		jd->nr_extents);
1903	fs_warn(sdp, "bmap=%d lblock=%llu block=%llu, state=0x%08lx, size=%llu\n",
1904		rc, (unsigned long long)lblock, (unsigned long long)bh.b_blocknr,
1905		bh.b_state, (unsigned long long)bh.b_size);
1906	gfs2_free_journal_extents(jd);
1907	return rc;
1908}
1909
1910/**
1911 * gfs2_write_alloc_required - figure out if a write will require an allocation
1912 * @ip: the file being written to
1913 * @offset: the offset to write to
1914 * @len: the number of bytes being written
1915 *
1916 * Returns: 1 if an alloc is required, 0 otherwise
1917 */
1918
1919int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
1920			      unsigned int len)
1921{
1922	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1923	struct buffer_head bh;
1924	unsigned int shift;
1925	u64 lblock, lblock_stop, size;
1926	u64 end_of_file;
1927
1928	if (!len)
1929		return 0;
1930
1931	if (gfs2_is_stuffed(ip)) {
1932		if (offset + len > gfs2_max_stuffed_size(ip))
1933			return 1;
1934		return 0;
1935	}
1936
1937	shift = sdp->sd_sb.sb_bsize_shift;
1938	BUG_ON(gfs2_is_dir(ip));
1939	end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
1940	lblock = offset >> shift;
1941	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
1942	if (lblock_stop > end_of_file)
1943		return 1;
1944
1945	size = (lblock_stop - lblock) << shift;
1946	do {
1947		bh.b_state = 0;
1948		bh.b_size = size;
1949		gfs2_block_map(&ip->i_inode, lblock, &bh, 0);
1950		if (!buffer_mapped(&bh))
1951			return 1;
1952		size -= bh.b_size;
1953		lblock += (bh.b_size >> ip->i_inode.i_blkbits);
1954	} while (size > 0);
1955
1956	return 0;
1957}
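
    /*
     * For example (illustrative, 4KiB blocks): writing 100 bytes at
     * offset 10000 into an unstuffed 8KiB file gives end_of_file == 2,
     * lblock == 2 and lblock_stop == 3; since lblock_stop exceeds
     * end_of_file, the function returns 1 without calling
     * gfs2_block_map() at all.
     */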
1958
1959static int stuffed_zero_range(struct inode *inode, loff_t offset, loff_t length)
1960{
1961	struct gfs2_inode *ip = GFS2_I(inode);
1962	struct buffer_head *dibh;
1963	int error;
1964
1965	if (offset >= inode->i_size)
1966		return 0;
1967	if (offset + length > inode->i_size)
1968		length = inode->i_size - offset;
1969
1970	error = gfs2_meta_inode_buffer(ip, &dibh);
1971	if (error)
1972		return error;
1973	gfs2_trans_add_meta(ip->i_gl, dibh);
1974	memset(dibh->b_data + sizeof(struct gfs2_dinode) + offset, 0,
1975	       length);
1976	brelse(dibh);
1977	return 0;
1978}
1979
1980static int gfs2_journaled_truncate_range(struct inode *inode, loff_t offset,
1981					 loff_t length)
1982{
1983	struct gfs2_sbd *sdp = GFS2_SB(inode);
1984	loff_t max_chunk = GFS2_JTRUNC_REVOKES * sdp->sd_vfs->s_blocksize;
1985	int error;
1986
1987	while (length) {
1988		struct gfs2_trans *tr;
1989		loff_t chunk;
1990		unsigned int offs;
1991
1992		chunk = length;
1993		if (chunk > max_chunk)
1994			chunk = max_chunk;
1995
1996		offs = offset & ~PAGE_MASK;
1997		if (offs && chunk > PAGE_SIZE)
1998			chunk = offs + ((chunk - offs) & PAGE_MASK);
1999
2000		truncate_pagecache_range(inode, offset, offset + chunk - 1);
2001		offset += chunk;
2002		length -= chunk;
2003
2004		tr = current->journal_info;
2005		if (!test_bit(TR_TOUCHED, &tr->tr_flags))
2006			continue;
2007
2008		gfs2_trans_end(sdp);
2009		error = gfs2_trans_begin(sdp, RES_DINODE, GFS2_JTRUNC_REVOKES);
2010		if (error)
2011			return error;
2012	}
2013	return 0;
2014}
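
    /*
     * The chunking above bounds how much page cache each transaction
     * truncates.  As a rough illustration, with a 4KiB block size the
     * cap per iteration is GFS2_JTRUNC_REVOKES * 4096 bytes, and when
     * the starting offset is not page aligned, the offs adjustment
     * trims the chunk to the unaligned head plus a whole number of
     * pages.
     */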
2015
2016int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2017{
2018	struct inode *inode = file_inode(file);
2019	struct gfs2_inode *ip = GFS2_I(inode);
2020	struct gfs2_sbd *sdp = GFS2_SB(inode);
2021	int error;
2022
2023	if (gfs2_is_jdata(ip))
2024		error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2025					 GFS2_JTRUNC_REVOKES);
2026	else
2027		error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2028	if (error)
2029		return error;
2030
2031	if (gfs2_is_stuffed(ip)) {
2032		error = stuffed_zero_range(inode, offset, length);
2033		if (error)
2034			goto out;
2035	} else {
2036		unsigned int start_off, end_off, blocksize;
2037
2038		blocksize = i_blocksize(inode);
2039		start_off = offset & (blocksize - 1);
2040		end_off = (offset + length) & (blocksize - 1);
2041		if (start_off) {
2042			unsigned int len = length;
2043			if (length > blocksize - start_off)
2044				len = blocksize - start_off;
2045			error = gfs2_block_zero_range(inode, offset, len);
2046			if (error)
2047				goto out;
2048			if (start_off + length < blocksize)
2049				end_off = 0;
2050		}
2051		if (end_off) {
2052			error = gfs2_block_zero_range(inode,
2053				offset + length - end_off, end_off);
2054			if (error)
2055				goto out;
2056		}
2057	}
2058
2059	if (gfs2_is_jdata(ip)) {
2060		BUG_ON(!current->journal_info);
2061		error = gfs2_journaled_truncate_range(inode, offset, length);
    		if (error)
    			goto out;
2062	} else
2063		truncate_pagecache_range(inode, offset, offset + length - 1);
2064
2065	file_update_time(file);
2066	mark_inode_dirty(inode);
2067
2068	if (current->journal_info)
2069		gfs2_trans_end(sdp);
2070
2071	if (!gfs2_is_stuffed(ip))
2072		error = punch_hole(ip, offset, length);
2073
2074out:
2075	if (current->journal_info)
2076		gfs2_trans_end(sdp);
2077	return error;
2078}