/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while(val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
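
/*
 * Illustrative note (not part of the upstream file): fsflags_cvt() walks
 * @val one bit at a time, ORing in the table entry for each set bit. For
 * example, FS_SYNC_FL is bit 3, so
 *
 *	fsflags_cvt(fsflags_to_gfs2, FS_SYNC_FL) == GFS2_DIF_SYNC
 *
 * since fsflags_to_gfs2[3] == GFS2_DIF_SYNC in the table below.
 */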

static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
}
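
/*
 * Illustrative note (not part of the upstream file): FS_JOURNAL_DATA_FL
 * (bit 14) converts to GFS2_DIF_INHERIT_JDATA in fsflags_to_gfs2, which
 * only makes sense on a directory. For a regular file, the XOR in
 * gfs2_set_flags() clears GFS2_DIF_INHERIT_JDATA and sets GFS2_DIF_JDATA
 * instead, so the request becomes "journal this file's data" rather than
 * "newly created files inherit journaling".
 */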

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch(cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_res.rs_sizehint))
		atomic_set(&ip->i_res.rs_sizehint, hint);
}
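
/*
 * Illustrative note (not part of the upstream file): blks is the write
 * size rounded up to whole filesystem blocks. With the common 4096-byte
 * block size (sb_bsize_shift == 12), a 1 MiB write gives
 * (1048576 + 4095) >> 12 == 256 blocks. rs_sizehint is only ever raised
 * here, so the largest write seen so far sizes future block reservations.
 */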

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_SIZE;
	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while(size > 0);
	return 0;
}
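
/*
 * Illustrative note (not part of the upstream file): gfs2_block_map()
 * shrinks bh.b_size to the extent it actually mapped, so each pass may
 * cover only part of the page. E.g. with 4 KiB pages and 1 KiB blocks, a
 * first call that maps two blocks leaves size == 2048, and the loop calls
 * back in at lblock + 2 for the remainder.
 */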

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}
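
/*
 * Illustrative note (not part of the upstream file): rblocks above is the
 * transaction reservation in blocks. A worst-case single-page fault on a
 * journaled-data file needs the dinode, any new indirect blocks, the data
 * blocks themselves (jdata journals data), plus statfs, quota and resource
 * group header/bitmap blocks, hence the RES_* terms and gfs2_rg_blocks().
 */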

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap -
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that we have the file size uptodate for the large
 * file check which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rsqa_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}
/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 *                     blocks, determine how many bytes can be written.
 * @ip:          The inode in question.
 * @len:         Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks:  Compute and return the number of indirect blocks needed
 * @max_blocks:  The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
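
/*
 * Illustrative note (not part of the upstream file): the "- 3" terms
 * appear to keep a few blocks of headroom per level of the metadata tree,
 * and the loop converts data blocks into the indirect blocks needed to
 * point at them: every sd_inptrs blocks cost one pointer block, those
 * pointer blocks cost further pointer blocks one level up, and so on
 * until the count fits in the dinode's sd_diptrs direct pointers.
 */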

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks = UINT_MAX;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		file_update_time(file);
		mark_inode_dirty(inode);
	}

	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
		return vfs_fsync_range(file, pos, pos + count - 1,
			       (file->f_flags & __O_SYNC) ? 0 : 1);
	return 0;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out_putw;

	ret = __gfs2_fallocate(file, mode, offset, len);
	if (ret)
		gfs2_rs_deltree(&ip->i_res);

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		locks_lock_file_wait(file,
				     &(struct file_lock) {
					     .fl_type = F_UNLCK,
					     .fl_flags = FL_FLOCK
				     });
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}
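
/*
 * Illustrative note (not part of the upstream file): the for loop above is
 * a bounded retry with backoff. For non-blocking requests, the first
 * attempt uses LM_FLAG_TRY_1CB so a conflicting holder on another node is
 * asked (once) to drop the glock; later attempts use plain LM_FLAG_TRY,
 * sleeping 1, 2 and then 4 ms between tries before giving up with -EAGAIN.
 */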

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (gfs2_holder_initialized(fl_gh)) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate_shared	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/completion.h>
  13#include <linux/buffer_head.h>
  14#include <linux/pagemap.h>
  15#include <linux/uio.h>
  16#include <linux/blkdev.h>
  17#include <linux/mm.h>
  18#include <linux/mount.h>
  19#include <linux/fs.h>
  20#include <linux/gfs2_ondisk.h>
  21#include <linux/ext2_fs.h>
  22#include <linux/falloc.h>
  23#include <linux/swap.h>
  24#include <linux/crc32.h>
  25#include <linux/writeback.h>
  26#include <asm/uaccess.h>
  27#include <linux/dlm.h>
  28#include <linux/dlm_plock.h>
 
  29
  30#include "gfs2.h"
  31#include "incore.h"
  32#include "bmap.h"
  33#include "dir.h"
  34#include "glock.h"
  35#include "glops.h"
  36#include "inode.h"
  37#include "log.h"
  38#include "meta_io.h"
  39#include "quota.h"
  40#include "rgrp.h"
  41#include "trans.h"
  42#include "util.h"
  43
  44/**
  45 * gfs2_llseek - seek to a location in a file
  46 * @file: the file
  47 * @offset: the offset
  48 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
  49 *
  50 * SEEK_END requires the glock for the file because it references the
  51 * file's size.
  52 *
  53 * Returns: The new offset, or errno
  54 */
  55
  56static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
  57{
  58	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
  59	struct gfs2_holder i_gh;
  60	loff_t error;
  61
  62	if (origin == 2) {
 
 
 
  63		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
  64					   &i_gh);
  65		if (!error) {
  66			error = generic_file_llseek_unlocked(file, offset, origin);
  67			gfs2_glock_dq_uninit(&i_gh);
  68		}
  69	} else
  70		error = generic_file_llseek_unlocked(file, offset, origin);
 
 
 
 
 
 
  71
  72	return error;
  73}
  74
  75/**
  76 * gfs2_readdir - Read directory entries from a directory
  77 * @file: The directory to read from
  78 * @dirent: Buffer for dirents
  79 * @filldir: Function used to do the copying
  80 *
  81 * Returns: errno
  82 */
  83
  84static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
  85{
  86	struct inode *dir = file->f_mapping->host;
  87	struct gfs2_inode *dip = GFS2_I(dir);
  88	struct gfs2_holder d_gh;
  89	u64 offset = file->f_pos;
  90	int error;
  91
  92	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
  93	error = gfs2_glock_nq(&d_gh);
  94	if (error) {
  95		gfs2_holder_uninit(&d_gh);
  96		return error;
  97	}
  98
  99	error = gfs2_dir_read(dir, &offset, dirent, filldir);
 100
 101	gfs2_glock_dq_uninit(&d_gh);
 102
 103	file->f_pos = offset;
 104
 105	return error;
 106}
 107
 108/**
 109 * fsflags_cvt
 110 * @table: A table of 32 u32 flags
 111 * @val: a 32 bit value to convert
 112 *
 113 * This function can be used to convert between fsflags values and
 114 * GFS2's own flags values.
 115 *
 116 * Returns: the converted flags
 117 */
 118static u32 fsflags_cvt(const u32 *table, u32 val)
 119{
 120	u32 res = 0;
 121	while(val) {
 122		if (val & 1)
 123			res |= *table;
 124		table++;
 125		val >>= 1;
 126	}
 127	return res;
 128}
 129
 130static const u32 fsflags_to_gfs2[32] = {
 131	[3] = GFS2_DIF_SYNC,
 132	[4] = GFS2_DIF_IMMUTABLE,
 133	[5] = GFS2_DIF_APPENDONLY,
 134	[7] = GFS2_DIF_NOATIME,
 135	[12] = GFS2_DIF_EXHASH,
 136	[14] = GFS2_DIF_INHERIT_JDATA,
 
 137};
 138
 139static const u32 gfs2_to_fsflags[32] = {
 140	[gfs2fl_Sync] = FS_SYNC_FL,
 141	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
 142	[gfs2fl_AppendOnly] = FS_APPEND_FL,
 143	[gfs2fl_NoAtime] = FS_NOATIME_FL,
 144	[gfs2fl_ExHash] = FS_INDEX_FL,
 
 145	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
 146};
 147
 148static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 149{
 150	struct inode *inode = filp->f_path.dentry->d_inode;
 151	struct gfs2_inode *ip = GFS2_I(inode);
 152	struct gfs2_holder gh;
 153	int error;
 154	u32 fsflags;
 155
 156	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 157	error = gfs2_glock_nq(&gh);
 158	if (error)
 159		return error;
 160
 161	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
 162	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
 163		fsflags |= FS_JOURNAL_DATA_FL;
 164	if (put_user(fsflags, ptr))
 165		error = -EFAULT;
 166
 167	gfs2_glock_dq(&gh);
 
 168	gfs2_holder_uninit(&gh);
 169	return error;
 170}
 171
 172void gfs2_set_inode_flags(struct inode *inode)
 173{
 174	struct gfs2_inode *ip = GFS2_I(inode);
 175	unsigned int flags = inode->i_flags;
 176
 177	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
 178	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
 179		inode->i_flags |= S_NOSEC;
 180	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
 181		flags |= S_IMMUTABLE;
 182	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
 183		flags |= S_APPEND;
 184	if (ip->i_diskflags & GFS2_DIF_NOATIME)
 185		flags |= S_NOATIME;
 186	if (ip->i_diskflags & GFS2_DIF_SYNC)
 187		flags |= S_SYNC;
 188	inode->i_flags = flags;
 189}
 190
 191/* Flags that can be set by user space */
 192#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
 193			     GFS2_DIF_IMMUTABLE|		\
 194			     GFS2_DIF_APPENDONLY|		\
 195			     GFS2_DIF_NOATIME|			\
 196			     GFS2_DIF_SYNC|			\
 197			     GFS2_DIF_SYSTEM|			\
 
 198			     GFS2_DIF_INHERIT_JDATA)
 199
 200/**
 201 * gfs2_set_flags - set flags on an inode
 202 * @inode: The inode
 203 * @flags: The flags to set
 204 * @mask: Indicates which flags are valid
 205 *
 206 */
 207static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 208{
 209	struct inode *inode = filp->f_path.dentry->d_inode;
 210	struct gfs2_inode *ip = GFS2_I(inode);
 211	struct gfs2_sbd *sdp = GFS2_SB(inode);
 212	struct buffer_head *bh;
 213	struct gfs2_holder gh;
 214	int error;
 215	u32 new_flags, flags;
 216
 217	error = mnt_want_write(filp->f_path.mnt);
 218	if (error)
 219		return error;
 220
 221	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 222	if (error)
 223		goto out_drop_write;
 224
 225	error = -EACCES;
 226	if (!inode_owner_or_capable(inode))
 227		goto out;
 228
 229	error = 0;
 230	flags = ip->i_diskflags;
 231	new_flags = (flags & ~mask) | (reqflags & mask);
 232	if ((new_flags ^ flags) == 0)
 233		goto out;
 234
 235	error = -EINVAL;
 236	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
 237		goto out;
 238
 239	error = -EPERM;
 240	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
 241		goto out;
 242	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
 243		goto out;
 244	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
 245	    !capable(CAP_LINUX_IMMUTABLE))
 246		goto out;
 247	if (!IS_IMMUTABLE(inode)) {
 248		error = gfs2_permission(inode, MAY_WRITE);
 249		if (error)
 250			goto out;
 251	}
 252	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
 253		if (flags & GFS2_DIF_JDATA)
 254			gfs2_log_flush(sdp, ip->i_gl);
 255		error = filemap_fdatawrite(inode->i_mapping);
 256		if (error)
 257			goto out;
 258		error = filemap_fdatawait(inode->i_mapping);
 259		if (error)
 260			goto out;
 261	}
 262	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 263	if (error)
 264		goto out;
 265	error = gfs2_meta_inode_buffer(ip, &bh);
 266	if (error)
 267		goto out_trans_end;
 268	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 269	ip->i_diskflags = new_flags;
 270	gfs2_dinode_out(ip, bh->b_data);
 271	brelse(bh);
 272	gfs2_set_inode_flags(inode);
 273	gfs2_set_aops(inode);
 274out_trans_end:
 275	gfs2_trans_end(sdp);
 276out:
 277	gfs2_glock_dq_uninit(&gh);
 278out_drop_write:
 279	mnt_drop_write(filp->f_path.mnt);
 280	return error;
 281}
 282
 283static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 284{
 285	struct inode *inode = filp->f_path.dentry->d_inode;
 286	u32 fsflags, gfsflags;
 287
 288	if (get_user(fsflags, ptr))
 289		return -EFAULT;
 290
 291	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
 292	if (!S_ISDIR(inode->i_mode)) {
 
 293		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
 294			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
 295		return do_gfs2_set_flags(filp, gfsflags, ~0);
 296	}
 297	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
 298}
 299
 300static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 301{
 302	switch(cmd) {
 303	case FS_IOC_GETFLAGS:
 304		return gfs2_get_flags(filp, (u32 __user *)arg);
 305	case FS_IOC_SETFLAGS:
 306		return gfs2_set_flags(filp, (u32 __user *)arg);
 
 
 307	}
 308	return -ENOTTY;
 309}
 310
 311/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 312 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 313 * @page: The (locked) page to allocate backing for
 314 *
 315 * We try to allocate all the blocks required for the page in
 316 * one go. This might fail for various reasons, so we keep
 317 * trying until all the blocks to back this page are allocated.
 318 * If some of the blocks are already allocated, thats ok too.
 319 */
 320
 321static int gfs2_allocate_page_backing(struct page *page)
 322{
 323	struct inode *inode = page->mapping->host;
 324	struct buffer_head bh;
 325	unsigned long size = PAGE_CACHE_SIZE;
 326	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 327
 328	do {
 329		bh.b_state = 0;
 330		bh.b_size = size;
 331		gfs2_block_map(inode, lblock, &bh, 1);
 332		if (!buffer_mapped(&bh))
 333			return -EIO;
 334		size -= bh.b_size;
 335		lblock += (bh.b_size >> inode->i_blkbits);
 336	} while(size > 0);
 337	return 0;
 338}
 339
 340/**
 341 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 342 * @vma: The virtual memory area
 343 * @page: The page which is about to become writable
 344 *
 345 * When the page becomes writable, we need to ensure that we have
 346 * blocks allocated on disk to back that page.
 347 */
 348
 349static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 350{
 351	struct page *page = vmf->page;
 352	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 353	struct gfs2_inode *ip = GFS2_I(inode);
 354	struct gfs2_sbd *sdp = GFS2_SB(inode);
 
 355	unsigned long last_index;
 356	u64 pos = page->index << PAGE_CACHE_SHIFT;
 357	unsigned int data_blocks, ind_blocks, rblocks;
 358	struct gfs2_holder gh;
 359	struct gfs2_alloc *al;
 360	int ret;
 361
 
 
 
 
 
 
 
 
 362	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 363	ret = gfs2_glock_nq(&gh);
 364	if (ret)
 365		goto out;
 
 
 
 366
 367	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 368	set_bit(GIF_SW_PAGED, &ip->i_flags);
 369
 370	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
 
 
 
 
 
 371		goto out_unlock;
 372	ret = -ENOMEM;
 373	al = gfs2_alloc_get(ip);
 374	if (al == NULL)
 
 375		goto out_unlock;
 376
 377	ret = gfs2_quota_lock_check(ip);
 
 
 378	if (ret)
 379		goto out_alloc_put;
 380	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
 381	al->al_requested = data_blocks + ind_blocks;
 382	ret = gfs2_inplace_reserve(ip);
 383	if (ret)
 384		goto out_quota_unlock;
 385
 386	rblocks = RES_DINODE + ind_blocks;
 387	if (gfs2_is_jdata(ip))
 388		rblocks += data_blocks ? data_blocks : 1;
 389	if (ind_blocks || data_blocks) {
 390		rblocks += RES_STATFS + RES_QUOTA;
 391		rblocks += gfs2_rg_blocks(al);
 392	}
 393	ret = gfs2_trans_begin(sdp, rblocks, 0);
 394	if (ret)
 395		goto out_trans_fail;
 396
 397	lock_page(page);
 398	ret = -EINVAL;
 399	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
 400	if (page->index > last_index)
 401		goto out_unlock_page;
 
 
 
 
 
 
 
 
 
 
 
 402	ret = 0;
 403	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
 404		goto out_unlock_page;
 405	if (gfs2_is_stuffed(ip)) {
 406		ret = gfs2_unstuff_dinode(ip, page);
 407		if (ret)
 408			goto out_unlock_page;
 409	}
 410	ret = gfs2_allocate_page_backing(page);
 411
 412out_unlock_page:
 413	unlock_page(page);
 
 414	gfs2_trans_end(sdp);
 415out_trans_fail:
 416	gfs2_inplace_release(ip);
 417out_quota_unlock:
 418	gfs2_quota_unlock(ip);
 419out_alloc_put:
 420	gfs2_alloc_put(ip);
 421out_unlock:
 422	gfs2_glock_dq(&gh);
 
 
 
 
 
 
 423out:
 424	gfs2_holder_uninit(&gh);
 425	if (ret == -ENOMEM)
 426		ret = VM_FAULT_OOM;
 427	else if (ret)
 428		ret = VM_FAULT_SIGBUS;
 429	return ret;
 430}
 431
 432static const struct vm_operations_struct gfs2_vm_ops = {
 433	.fault = filemap_fault,
 
 434	.page_mkwrite = gfs2_page_mkwrite,
 435};
 436
 437/**
 438 * gfs2_mmap -
 439 * @file: The file to map
 440 * @vma: The VMA which described the mapping
 441 *
 442 * There is no need to get a lock here unless we should be updating
 443 * atime. We ignore any locking errors since the only consequence is
 444 * a missed atime update (which will just be deferred until later).
 445 *
 446 * Returns: 0
 447 */
 448
 449static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
 450{
 451	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 452
 453	if (!(file->f_flags & O_NOATIME) &&
 454	    !IS_NOATIME(&ip->i_inode)) {
 455		struct gfs2_holder i_gh;
 456		int error;
 457
 458		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 459		error = gfs2_glock_nq(&i_gh);
 460		if (error == 0) {
 461			file_accessed(file);
 462			gfs2_glock_dq(&i_gh);
 463		}
 464		gfs2_holder_uninit(&i_gh);
 465		if (error)
 466			return error;
 
 
 
 467	}
 468	vma->vm_ops = &gfs2_vm_ops;
 469	vma->vm_flags |= VM_CAN_NONLINEAR;
 470
 471	return 0;
 472}
 473
 474/**
 475 * gfs2_open - open a file
 476 * @inode: the inode to open
 477 * @file: the struct file for this opening
 
 
 
 
 
 478 *
 479 * Returns: errno
 480 */
 481
 482static int gfs2_open(struct inode *inode, struct file *file)
 483{
 484	struct gfs2_inode *ip = GFS2_I(inode);
 485	struct gfs2_holder i_gh;
 486	struct gfs2_file *fp;
 487	int error;
 
 
 
 
 
 
 488
 489	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
 490	if (!fp)
 491		return -ENOMEM;
 492
 493	mutex_init(&fp->f_fl_mutex);
 494
 495	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
 496	file->private_data = fp;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 497
 498	if (S_ISREG(ip->i_inode.i_mode)) {
 499		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
 500					   &i_gh);
 501		if (error)
 502			goto fail;
 
 
 503
 504		if (!(file->f_flags & O_LARGEFILE) &&
 505		    i_size_read(inode) > MAX_NON_LFS) {
 506			error = -EOVERFLOW;
 507			goto fail_gunlock;
 508		}
 509
 
 510		gfs2_glock_dq_uninit(&i_gh);
 511	}
 512
 513	return 0;
 514
 515fail_gunlock:
 516	gfs2_glock_dq_uninit(&i_gh);
 517fail:
 518	file->private_data = NULL;
 519	kfree(fp);
 520	return error;
 521}
 522
 523/**
 524 * gfs2_close - called to close a struct file
 525 * @inode: the inode the struct file belongs to
 526 * @file: the struct file being closed
 527 *
 528 * Returns: errno
 529 */
 530
 531static int gfs2_close(struct inode *inode, struct file *file)
 532{
 533	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
 534	struct gfs2_file *fp;
 535
 536	fp = file->private_data;
 537	file->private_data = NULL;
 538
 539	if (gfs2_assert_warn(sdp, fp))
 540		return -EIO;
 541
 542	kfree(fp);
 543
 
 544	return 0;
 545}
 546
 547/**
 548 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 549 * @file: the file that points to the dentry
 550 * @start: the start position in the file to sync
 551 * @end: the end position in the file to sync
 552 * @datasync: set if we can ignore timestamp changes
 553 *
 554 * The VFS will flush data for us. We only need to worry
 555 * about metadata here.
 
 
 
 
 
 
 
 
 556 *
 557 * Returns: errno
 558 */
 559
 560static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 561		      int datasync)
 562{
 563	struct inode *inode = file->f_mapping->host;
 564	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
 
 565	struct gfs2_inode *ip = GFS2_I(inode);
 566	int ret;
 567
 568	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
 569	if (ret)
 570		return ret;
 571	mutex_lock(&inode->i_mutex);
 
 572
 
 
 573	if (datasync)
 574		sync_state &= ~I_DIRTY_SYNC;
 575
 576	if (sync_state) {
 577		ret = sync_inode_metadata(inode, 1);
 578		if (ret) {
 579			mutex_unlock(&inode->i_mutex);
 580			return ret;
 581		}
 582		gfs2_ail_flush(ip->i_gl);
 
 583	}
 584
 585	mutex_unlock(&inode->i_mutex);
 586	return 0;
 
 
 587}
 588
 589/**
 590 * gfs2_file_aio_write - Perform a write to a file
 591 * @iocb: The io context
 592 * @iov: The data to write
 593 * @nr_segs: Number of @iov segments
 594 * @pos: The file position
 595 *
 596 * We have to do a lock/unlock here to refresh the inode size for
 597 * O_APPEND writes, otherwise we can land up writing at the wrong
 598 * offset. There is still a race, but provided the app is using its
 599 * own file locking, this will make O_APPEND work as expected.
 600 *
 601 */
 602
 603static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 604				   unsigned long nr_segs, loff_t pos)
 605{
 606	struct file *file = iocb->ki_filp;
 
 
 607
 608	if (file->f_flags & O_APPEND) {
 609		struct dentry *dentry = file->f_dentry;
 610		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
 
 
 
 
 611		struct gfs2_holder gh;
 612		int ret;
 613
 614		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 615		if (ret)
 616			return ret;
 617		gfs2_glock_dq_uninit(&gh);
 618	}
 619
 620	return generic_file_aio_write(iocb, iov, nr_segs, pos);
 621}
 622
 623static int empty_write_end(struct page *page, unsigned from,
 624			   unsigned to, int mode)
 625{
 626	struct inode *inode = page->mapping->host;
 627	struct gfs2_inode *ip = GFS2_I(inode);
 628	struct buffer_head *bh;
 629	unsigned offset, blksize = 1 << inode->i_blkbits;
 630	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
 631
 632	zero_user(page, from, to-from);
 633	mark_page_accessed(page);
 634
 635	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
 636		if (!gfs2_is_writeback(ip))
 637			gfs2_page_add_databufs(ip, page, from, to);
 638
 639		block_commit_write(page, from, to);
 640		return 0;
 641	}
 642
 643	offset = 0;
 644	bh = page_buffers(page);
 645	while (offset < to) {
 646		if (offset >= from) {
 647			set_buffer_uptodate(bh);
 648			mark_buffer_dirty(bh);
 649			clear_buffer_new(bh);
 650			write_dirty_buffer(bh, WRITE);
 651		}
 652		offset += blksize;
 653		bh = bh->b_this_page;
 654	}
 655
 656	offset = 0;
 657	bh = page_buffers(page);
 658	while (offset < to) {
 659		if (offset >= from) {
 660			wait_on_buffer(bh);
 661			if (!buffer_uptodate(bh))
 662				return -EIO;
 663		}
 664		offset += blksize;
 665		bh = bh->b_this_page;
 666	}
 667	return 0;
 668}
 669
 670static int needs_empty_write(sector_t block, struct inode *inode)
 671{
 672	int error;
 673	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 674
 675	bh_map.b_size = 1 << inode->i_blkbits;
 676	error = gfs2_block_map(inode, block, &bh_map, 0);
 677	if (unlikely(error))
 678		return error;
 679	return !buffer_mapped(&bh_map);
 680}
 681
 682static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
 683			      int mode)
 684{
 685	struct inode *inode = page->mapping->host;
 686	unsigned start, end, next, blksize;
 687	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 688	int ret;
 689
 690	blksize = 1 << inode->i_blkbits;
 691	next = end = 0;
 692	while (next < from) {
 693		next += blksize;
 694		block++;
 695	}
 696	start = next;
 697	do {
 698		next += blksize;
 699		ret = needs_empty_write(block, inode);
 700		if (unlikely(ret < 0))
 701			return ret;
 702		if (ret == 0) {
 703			if (end) {
 704				ret = __block_write_begin(page, start, end - start,
 705							  gfs2_block_map);
 706				if (unlikely(ret))
 707					return ret;
 708				ret = empty_write_end(page, start, end, mode);
 709				if (unlikely(ret))
 710					return ret;
 711				end = 0;
 712			}
 713			start = next;
 714		}
 715		else
 716			end = next;
 717		block++;
 718	} while (next < to);
 719
 720	if (end) {
 721		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
 722		if (unlikely(ret))
 723			return ret;
 724		ret = empty_write_end(page, start, end, mode);
 725		if (unlikely(ret))
 726			return ret;
 727	}
 728
 729	return 0;
 730}
 731
 732static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 733			   int mode)
 734{
 735	struct gfs2_inode *ip = GFS2_I(inode);
 736	struct buffer_head *dibh;
 737	int error;
 738	u64 start = offset >> PAGE_CACHE_SHIFT;
 739	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
 740	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
 741	pgoff_t curr;
 742	struct page *page;
 743	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
 744	unsigned int from, to;
 745
 746	if (!end_offset)
 747		end_offset = PAGE_CACHE_SIZE;
 748
 749	error = gfs2_meta_inode_buffer(ip, &dibh);
 750	if (unlikely(error))
 751		goto out;
 752
 753	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 754
 755	if (gfs2_is_stuffed(ip)) {
 756		error = gfs2_unstuff_dinode(ip, NULL);
 757		if (unlikely(error))
 758			goto out;
 759	}
 760
 761	curr = start;
 762	offset = start << PAGE_CACHE_SHIFT;
 763	from = start_offset;
 764	to = PAGE_CACHE_SIZE;
 765	while (curr <= end) {
 766		page = grab_cache_page_write_begin(inode->i_mapping, curr,
 767						   AOP_FLAG_NOFS);
 768		if (unlikely(!page)) {
 769			error = -ENOMEM;
 
 
 
 
 
 
 770			goto out;
 771		}
 772
 773		if (curr == end)
 774			to = end_offset;
 775		error = write_empty_blocks(page, from, to, mode);
 776		if (!error && offset + to > inode->i_size &&
 777		    !(mode & FALLOC_FL_KEEP_SIZE)) {
 778			i_size_write(inode, offset + to);
 779		}
 780		unlock_page(page);
 781		page_cache_release(page);
 782		if (error)
 783			goto out;
 784		curr++;
 785		offset += PAGE_CACHE_SIZE;
 786		from = 0;
 787	}
 788
 789	gfs2_dinode_out(ip, dibh->b_data);
 790	mark_inode_dirty(inode);
 791
 792out_brelse:
 793	brelse(dibh);
 794out:
 795	return error;
 796}
 797
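/**
 * calc_max_reserv - work out the largest chunk one reservation can cover
 * @ip: the inode being allocated to
 * @max: the remaining length of the request, in bytes
 * @len: in/out - the number of bytes this chunk will cover
 * @data_blocks: in/out - the number of data blocks to reserve
 * @ind_blocks: out - the number of indirect blocks to reserve
 *
 * Based on the free space in the reserved resource group, split the
 * available blocks between data and the indirect (metadata) blocks
 * needed to map them, then clamp the result to @max.
 */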
 798static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
 799			    unsigned int *data_blocks, unsigned int *ind_blocks)
 800{
 801	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 802	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
 803	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
 804
 805	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
 806		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
 807		max_data -= tmp;
 808	}
 809	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
 810	   so it might end up with fewer data blocks */
 811	if (max_data <= *data_blocks)
 812		return;
 813	*data_blocks = max_data;
 814	*ind_blocks = max_blocks - max_data;
 815	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
 816	if (*len > max) {
 817		*len = max;
 818		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
 819	}
 820}
 821
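/**
 * gfs2_fallocate - preallocate blocks for a file
 * @file: the file to preallocate
 * @mode: the fallocate mode flags (only FALLOC_FL_KEEP_SIZE is supported)
 * @offset: the byte offset of the start of the region
 * @len: the length of the region in bytes
 *
 * The region is rounded out to filesystem block boundaries and then
 * allocated in chunks, each under its own quota check, block
 * reservation and transaction.
 *
 * Returns: errno
 */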
 822static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 823			   loff_t len)
 824{
 825	struct inode *inode = file->f_path.dentry->d_inode;
 826	struct gfs2_sbd *sdp = GFS2_SB(inode);
 827	struct gfs2_inode *ip = GFS2_I(inode);
 828	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 829	loff_t bytes, max_bytes;
 830	struct gfs2_alloc *al;
 831	int error;
 832	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
 833	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 834	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
 835
 836	/* We only support the FALLOC_FL_KEEP_SIZE mode flag */
 837	if (mode & ~FALLOC_FL_KEEP_SIZE)
 838		return -EOPNOTSUPP;
 839
 840	offset &= bsize_mask;
 841
 842	len = next - offset;
 843	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
 844	if (!bytes)
 845		bytes = UINT_MAX;
 846	bytes &= bsize_mask;
 847	if (bytes == 0)
 848		bytes = sdp->sd_sb.sb_bsize;
 849
 850	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
 851	error = gfs2_glock_nq(&ip->i_gh);
 852	if (unlikely(error))
 853		goto out_uninit;
 854
 855	if (!gfs2_write_alloc_required(ip, offset, len))
 856		goto out_unlock;
 857
 858	while (len > 0) {
 859		if (len < bytes)
 860			bytes = len;
 861		al = gfs2_alloc_get(ip);
 862		if (!al) {
 863			error = -ENOMEM;
 864			goto out_unlock;
 865		}
 866
 867		error = gfs2_quota_lock_check(ip);
 868		if (error)
 869			goto out_alloc_put;
 870
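		/*
		 * On -ENOSPC, halve the chunk size (down to a single
		 * filesystem block) and retry the reservation from here.
		 */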
 871retry:
 872		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 873
 874		al->al_requested = data_blocks + ind_blocks;
 875		error = gfs2_inplace_reserve(ip);
 876		if (error) {
 877			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
 878				bytes >>= 1;
 879				bytes &= bsize_mask;
 880				if (bytes == 0)
 881					bytes = sdp->sd_sb.sb_bsize;
 882				goto retry;
 883			}
 884			goto out_qunlock;
 885		}
 886		max_bytes = bytes;
 887		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
 888		al->al_requested = data_blocks + ind_blocks;
 889
 890		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
 891			  RES_RG_HDR + gfs2_rg_blocks(al);
 892		if (gfs2_is_jdata(ip))
 893			rblocks += data_blocks ? data_blocks : 1;
 894
 895		error = gfs2_trans_begin(sdp, rblocks,
 896					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
 897		if (error)
 898			goto out_trans_fail;
 899
 900		error = fallocate_chunk(inode, offset, max_bytes, mode);
 901		gfs2_trans_end(sdp);
 902
 903		if (error)
 904			goto out_trans_fail;
 905
 906		len -= max_bytes;
 907		offset += max_bytes;
 908		gfs2_inplace_release(ip);
 909		gfs2_quota_unlock(ip);
 910		gfs2_alloc_put(ip);
 911	}
 912	goto out_unlock;
 913
 914out_trans_fail:
 915	gfs2_inplace_release(ip);
 916out_qunlock:
 917	gfs2_quota_unlock(ip);
 918out_alloc_put:
 919	gfs2_alloc_put(ip);
 920out_unlock:
 921	gfs2_glock_dq(&ip->i_gh);
 922out_uninit:
 923	gfs2_holder_uninit(&ip->i_gh);
 924	return error;
 925}
 926
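/*
 * Userspace sketch (not part of this file), assuming a file on a gfs2
 * mount at the hypothetical path /mnt/gfs2: preallocate 16MiB without
 * changing the file size.
 *
 *	int fd = open("/mnt/gfs2/data", O_RDWR | O_CREAT, 0644);
 *	if (fd >= 0 && fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) != 0)
 *		perror("fallocate");
 *
 * Any mode flag other than FALLOC_FL_KEEP_SIZE fails with EOPNOTSUPP.
 */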
 927#ifdef CONFIG_GFS2_FS_LOCKING_DLM
 928
 929/**
 930 * gfs2_setlease - acquire/release a file lease
 931 * @file: the file pointer
 932 * @arg: lease type
 933 * @fl: file lock
 934 *
 935 * We don't currently have a way to enforce a lease across the whole
 936 * cluster; until we do, disable leases (by just returning -EINVAL),
 937 * unless the administrator has requested purely local locking.
 938 *
 939 * Locking: called under lock_flocks
 940 *
 941 * Returns: errno
 942 */
 943
 944static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
 945{
 946	return -EINVAL;
 947}
 948
 949/**
 950 * gfs2_lock - acquire/release a posix lock on a file
 951 * @file: the file pointer
 952 * @cmd: either modify or retrieve lock state, possibly wait
 953 * @fl: type and range of lock
 954 *
 955 * Returns: errno
 956 */
 957
 958static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 959{
 960	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 961	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
 962	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 963
 964	if (!(fl->fl_flags & FL_POSIX))
 965		return -ENOLCK;
 966	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
 967		return -ENOLCK;
 968
 969	if (cmd == F_CANCELLK) {
 970		/* Hack: cancel the request by turning it into an unlock */
 971		cmd = F_SETLK;
 972		fl->fl_type = F_UNLCK;
 973	}
 974	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 975		return -EIO;
 976	if (IS_GETLK(cmd))
 977		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
 978	else if (fl->fl_type == F_UNLCK)
 979		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
 980	else
 981		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
 982}
 983
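/**
 * do_flock - acquire or convert a flock lock, backed by a glock
 * @file: the file pointer
 * @cmd: IS_SETLKW(cmd) selects blocking over try-lock behaviour
 * @fl: type and range of lock
 *
 * Each open file holds at most one flock glock holder; converting the
 * lock means dropping the old holder and re-acquiring it in the new
 * state under f_fl_mutex.
 *
 * Returns: errno
 */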
 984static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 985{
 986	struct gfs2_file *fp = file->private_data;
 987	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
 988	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
 989	struct gfs2_glock *gl;
 990	unsigned int state;
 991	int flags;
 992	int error = 0;
 993
 994	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
 995	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
 996
 997	mutex_lock(&fp->f_fl_mutex);
 998
 999	gl = fl_gh->gh_gl;
1000	if (gl) {
1001		if (fl_gh->gh_state == state)
1002			goto out;
1003		flock_lock_file_wait(file,
1004				     &(struct file_lock){.fl_type = F_UNLCK});
1005		gfs2_glock_dq_wait(fl_gh);
1006		gfs2_holder_reinit(state, flags, fl_gh);
1007	} else {
1008		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1009				       &gfs2_flock_glops, CREATE, &gl);
1010		if (error)
1011			goto out;
1012		gfs2_holder_init(gl, state, flags, fl_gh);
1013		gfs2_glock_put(gl);
1014	}
1015	error = gfs2_glock_nq(fl_gh);
1016	if (error) {
1017		gfs2_holder_uninit(fl_gh);
1018		if (error == GLR_TRYFAILED)
1019			error = -EAGAIN;
1020	} else {
1021		error = flock_lock_file_wait(file, fl);
1022		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1023	}
1024
1025out:
1026	mutex_unlock(&fp->f_fl_mutex);
1027	return error;
1028}
1029
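/**
 * do_unflock - release a flock lock and its backing glock
 * @file: the file pointer
 * @fl: type and range of lock
 */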
1030static void do_unflock(struct file *file, struct file_lock *fl)
1031{
1032	struct gfs2_file *fp = file->private_data;
1033	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1034
1035	mutex_lock(&fp->f_fl_mutex);
1036	flock_lock_file_wait(file, fl);
1037	if (fl_gh->gh_gl) {
1038		gfs2_glock_dq_wait(fl_gh);
1039		gfs2_holder_uninit(fl_gh);
1040	}
1041	mutex_unlock(&fp->f_fl_mutex);
1042}
1043
1044/**
1045 * gfs2_flock - acquire/release a flock lock on a file
1046 * @file: the file pointer
1047 * @cmd: either modify or retrieve lock state, possibly wait
1048 * @fl: type and range of lock
1049 *
1050 * Returns: errno
1051 */
1052
1053static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1054{
1055	if (!(fl->fl_flags & FL_FLOCK))
1056		return -ENOLCK;
1057	if (fl->fl_type & LOCK_MAND)
1058		return -EOPNOTSUPP;
1059
1060	if (fl->fl_type == F_UNLCK) {
1061		do_unflock(file, fl);
1062		return 0;
1063	} else {
1064		return do_flock(file, cmd, fl);
1065	}
1066}
1067
1068const struct file_operations gfs2_file_fops = {
1069	.llseek		= gfs2_llseek,
1070	.read		= do_sync_read,
1071	.aio_read	= generic_file_aio_read,
1072	.write		= do_sync_write,
1073	.aio_write	= gfs2_file_aio_write,
1074	.unlocked_ioctl	= gfs2_ioctl,
1075	.mmap		= gfs2_mmap,
1076	.open		= gfs2_open,
1077	.release	= gfs2_close,
1078	.fsync		= gfs2_fsync,
1079	.lock		= gfs2_lock,
1080	.flock		= gfs2_flock,
1081	.splice_read	= generic_file_splice_read,
1082	.splice_write	= generic_file_splice_write,
1083	.setlease	= gfs2_setlease,
1084	.fallocate	= gfs2_fallocate,
1085};
1086
1087const struct file_operations gfs2_dir_fops = {
1088	.readdir	= gfs2_readdir,
1089	.unlocked_ioctl	= gfs2_ioctl,
1090	.open		= gfs2_open,
1091	.release	= gfs2_close,
1092	.fsync		= gfs2_fsync,
1093	.lock		= gfs2_lock,
1094	.flock		= gfs2_flock,
1095	.llseek		= default_llseek,
1096};
1097
1098#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1099
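/*
 * The _nolock variants below are used when no cluster locking is in
 * effect (e.g. the lock_nolock protocol): POSIX locks and flocks fall
 * back to the VFS defaults and leases go through generic_setlease().
 */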
1100const struct file_operations gfs2_file_fops_nolock = {
1101	.llseek		= gfs2_llseek,
1102	.read		= do_sync_read,
1103	.aio_read	= generic_file_aio_read,
1104	.write		= do_sync_write,
1105	.aio_write	= gfs2_file_aio_write,
1106	.unlocked_ioctl	= gfs2_ioctl,
1107	.mmap		= gfs2_mmap,
1108	.open		= gfs2_open,
1109	.release	= gfs2_close,
1110	.fsync		= gfs2_fsync,
1111	.splice_read	= generic_file_splice_read,
1112	.splice_write	= generic_file_splice_write,
1113	.setlease	= generic_setlease,
1114	.fallocate	= gfs2_fallocate,
1115};
1116
1117const struct file_operations gfs2_dir_fops_nolock = {
1118	.readdir	= gfs2_readdir,
1119	.unlocked_ioctl	= gfs2_ioctl,
1120	.open		= gfs2_open,
1121	.release	= gfs2_close,
1122	.fsync		= gfs2_fsync,
1123	.llseek		= default_llseek,
1124};
1125