v4.10.11
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/completion.h>
  13#include <linux/buffer_head.h>
  14#include <linux/pagemap.h>
  15#include <linux/uio.h>
  16#include <linux/blkdev.h>
  17#include <linux/mm.h>
  18#include <linux/mount.h>
  19#include <linux/fs.h>
  20#include <linux/gfs2_ondisk.h>
  21#include <linux/falloc.h>
  22#include <linux/swap.h>
  23#include <linux/crc32.h>
  24#include <linux/writeback.h>
  25#include <linux/uaccess.h>
  26#include <linux/dlm.h>
  27#include <linux/dlm_plock.h>
  28#include <linux/delay.h>
  29
  30#include "gfs2.h"
  31#include "incore.h"
  32#include "bmap.h"
  33#include "dir.h"
  34#include "glock.h"
  35#include "glops.h"
  36#include "inode.h"
  37#include "log.h"
  38#include "meta_io.h"
  39#include "quota.h"
  40#include "rgrp.h"
  41#include "trans.h"
  42#include "util.h"
  43
  44/**
  45 * gfs2_llseek - seek to a location in a file
  46 * @file: the file
  47 * @offset: the offset
  48 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
  49 *
  50 * SEEK_END requires the glock for the file because it references the
  51 * file's size.
  52 *
  53 * Returns: The new offset, or errno
  54 */
  55
  56static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
  57{
  58	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
  59	struct gfs2_holder i_gh;
  60	loff_t error;
  61
  62	switch (whence) {
  63	case SEEK_END: /* These reference inode->i_size */
  64	case SEEK_DATA:
  65	case SEEK_HOLE:
  66		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
  67					   &i_gh);
  68		if (!error) {
  69			error = generic_file_llseek(file, offset, whence);
  70			gfs2_glock_dq_uninit(&i_gh);
  71		}
  72		break;
  73	case SEEK_CUR:
  74	case SEEK_SET:
  75		error = generic_file_llseek(file, offset, whence);
  76		break;
  77	default:
  78		error = -EINVAL;
  79	}
  80
  81	return error;
  82}
  83
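/*
 * A minimal userspace sketch (illustrative, not part of this file) of the
 * three whence values that take the shared glock above. They all depend
 * on an up-to-date i_size, which another cluster node may have changed,
 * so generic_file_llseek() must run under the inode glock:
 *
 *	#define _GNU_SOURCE		// for SEEK_DATA/SEEK_HOLE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *
 *	off_t end  = lseek(fd, 0, SEEK_END);	// needs the glock
 *	off_t data = lseek(fd, 0, SEEK_DATA);	// needs the glock
 *	off_t cur  = lseek(fd, 0, SEEK_CUR);	// no glock required
 */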
  84/**
  85 * gfs2_readdir - Iterator for a directory
  86 * @file: The directory to read from
  87 * @ctx: What to feed directory entries to
  88 *
  89 * Returns: errno
  90 */
  91
  92static int gfs2_readdir(struct file *file, struct dir_context *ctx)
  93{
  94	struct inode *dir = file->f_mapping->host;
  95	struct gfs2_inode *dip = GFS2_I(dir);
  96	struct gfs2_holder d_gh;
  97	int error;
  98
  99	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 100	if (error)
 101		return error;
 102
 103	error = gfs2_dir_read(dir, ctx, &file->f_ra);
 104
 105	gfs2_glock_dq_uninit(&d_gh);
 106
 107	return error;
 108}
 109
 110/**
 111 * fsflags_cvt
 112 * @table: A table of 32 u32 flags
 113 * @val: a 32 bit value to convert
 114 *
 115 * This function can be used to convert between fsflags values and
 116 * GFS2's own flags values.
 117 *
 118 * Returns: the converted flags
 119 */
 120static u32 fsflags_cvt(const u32 *table, u32 val)
 121{
 122	u32 res = 0;
 123	while(val) {
 124		if (val & 1)
 125			res |= *table;
 126		table++;
 127		val >>= 1;
 128	}
 129	return res;
 130}
 131
 132static const u32 fsflags_to_gfs2[32] = {
 133	[3] = GFS2_DIF_SYNC,
 134	[4] = GFS2_DIF_IMMUTABLE,
 135	[5] = GFS2_DIF_APPENDONLY,
 136	[7] = GFS2_DIF_NOATIME,
 137	[12] = GFS2_DIF_EXHASH,
 138	[14] = GFS2_DIF_INHERIT_JDATA,
 139	[17] = GFS2_DIF_TOPDIR,
 140};
 141
 142static const u32 gfs2_to_fsflags[32] = {
 143	[gfs2fl_Sync] = FS_SYNC_FL,
 144	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
 145	[gfs2fl_AppendOnly] = FS_APPEND_FL,
 146	[gfs2fl_NoAtime] = FS_NOATIME_FL,
 147	[gfs2fl_ExHash] = FS_INDEX_FL,
 148	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
 149	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
 150};
 151
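/*
 * A worked example (illustrative, not part of this file): fsflags_cvt()
 * walks @val one bit at a time, so entry [N] of a table is the output
 * value for input bit N. FS_SYNC_FL is bit 3 of the FS_IOC_*FLAGS word
 * and FS_NOATIME_FL is bit 7, which is why they sit at [3] and [7] above:
 *
 *	u32 out = fsflags_cvt(fsflags_to_gfs2, FS_SYNC_FL | FS_NOATIME_FL);
 *	// out == (GFS2_DIF_SYNC | GFS2_DIF_NOATIME)
 *
 * gfs2_to_fsflags is indexed the same way, but by GFS2's own bit numbers
 * (the gfs2fl_* constants), giving the reverse mapping.
 */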
 152static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 153{
 154	struct inode *inode = file_inode(filp);
 155	struct gfs2_inode *ip = GFS2_I(inode);
 156	struct gfs2_holder gh;
 157	int error;
 158	u32 fsflags;
 159
 160	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 161	error = gfs2_glock_nq(&gh);
 162	if (error)
 163		goto out_uninit;
 164
 165	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
 166	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
 167		fsflags |= FS_JOURNAL_DATA_FL;
 168	if (put_user(fsflags, ptr))
 169		error = -EFAULT;
 170
 171	gfs2_glock_dq(&gh);
 172out_uninit:
 173	gfs2_holder_uninit(&gh);
 174	return error;
 175}
 176
 177void gfs2_set_inode_flags(struct inode *inode)
 178{
 179	struct gfs2_inode *ip = GFS2_I(inode);
 180	unsigned int flags = inode->i_flags;
 181
 182	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
 183	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
 184		flags |= S_NOSEC;
 185	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
 186		flags |= S_IMMUTABLE;
 187	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
 188		flags |= S_APPEND;
 189	if (ip->i_diskflags & GFS2_DIF_NOATIME)
 190		flags |= S_NOATIME;
 191	if (ip->i_diskflags & GFS2_DIF_SYNC)
 192		flags |= S_SYNC;
 193	inode->i_flags = flags;
 194}
 195
 196/* Flags that can be set by user space */
 197#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
 198			     GFS2_DIF_IMMUTABLE|		\
 199			     GFS2_DIF_APPENDONLY|		\
 200			     GFS2_DIF_NOATIME|			\
 201			     GFS2_DIF_SYNC|			\
 202			     GFS2_DIF_SYSTEM|			\
 203			     GFS2_DIF_TOPDIR|			\
 204			     GFS2_DIF_INHERIT_JDATA)
 205
 206/**
 207 * do_gfs2_set_flags - set flags on an inode
 208 * @filp: file pointer
 209 * @reqflags: The flags to set
 210 * @mask: Indicates which flags are valid
 211 *
 212 */
 213static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 214{
 215	struct inode *inode = file_inode(filp);
 216	struct gfs2_inode *ip = GFS2_I(inode);
 217	struct gfs2_sbd *sdp = GFS2_SB(inode);
 218	struct buffer_head *bh;
 219	struct gfs2_holder gh;
 220	int error;
 221	u32 new_flags, flags;
 222
 223	error = mnt_want_write_file(filp);
 224	if (error)
 225		return error;
 226
 227	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 228	if (error)
 229		goto out_drop_write;
 230
 231	error = -EACCES;
 232	if (!inode_owner_or_capable(inode))
 233		goto out;
 234
 235	error = 0;
 236	flags = ip->i_diskflags;
 237	new_flags = (flags & ~mask) | (reqflags & mask);
 238	if ((new_flags ^ flags) == 0)
 239		goto out;
 240
 241	error = -EINVAL;
 242	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
 243		goto out;
 244
 245	error = -EPERM;
 246	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
 247		goto out;
 248	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
 249		goto out;
 250	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
 251	    !capable(CAP_LINUX_IMMUTABLE))
 252		goto out;
 253	if (!IS_IMMUTABLE(inode)) {
 254		error = gfs2_permission(inode, MAY_WRITE);
 255		if (error)
 256			goto out;
 257	}
 258	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
 259		if (flags & GFS2_DIF_JDATA)
 260			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
 261		error = filemap_fdatawrite(inode->i_mapping);
 262		if (error)
 263			goto out;
 264		error = filemap_fdatawait(inode->i_mapping);
 265		if (error)
 266			goto out;
 267	}
 268	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 269	if (error)
 270		goto out;
 271	error = gfs2_meta_inode_buffer(ip, &bh);
 272	if (error)
 273		goto out_trans_end;
 274	gfs2_trans_add_meta(ip->i_gl, bh);
 275	ip->i_diskflags = new_flags;
 276	gfs2_dinode_out(ip, bh->b_data);
 277	brelse(bh);
 278	gfs2_set_inode_flags(inode);
 279	gfs2_set_aops(inode);
 280out_trans_end:
 281	gfs2_trans_end(sdp);
 282out:
 283	gfs2_glock_dq_uninit(&gh);
 284out_drop_write:
 285	mnt_drop_write_file(filp);
 286	return error;
 287}
 288
 289static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 290{
 291	struct inode *inode = file_inode(filp);
 292	u32 fsflags, gfsflags;
 293
 294	if (get_user(fsflags, ptr))
 295		return -EFAULT;
 296
 297	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
 298	if (!S_ISDIR(inode->i_mode)) {
 299		gfsflags &= ~GFS2_DIF_TOPDIR;
 300		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
 301			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
 302		return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
 303	}
 304	return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
 305}
 306
 307static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 308{
 309	switch(cmd) {
 310	case FS_IOC_GETFLAGS:
 311		return gfs2_get_flags(filp, (u32 __user *)arg);
 312	case FS_IOC_SETFLAGS:
 313		return gfs2_set_flags(filp, (u32 __user *)arg);
 314	case FITRIM:
 315		return gfs2_fitrim(filp, (void __user *)arg);
 316	}
 317	return -ENOTTY;
 318}
 319
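/*
 * A minimal userspace sketch (illustrative, not part of this file):
 * FS_IOC_GETFLAGS and FS_IOC_SETFLAGS are the generic attribute ioctls
 * used by lsattr(1)/chattr(1), and they reach the handlers above:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);	// -> gfs2_get_flags()
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);	// -> gfs2_set_flags()
 */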
 320/**
 321 * gfs2_size_hint - Give a hint to the size of a write request
 322 * @filep: The struct file
 323 * @offset: The file offset of the write
 324 * @size: The length of the write
 325 *
 326 * When we are about to do a write, this function records the total
 327 * write size in order to provide a suitable hint to the lower layers
 328 * about how many blocks will be required.
 329 *
 330 */
 331
 332static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
 333{
 334	struct inode *inode = file_inode(filep);
 335	struct gfs2_sbd *sdp = GFS2_SB(inode);
 336	struct gfs2_inode *ip = GFS2_I(inode);
 337	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
 338	int hint = min_t(size_t, INT_MAX, blks);
 339
 340	if (hint > atomic_read(&ip->i_res.rs_sizehint))
 341		atomic_set(&ip->i_res.rs_sizehint, hint);
 342}
 343
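/*
 * A worked example (illustrative, not part of this file): the shift
 * rounds the write size up to whole filesystem blocks. With a 4096-byte
 * block size (sb_bsize_shift == 12), a 10000-byte write gives
 *
 *	blks = (10000 + 4095) >> 12 = 3
 *
 * and the size hint is only ever grown, never shrunk, by this function.
 */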
 344/**
 345 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 346 * @page: The (locked) page to allocate backing for
 347 *
 348 * We try to allocate all the blocks required for the page in
 349 * one go. This might fail for various reasons, so we keep
 350 * trying until all the blocks to back this page are allocated.
  351 * If some of the blocks are already allocated, that's ok too.
 352 */
 353
 354static int gfs2_allocate_page_backing(struct page *page)
 355{
 356	struct inode *inode = page->mapping->host;
 357	struct buffer_head bh;
 358	unsigned long size = PAGE_SIZE;
 359	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);
 360
 361	do {
 362		bh.b_state = 0;
 363		bh.b_size = size;
 364		gfs2_block_map(inode, lblock, &bh, 1);
 365		if (!buffer_mapped(&bh))
 366			return -EIO;
 367		size -= bh.b_size;
 368		lblock += (bh.b_size >> inode->i_blkbits);
 369	} while(size > 0);
 370	return 0;
 371}
 372
 373/**
 374 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 375 * @vma: The virtual memory area
 376 * @vmf: The virtual memory fault containing the page to become writable
 377 *
 378 * When the page becomes writable, we need to ensure that we have
 379 * blocks allocated on disk to back that page.
 380 */
 381
 382static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 383{
 384	struct page *page = vmf->page;
 385	struct inode *inode = file_inode(vma->vm_file);
 386	struct gfs2_inode *ip = GFS2_I(inode);
 387	struct gfs2_sbd *sdp = GFS2_SB(inode);
 388	struct gfs2_alloc_parms ap = { .aflags = 0, };
 389	unsigned long last_index;
 390	u64 pos = page->index << PAGE_SHIFT;
 391	unsigned int data_blocks, ind_blocks, rblocks;
 392	struct gfs2_holder gh;
 393	loff_t size;
 394	int ret;
 395
 396	sb_start_pagefault(inode->i_sb);
 397
 398	ret = gfs2_rsqa_alloc(ip);
 399	if (ret)
 400		goto out;
 401
 402	gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);
 403
 404	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 405	ret = gfs2_glock_nq(&gh);
 406	if (ret)
 407		goto out_uninit;
 408
 409	/* Update file times before taking page lock */
 410	file_update_time(vma->vm_file);
 411
 412	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 413	set_bit(GIF_SW_PAGED, &ip->i_flags);
 414
 415	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
 416		lock_page(page);
 417		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
 418			ret = -EAGAIN;
 419			unlock_page(page);
 420		}
 421		goto out_unlock;
 422	}
 423
 424	ret = gfs2_rindex_update(sdp);
 425	if (ret)
 426		goto out_unlock;
 427
 428	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
 429	ap.target = data_blocks + ind_blocks;
 430	ret = gfs2_quota_lock_check(ip, &ap);
 431	if (ret)
 432		goto out_unlock;
 433	ret = gfs2_inplace_reserve(ip, &ap);
 434	if (ret)
 435		goto out_quota_unlock;
 436
 437	rblocks = RES_DINODE + ind_blocks;
 438	if (gfs2_is_jdata(ip))
 439		rblocks += data_blocks ? data_blocks : 1;
 440	if (ind_blocks || data_blocks) {
 441		rblocks += RES_STATFS + RES_QUOTA;
 442		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
 443	}
 444	ret = gfs2_trans_begin(sdp, rblocks, 0);
 445	if (ret)
 446		goto out_trans_fail;
 447
 448	lock_page(page);
 449	ret = -EINVAL;
 450	size = i_size_read(inode);
 451	last_index = (size - 1) >> PAGE_SHIFT;
 452	/* Check page index against inode size */
 453	if (size == 0 || (page->index > last_index))
 454		goto out_trans_end;
 455
 456	ret = -EAGAIN;
 457	/* If truncated, we must retry the operation, we may have raced
 458	 * with the glock demotion code.
 459	 */
 460	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
 461		goto out_trans_end;
 462
 463	/* Unstuff, if required, and allocate backing blocks for page */
 464	ret = 0;
 465	if (gfs2_is_stuffed(ip))
 466		ret = gfs2_unstuff_dinode(ip, page);
 467	if (ret == 0)
 468		ret = gfs2_allocate_page_backing(page);
 469
 470out_trans_end:
 471	if (ret)
 472		unlock_page(page);
 473	gfs2_trans_end(sdp);
 474out_trans_fail:
 475	gfs2_inplace_release(ip);
 476out_quota_unlock:
 477	gfs2_quota_unlock(ip);
 478out_unlock:
 479	gfs2_glock_dq(&gh);
 480out_uninit:
 481	gfs2_holder_uninit(&gh);
 482	if (ret == 0) {
 483		set_page_dirty(page);
 484		wait_for_stable_page(page);
 485	}
 486out:
 487	sb_end_pagefault(inode->i_sb);
 488	return block_page_mkwrite_return(ret);
 489}
 490
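/*
 * A userspace sketch (illustrative, not part of this file) of the path
 * that reaches gfs2_page_mkwrite(). The first store into a clean
 * MAP_SHARED page takes a write fault, and the fault handler calls
 * ->page_mkwrite so the blocks backing the page can be allocated before
 * the page is dirtied:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	p[0] = 1;	// write fault -> gfs2_page_mkwrite()
 */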
 491static const struct vm_operations_struct gfs2_vm_ops = {
 492	.fault = filemap_fault,
 493	.map_pages = filemap_map_pages,
 494	.page_mkwrite = gfs2_page_mkwrite,
 495};
 496
 497/**
  498 * gfs2_mmap - set up a file-backed memory mapping
 499 * @file: The file to map
 500 * @vma: The VMA which described the mapping
 501 *
 502 * There is no need to get a lock here unless we should be updating
 503 * atime. We ignore any locking errors since the only consequence is
 504 * a missed atime update (which will just be deferred until later).
 505 *
 506 * Returns: 0
 507 */
 508
 509static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
 510{
 511	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 512
 513	if (!(file->f_flags & O_NOATIME) &&
 514	    !IS_NOATIME(&ip->i_inode)) {
 515		struct gfs2_holder i_gh;
 516		int error;
 517
 518		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
 519					   &i_gh);
 520		if (error)
 521			return error;
 522		/* grab lock to update inode */
 523		gfs2_glock_dq_uninit(&i_gh);
 524		file_accessed(file);
 525	}
 526	vma->vm_ops = &gfs2_vm_ops;
 527
 528	return 0;
 529}
 530
 531/**
 532 * gfs2_open_common - This is common to open and atomic_open
 533 * @inode: The inode being opened
 534 * @file: The file being opened
 535 *
  536 * This may be called with or without the glock held, depending on how
  537 * it was reached. For regular files, however, it is always called under
  538 * a glock. For other file types, it does not matter whether
 539 * we hold the glock or not.
 540 *
 541 * Returns: Error code or 0 for success
 542 */
 543
 544int gfs2_open_common(struct inode *inode, struct file *file)
 545{
 546	struct gfs2_file *fp;
 547	int ret;
 548
 549	if (S_ISREG(inode->i_mode)) {
 550		ret = generic_file_open(inode, file);
 551		if (ret)
 552			return ret;
 553	}
 554
 555	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
 556	if (!fp)
 557		return -ENOMEM;
 558
 559	mutex_init(&fp->f_fl_mutex);
 560
 561	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
 562	file->private_data = fp;
 563	return 0;
 564}
 565
 566/**
 567 * gfs2_open - open a file
 568 * @inode: the inode to open
 569 * @file: the struct file for this opening
 570 *
 571 * After atomic_open, this function is only used for opening files
 572 * which are already cached. We must still get the glock for regular
 573 * files to ensure that we have the file size uptodate for the large
 574 * file check which is in the common code. That is only an issue for
 575 * regular files though.
 576 *
 577 * Returns: errno
 578 */
 579
 580static int gfs2_open(struct inode *inode, struct file *file)
 581{
 582	struct gfs2_inode *ip = GFS2_I(inode);
 583	struct gfs2_holder i_gh;
 584	int error;
 585	bool need_unlock = false;
 586
 587	if (S_ISREG(ip->i_inode.i_mode)) {
 588		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
 589					   &i_gh);
 590		if (error)
 591			return error;
 592		need_unlock = true;
 593	}
 594
 595	error = gfs2_open_common(inode, file);
 596
 597	if (need_unlock)
 598		gfs2_glock_dq_uninit(&i_gh);
 599
 600	return error;
 601}
 602
 603/**
 604 * gfs2_release - called to close a struct file
 605 * @inode: the inode the struct file belongs to
 606 * @file: the struct file being closed
 607 *
 608 * Returns: errno
 609 */
 610
 611static int gfs2_release(struct inode *inode, struct file *file)
 612{
 613	struct gfs2_inode *ip = GFS2_I(inode);
 614
 615	kfree(file->private_data);
 616	file->private_data = NULL;
 617
 618	if (!(file->f_mode & FMODE_WRITE))
 619		return 0;
 620
 621	gfs2_rsqa_delete(ip, &inode->i_writecount);
 622	return 0;
 623}
 624
 625/**
 626 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 627 * @file: the file that points to the dentry
 628 * @start: the start position in the file to sync
 629 * @end: the end position in the file to sync
 630 * @datasync: set if we can ignore timestamp changes
 631 *
 632 * We split the data flushing here so that we don't wait for the data
 633 * until after we've also sent the metadata to disk. Note that for
 634 * data=ordered, we will write & wait for the data at the log flush
 635 * stage anyway, so this is unlikely to make much of a difference
 636 * except in the data=writeback case.
 637 *
 638 * If the fdatawrite fails due to any reason except -EIO, we will
 639 * continue the remainder of the fsync, although we'll still report
 640 * the error at the end. This is to match filemap_write_and_wait_range()
 641 * behaviour.
 642 *
 643 * Returns: errno
 644 */
 645
 646static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 647		      int datasync)
 648{
 649	struct address_space *mapping = file->f_mapping;
 650	struct inode *inode = mapping->host;
 651	int sync_state = inode->i_state & I_DIRTY_ALL;
 652	struct gfs2_inode *ip = GFS2_I(inode);
 653	int ret = 0, ret1 = 0;
 654
 655	if (mapping->nrpages) {
 656		ret1 = filemap_fdatawrite_range(mapping, start, end);
 657		if (ret1 == -EIO)
 658			return ret1;
 659	}
 660
 661	if (!gfs2_is_jdata(ip))
 662		sync_state &= ~I_DIRTY_PAGES;
 663	if (datasync)
 664		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);
 665
 666	if (sync_state) {
 667		ret = sync_inode_metadata(inode, 1);
 668		if (ret)
 669			return ret;
 670		if (gfs2_is_jdata(ip))
 671			filemap_write_and_wait(mapping);
 672		gfs2_ail_flush(ip->i_gl, 1);
 673	}
 674
 675	if (mapping->nrpages)
 676		ret = filemap_fdatawait_range(mapping, start, end);
 677
 678	return ret ? ret : ret1;
 679}
 680
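/*
 * A userspace sketch (illustrative, not part of this file): @datasync
 * mirrors the fsync(2)/fdatasync(2) split. fdatasync() passes
 * datasync == 1, so pure timestamp updates (I_DIRTY_SYNC | I_DIRTY_TIME)
 * can be skipped:
 *
 *	write(fd, buf, len);
 *	fdatasync(fd);	// data and size changes only
 *	fsync(fd);	// everything, including timestamps
 */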
 681/**
 682 * gfs2_file_write_iter - Perform a write to a file
 683 * @iocb: The io context
  684 * @from: The data to write
 687 *
 688 * We have to do a lock/unlock here to refresh the inode size for
 689 * O_APPEND writes, otherwise we can land up writing at the wrong
 690 * offset. There is still a race, but provided the app is using its
 691 * own file locking, this will make O_APPEND work as expected.
 692 *
 693 */
 694
 695static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 696{
 697	struct file *file = iocb->ki_filp;
 698	struct gfs2_inode *ip = GFS2_I(file_inode(file));
 699	int ret;
 700
 701	ret = gfs2_rsqa_alloc(ip);
 702	if (ret)
 703		return ret;
 704
 705	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
 706
 707	if (iocb->ki_flags & IOCB_APPEND) {
 708		struct gfs2_holder gh;
 709
 710		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 711		if (ret)
 712			return ret;
 713		gfs2_glock_dq_uninit(&gh);
 714	}
 715
 716	return generic_file_write_iter(iocb, from);
 717}
 718
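/*
 * A userspace sketch (illustrative, not part of this file): the
 * IOCB_APPEND lock/unlock above exists so that this pattern lands at the
 * real end of file even when another node has just extended it:
 *
 *	int fd = open("/mnt/gfs2/log", O_WRONLY | O_APPEND);
 *	write(fd, line, len);	// offset taken from the refreshed i_size
 */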
 719static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 720			   int mode)
 721{
 722	struct gfs2_inode *ip = GFS2_I(inode);
 723	struct buffer_head *dibh;
 724	int error;
 725	unsigned int nr_blks;
 726	sector_t lblock = offset >> inode->i_blkbits;
 727
 728	error = gfs2_meta_inode_buffer(ip, &dibh);
 729	if (unlikely(error))
 730		return error;
 731
 732	gfs2_trans_add_meta(ip->i_gl, dibh);
 733
 734	if (gfs2_is_stuffed(ip)) {
 735		error = gfs2_unstuff_dinode(ip, NULL);
 736		if (unlikely(error))
 737			goto out;
 738	}
 739
 740	while (len) {
 741		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 742		bh_map.b_size = len;
 743		set_buffer_zeronew(&bh_map);
 744
 745		error = gfs2_block_map(inode, lblock, &bh_map, 1);
 746		if (unlikely(error))
 747			goto out;
 748		len -= bh_map.b_size;
 749		nr_blks = bh_map.b_size >> inode->i_blkbits;
 750		lblock += nr_blks;
 751		if (!buffer_new(&bh_map))
 752			continue;
 753		if (unlikely(!buffer_zeronew(&bh_map))) {
 754			error = -EIO;
 755			goto out;
 756		}
 757	}
 758out:
 759	brelse(dibh);
 760	return error;
 761}
 762/**
 763 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 764 *                     blocks, determine how many bytes can be written.
 765 * @ip:          The inode in question.
 766 * @len:         Max cap of bytes. What we return in *len must be <= this.
 767 * @data_blocks: Compute and return the number of data blocks needed
 768 * @ind_blocks:  Compute and return the number of indirect blocks needed
 769 * @max_blocks:  The total blocks available to work with.
 770 *
 771 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 772 */
 773static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
 774			    unsigned int *data_blocks, unsigned int *ind_blocks,
 775			    unsigned int max_blocks)
 776{
 777	loff_t max = *len;
 778	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 779	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
 780
 781	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
 782		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
 783		max_data -= tmp;
 784	}
 785
 786	*data_blocks = max_data;
 787	*ind_blocks = max_blocks - max_data;
 788	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
 789	if (*len > max) {
 790		*len = max;
 791		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
 792	}
 793}
 794
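/*
 * A worked example, assuming a 4KiB block size, for which the on-disk
 * layout gives roughly sd_diptrs == 483 (direct pointers in the dinode)
 * and sd_inptrs == 509 (pointers per indirect block); those two numbers
 * are illustrative assumptions, not taken from this file. With
 * max_blocks = 1024 and sd_max_height = 3:
 *
 *	max_data = 1024 - 3 * (3 - 1) = 1018
 *	tmp = DIV_ROUND_UP(1018, 509) = 2; max_data = 1016; loop ends (2 <= 483)
 *	*data_blocks = 1016, *ind_blocks = 1024 - 1016 = 8
 *	*len = (1016 - 3) << 12 = 4149248 bytes
 */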
 795static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 796{
 797	struct inode *inode = file_inode(file);
 798	struct gfs2_sbd *sdp = GFS2_SB(inode);
 799	struct gfs2_inode *ip = GFS2_I(inode);
 800	struct gfs2_alloc_parms ap = { .aflags = 0, };
 801	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 802	loff_t bytes, max_bytes, max_blks = UINT_MAX;
 803	int error;
 804	const loff_t pos = offset;
 805	const loff_t count = len;
 806	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
 807	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 808	loff_t max_chunk_size = UINT_MAX & bsize_mask;
 809
 810	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
 811
 812	offset &= bsize_mask;
 813
 814	len = next - offset;
 815	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
 816	if (!bytes)
 817		bytes = UINT_MAX;
 818	bytes &= bsize_mask;
 819	if (bytes == 0)
 820		bytes = sdp->sd_sb.sb_bsize;
 821
 822	gfs2_size_hint(file, offset, len);
 823
 824	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
 825	ap.min_target = data_blocks + ind_blocks;
 826
 827	while (len > 0) {
 828		if (len < bytes)
 829			bytes = len;
 830		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
 831			len -= bytes;
 832			offset += bytes;
 833			continue;
 834		}
 835
 836		/* We need to determine how many bytes we can actually
 837		 * fallocate without exceeding quota or going over the
 838		 * end of the fs. We start off optimistically by assuming
 839		 * we can write max_bytes */
 840		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
 841
 842		/* Since max_bytes is most likely a theoretical max, we
 843		 * calculate a more realistic 'bytes' to serve as a good
 844		 * starting point for the number of bytes we may be able
 845		 * to write */
 846		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 847		ap.target = data_blocks + ind_blocks;
 848
 849		error = gfs2_quota_lock_check(ip, &ap);
 850		if (error)
 851			return error;
 852		/* ap.allowed tells us how many blocks quota will allow
 853		 * us to write. Check if this reduces max_blks */
 854		if (ap.allowed && ap.allowed < max_blks)
 855			max_blks = ap.allowed;
 856
 857		error = gfs2_inplace_reserve(ip, &ap);
 858		if (error)
 859			goto out_qunlock;
 860
 861		/* check if the selected rgrp limits our max_blks further */
 862		if (ap.allowed && ap.allowed < max_blks)
 863			max_blks = ap.allowed;
 864
 865		/* Almost done. Calculate bytes that can be written using
 866		 * max_blks. We also recompute max_bytes, data_blocks and
 867		 * ind_blocks */
 868		calc_max_reserv(ip, &max_bytes, &data_blocks,
 869				&ind_blocks, max_blks);
 870
 871		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
 872			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
 873		if (gfs2_is_jdata(ip))
 874			rblocks += data_blocks ? data_blocks : 1;
 875
 876		error = gfs2_trans_begin(sdp, rblocks,
 877					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
 878		if (error)
 879			goto out_trans_fail;
 880
 881		error = fallocate_chunk(inode, offset, max_bytes, mode);
 882		gfs2_trans_end(sdp);
 883
 884		if (error)
 885			goto out_trans_fail;
 886
 887		len -= max_bytes;
 888		offset += max_bytes;
 889		gfs2_inplace_release(ip);
 890		gfs2_quota_unlock(ip);
 891	}
 892
 893	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
 894		i_size_write(inode, pos + count);
 895		file_update_time(file);
 896		mark_inode_dirty(inode);
 897	}
 898
 899	if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host))
 900		return vfs_fsync_range(file, pos, pos + count - 1,
 901			       (file->f_flags & __O_SYNC) ? 0 : 1);
 902	return 0;
 903
 904out_trans_fail:
 905	gfs2_inplace_release(ip);
 906out_qunlock:
 907	gfs2_quota_unlock(ip);
 908	return error;
 909}
 910
 911static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 912{
 913	struct inode *inode = file_inode(file);
 914	struct gfs2_inode *ip = GFS2_I(inode);
 915	struct gfs2_holder gh;
 916	int ret;
 917
 918	if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
 919		return -EOPNOTSUPP;
 920
 921	inode_lock(inode);
 922
 923	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 924	ret = gfs2_glock_nq(&gh);
 925	if (ret)
 926		goto out_uninit;
 927
 928	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
 929	    (offset + len) > inode->i_size) {
 930		ret = inode_newsize_ok(inode, offset + len);
 931		if (ret)
 932			goto out_unlock;
 933	}
 934
 935	ret = get_write_access(inode);
 936	if (ret)
 937		goto out_unlock;
 938
 939	ret = gfs2_rsqa_alloc(ip);
 940	if (ret)
 941		goto out_putw;
 942
 943	ret = __gfs2_fallocate(file, mode, offset, len);
 944	if (ret)
 945		gfs2_rs_deltree(&ip->i_res);
 946
 947out_putw:
 948	put_write_access(inode);
 949out_unlock:
 950	gfs2_glock_dq(&gh);
 951out_uninit:
 952	gfs2_holder_uninit(&gh);
 953	inode_unlock(inode);
 954	return ret;
 955}
 956
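/*
 * A userspace sketch (illustrative, not part of this file): as the mode
 * check above shows, this version accepts only mode == 0 or
 * FALLOC_FL_KEEP_SIZE, and rejects jdata files outright:
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	fallocate(fd, 0, 0, 1 << 20);			// may grow i_size
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20);	// preallocation only
 *	// any other mode bit, e.g. FALLOC_FL_PUNCH_HOLE, gets -EOPNOTSUPP
 */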
 957static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
 958				      struct file *out, loff_t *ppos,
 959				      size_t len, unsigned int flags)
 960{
 961	int error;
 962	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
 963
 964	error = gfs2_rsqa_alloc(ip);
 965	if (error)
 966		return (ssize_t)error;
 967
 968	gfs2_size_hint(out, *ppos, len);
 969
 970	return iter_file_splice_write(pipe, out, ppos, len, flags);
 971}
 972
 973#ifdef CONFIG_GFS2_FS_LOCKING_DLM
 974
 975/**
 976 * gfs2_lock - acquire/release a posix lock on a file
 977 * @file: the file pointer
 978 * @cmd: either modify or retrieve lock state, possibly wait
 979 * @fl: type and range of lock
 980 *
 981 * Returns: errno
 982 */
 983
 984static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 985{
 986	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 987	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
 988	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 989
 990	if (!(fl->fl_flags & FL_POSIX))
 991		return -ENOLCK;
 992	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
 993		return -ENOLCK;
 994
 995	if (cmd == F_CANCELLK) {
 996		/* Hack: */
 997		cmd = F_SETLK;
 998		fl->fl_type = F_UNLCK;
 999	}
1000	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
1001		if (fl->fl_type == F_UNLCK)
1002			locks_lock_file_wait(file, fl);
1003		return -EIO;
1004	}
1005	if (IS_GETLK(cmd))
1006		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
1007	else if (fl->fl_type == F_UNLCK)
1008		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
1009	else
1010		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
1011}
1012
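/*
 * A userspace sketch (illustrative, not part of this file): POSIX
 * byte-range locks are forwarded to the DLM by the calls above, so an
 * ordinary fcntl(2) lock is visible cluster-wide:
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 4096,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);	// blocks until the range is free
 */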
1013static int do_flock(struct file *file, int cmd, struct file_lock *fl)
1014{
1015	struct gfs2_file *fp = file->private_data;
1016	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1017	struct gfs2_inode *ip = GFS2_I(file_inode(file));
1018	struct gfs2_glock *gl;
1019	unsigned int state;
1020	u16 flags;
1021	int error = 0;
1022	int sleeptime;
1023
1024	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
1025	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;
1026
1027	mutex_lock(&fp->f_fl_mutex);
1028
1029	gl = fl_gh->gh_gl;
1030	if (gl) {
1031		if (fl_gh->gh_state == state)
1032			goto out;
1033		locks_lock_file_wait(file,
1034				     &(struct file_lock) {
1035					     .fl_type = F_UNLCK,
1036					     .fl_flags = FL_FLOCK
1037				     });
1038		gfs2_glock_dq(fl_gh);
1039		gfs2_holder_reinit(state, flags, fl_gh);
1040	} else {
1041		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
1042				       &gfs2_flock_glops, CREATE, &gl);
1043		if (error)
1044			goto out;
1045		gfs2_holder_init(gl, state, flags, fl_gh);
1046		gfs2_glock_put(gl);
1047	}
1048	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
1049		error = gfs2_glock_nq(fl_gh);
1050		if (error != GLR_TRYFAILED)
1051			break;
1052		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
1053		fl_gh->gh_error = 0;
1054		msleep(sleeptime);
1055	}
1056	if (error) {
1057		gfs2_holder_uninit(fl_gh);
1058		if (error == GLR_TRYFAILED)
1059			error = -EAGAIN;
1060	} else {
1061		error = locks_lock_file_wait(file, fl);
1062		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
1063	}
1064
1065out:
1066	mutex_unlock(&fp->f_fl_mutex);
1067	return error;
1068}
1069
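/*
 * A userspace sketch (illustrative, not part of this file): flock(2)
 * locks map onto a dedicated flock glock per inode, so they too work
 * across the cluster:
 *
 *	flock(fd, LOCK_EX);		// do_flock(), LM_ST_EXCLUSIVE
 *	flock(fd, LOCK_SH);		// holder re-queued as LM_ST_SHARED
 *	flock(fd, LOCK_EX | LOCK_NB);	// LM_FLAG_TRY_1CB; EWOULDBLOCK if busy
 *	flock(fd, LOCK_UN);		// do_unflock()
 */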
1070static void do_unflock(struct file *file, struct file_lock *fl)
1071{
1072	struct gfs2_file *fp = file->private_data;
1073	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
1074
1075	mutex_lock(&fp->f_fl_mutex);
1076	locks_lock_file_wait(file, fl);
1077	if (gfs2_holder_initialized(fl_gh)) {
1078		gfs2_glock_dq(fl_gh);
1079		gfs2_holder_uninit(fl_gh);
1080	}
1081	mutex_unlock(&fp->f_fl_mutex);
1082}
1083
1084/**
1085 * gfs2_flock - acquire/release a flock lock on a file
1086 * @file: the file pointer
1087 * @cmd: either modify or retrieve lock state, possibly wait
1088 * @fl: type and range of lock
1089 *
1090 * Returns: errno
1091 */
1092
1093static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
1094{
1095	if (!(fl->fl_flags & FL_FLOCK))
1096		return -ENOLCK;
1097	if (fl->fl_type & LOCK_MAND)
1098		return -EOPNOTSUPP;
1099
1100	if (fl->fl_type == F_UNLCK) {
1101		do_unflock(file, fl);
1102		return 0;
1103	} else {
1104		return do_flock(file, cmd, fl);
1105	}
1106}
1107
1108const struct file_operations gfs2_file_fops = {
1109	.llseek		= gfs2_llseek,
1110	.read_iter	= generic_file_read_iter,
1111	.write_iter	= gfs2_file_write_iter,
1112	.unlocked_ioctl	= gfs2_ioctl,
1113	.mmap		= gfs2_mmap,
1114	.open		= gfs2_open,
1115	.release	= gfs2_release,
1116	.fsync		= gfs2_fsync,
1117	.lock		= gfs2_lock,
1118	.flock		= gfs2_flock,
1119	.splice_read	= generic_file_splice_read,
1120	.splice_write	= gfs2_file_splice_write,
1121	.setlease	= simple_nosetlease,
1122	.fallocate	= gfs2_fallocate,
1123};
1124
1125const struct file_operations gfs2_dir_fops = {
1126	.iterate_shared	= gfs2_readdir,
1127	.unlocked_ioctl	= gfs2_ioctl,
1128	.open		= gfs2_open,
1129	.release	= gfs2_release,
1130	.fsync		= gfs2_fsync,
1131	.lock		= gfs2_lock,
1132	.flock		= gfs2_flock,
1133	.llseek		= default_llseek,
1134};
1135
1136#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1137
1138const struct file_operations gfs2_file_fops_nolock = {
1139	.llseek		= gfs2_llseek,
1140	.read_iter	= generic_file_read_iter,
1141	.write_iter	= gfs2_file_write_iter,
1142	.unlocked_ioctl	= gfs2_ioctl,
1143	.mmap		= gfs2_mmap,
1144	.open		= gfs2_open,
1145	.release	= gfs2_release,
1146	.fsync		= gfs2_fsync,
1147	.splice_read	= generic_file_splice_read,
1148	.splice_write	= gfs2_file_splice_write,
1149	.setlease	= generic_setlease,
1150	.fallocate	= gfs2_fallocate,
1151};
1152
1153const struct file_operations gfs2_dir_fops_nolock = {
1154	.iterate_shared	= gfs2_readdir,
1155	.unlocked_ioctl	= gfs2_ioctl,
1156	.open		= gfs2_open,
1157	.release	= gfs2_release,
1158	.fsync		= gfs2_fsync,
1159	.llseek		= default_llseek,
1160};
1161
v3.5.6
   1/*
   2 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
   3 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
   4 *
   5 * This copyrighted material is made available to anyone wishing to use,
   6 * modify, copy, or redistribute it subject to the terms and conditions
   7 * of the GNU General Public License version 2.
   8 */
   9
  10#include <linux/slab.h>
  11#include <linux/spinlock.h>
  12#include <linux/completion.h>
  13#include <linux/buffer_head.h>
  14#include <linux/pagemap.h>
  15#include <linux/uio.h>
  16#include <linux/blkdev.h>
  17#include <linux/mm.h>
  18#include <linux/mount.h>
  19#include <linux/fs.h>
  20#include <linux/gfs2_ondisk.h>
  21#include <linux/falloc.h>
  22#include <linux/swap.h>
  23#include <linux/crc32.h>
  24#include <linux/writeback.h>
  25#include <asm/uaccess.h>
  26#include <linux/dlm.h>
  27#include <linux/dlm_plock.h>
  28
  29#include "gfs2.h"
  30#include "incore.h"
  31#include "bmap.h"
  32#include "dir.h"
  33#include "glock.h"
  34#include "glops.h"
  35#include "inode.h"
  36#include "log.h"
  37#include "meta_io.h"
  38#include "quota.h"
  39#include "rgrp.h"
  40#include "trans.h"
  41#include "util.h"
  42
  43/**
  44 * gfs2_llseek - seek to a location in a file
  45 * @file: the file
  46 * @offset: the offset
  47 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
  48 *
  49 * SEEK_END requires the glock for the file because it references the
  50 * file's size.
  51 *
  52 * Returns: The new offset, or errno
  53 */
  54
  55static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
  56{
  57	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
  58	struct gfs2_holder i_gh;
  59	loff_t error;
  60
  61	switch (origin) {
  62	case SEEK_END: /* These reference inode->i_size */
  63	case SEEK_DATA:
  64	case SEEK_HOLE:
  65		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
  66					   &i_gh);
  67		if (!error) {
  68			error = generic_file_llseek(file, offset, origin);
  69			gfs2_glock_dq_uninit(&i_gh);
  70		}
  71		break;
  72	case SEEK_CUR:
  73	case SEEK_SET:
  74		error = generic_file_llseek(file, offset, origin);
  75		break;
  76	default:
  77		error = -EINVAL;
  78	}
  79
  80	return error;
  81}
  82
  83/**
  84 * gfs2_readdir - Read directory entries from a directory
  85 * @file: The directory to read from
  86 * @dirent: Buffer for dirents
  87 * @filldir: Function used to do the copying
  88 *
  89 * Returns: errno
  90 */
  91
  92static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
  93{
  94	struct inode *dir = file->f_mapping->host;
  95	struct gfs2_inode *dip = GFS2_I(dir);
  96	struct gfs2_holder d_gh;
  97	u64 offset = file->f_pos;
  98	int error;
  99
 100	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
 101	error = gfs2_glock_nq(&d_gh);
 102	if (error) {
 103		gfs2_holder_uninit(&d_gh);
 104		return error;
 105	}
 106
 107	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);
 108
 109	gfs2_glock_dq_uninit(&d_gh);
 110
 111	file->f_pos = offset;
 112
 113	return error;
 114}
 115
 116/**
 117 * fsflags_cvt
 118 * @table: A table of 32 u32 flags
 119 * @val: a 32 bit value to convert
 120 *
 121 * This function can be used to convert between fsflags values and
 122 * GFS2's own flags values.
 123 *
 124 * Returns: the converted flags
 125 */
 126static u32 fsflags_cvt(const u32 *table, u32 val)
 127{
 128	u32 res = 0;
 129	while(val) {
 130		if (val & 1)
 131			res |= *table;
 132		table++;
 133		val >>= 1;
 134	}
 135	return res;
 136}
 137
 138static const u32 fsflags_to_gfs2[32] = {
 139	[3] = GFS2_DIF_SYNC,
 140	[4] = GFS2_DIF_IMMUTABLE,
 141	[5] = GFS2_DIF_APPENDONLY,
 142	[7] = GFS2_DIF_NOATIME,
 143	[12] = GFS2_DIF_EXHASH,
 144	[14] = GFS2_DIF_INHERIT_JDATA,
 145};
 146
 147static const u32 gfs2_to_fsflags[32] = {
 148	[gfs2fl_Sync] = FS_SYNC_FL,
 149	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
 150	[gfs2fl_AppendOnly] = FS_APPEND_FL,
 151	[gfs2fl_NoAtime] = FS_NOATIME_FL,
 152	[gfs2fl_ExHash] = FS_INDEX_FL,
 153	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
 154};
 155
 156static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
 157{
 158	struct inode *inode = filp->f_path.dentry->d_inode;
 159	struct gfs2_inode *ip = GFS2_I(inode);
 160	struct gfs2_holder gh;
 161	int error;
 162	u32 fsflags;
 163
 164	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 165	error = gfs2_glock_nq(&gh);
 166	if (error)
 167		return error;
 168
 169	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
 170	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
 171		fsflags |= FS_JOURNAL_DATA_FL;
 172	if (put_user(fsflags, ptr))
 173		error = -EFAULT;
 174
 175	gfs2_glock_dq(&gh);
 176	gfs2_holder_uninit(&gh);
 177	return error;
 178}
 179
 180void gfs2_set_inode_flags(struct inode *inode)
 181{
 182	struct gfs2_inode *ip = GFS2_I(inode);
 183	unsigned int flags = inode->i_flags;
 184
 185	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
 186	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
 187		inode->i_flags |= S_NOSEC;
 188	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
 189		flags |= S_IMMUTABLE;
 190	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
 191		flags |= S_APPEND;
 192	if (ip->i_diskflags & GFS2_DIF_NOATIME)
 193		flags |= S_NOATIME;
 194	if (ip->i_diskflags & GFS2_DIF_SYNC)
 195		flags |= S_SYNC;
 196	inode->i_flags = flags;
 197}
 198
 199/* Flags that can be set by user space */
 200#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
 201			     GFS2_DIF_IMMUTABLE|		\
 202			     GFS2_DIF_APPENDONLY|		\
 203			     GFS2_DIF_NOATIME|			\
 204			     GFS2_DIF_SYNC|			\
 205			     GFS2_DIF_SYSTEM|			\
 206			     GFS2_DIF_INHERIT_JDATA)
 207
 208/**
  209 * do_gfs2_set_flags - set flags on an inode
  210 * @filp: file pointer
  211 * @reqflags: The flags to set
 212 * @mask: Indicates which flags are valid
 213 *
 214 */
 215static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
 216{
 217	struct inode *inode = filp->f_path.dentry->d_inode;
 218	struct gfs2_inode *ip = GFS2_I(inode);
 219	struct gfs2_sbd *sdp = GFS2_SB(inode);
 220	struct buffer_head *bh;
 221	struct gfs2_holder gh;
 222	int error;
 223	u32 new_flags, flags;
 224
 225	error = mnt_want_write_file(filp);
 226	if (error)
 227		return error;
 228
 229	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 230	if (error)
 231		goto out_drop_write;
 232
 233	error = -EACCES;
 234	if (!inode_owner_or_capable(inode))
 235		goto out;
 236
 237	error = 0;
 238	flags = ip->i_diskflags;
 239	new_flags = (flags & ~mask) | (reqflags & mask);
 240	if ((new_flags ^ flags) == 0)
 241		goto out;
 242
 243	error = -EINVAL;
 244	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
 245		goto out;
 246
 247	error = -EPERM;
 248	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
 249		goto out;
 250	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
 251		goto out;
 252	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
 253	    !capable(CAP_LINUX_IMMUTABLE))
 254		goto out;
 255	if (!IS_IMMUTABLE(inode)) {
 256		error = gfs2_permission(inode, MAY_WRITE);
 257		if (error)
 258			goto out;
 259	}
 260	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
 261		if (flags & GFS2_DIF_JDATA)
 262			gfs2_log_flush(sdp, ip->i_gl);
 263		error = filemap_fdatawrite(inode->i_mapping);
 264		if (error)
 265			goto out;
 266		error = filemap_fdatawait(inode->i_mapping);
 267		if (error)
 268			goto out;
 269	}
 270	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
 271	if (error)
 272		goto out;
 273	error = gfs2_meta_inode_buffer(ip, &bh);
 274	if (error)
 275		goto out_trans_end;
 276	gfs2_trans_add_bh(ip->i_gl, bh, 1);
 277	ip->i_diskflags = new_flags;
 278	gfs2_dinode_out(ip, bh->b_data);
 279	brelse(bh);
 280	gfs2_set_inode_flags(inode);
 281	gfs2_set_aops(inode);
 282out_trans_end:
 283	gfs2_trans_end(sdp);
 284out:
 285	gfs2_glock_dq_uninit(&gh);
 286out_drop_write:
 287	mnt_drop_write_file(filp);
 288	return error;
 289}
 290
 291static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
 292{
 293	struct inode *inode = filp->f_path.dentry->d_inode;
 294	u32 fsflags, gfsflags;
 295
 296	if (get_user(fsflags, ptr))
 297		return -EFAULT;
 298
 299	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
 300	if (!S_ISDIR(inode->i_mode)) {
 301		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
 302			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
 303		return do_gfs2_set_flags(filp, gfsflags, ~0);
 304	}
 305	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
 306}
 307
 308static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 309{
 310	switch(cmd) {
 311	case FS_IOC_GETFLAGS:
 312		return gfs2_get_flags(filp, (u32 __user *)arg);
 313	case FS_IOC_SETFLAGS:
 314		return gfs2_set_flags(filp, (u32 __user *)arg);
 315	case FITRIM:
 316		return gfs2_fitrim(filp, (void __user *)arg);
 317	}
 318	return -ENOTTY;
 319}
 320
 321/**
 322 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 323 * @page: The (locked) page to allocate backing for
 324 *
 325 * We try to allocate all the blocks required for the page in
 326 * one go. This might fail for various reasons, so we keep
 327 * trying until all the blocks to back this page are allocated.
  328 * If some of the blocks are already allocated, that's ok too.
 329 */
 330
 331static int gfs2_allocate_page_backing(struct page *page)
 332{
 333	struct inode *inode = page->mapping->host;
 334	struct buffer_head bh;
 335	unsigned long size = PAGE_CACHE_SIZE;
 336	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
 337
 338	do {
 339		bh.b_state = 0;
 340		bh.b_size = size;
 341		gfs2_block_map(inode, lblock, &bh, 1);
 342		if (!buffer_mapped(&bh))
 343			return -EIO;
 344		size -= bh.b_size;
 345		lblock += (bh.b_size >> inode->i_blkbits);
 346	} while(size > 0);
 347	return 0;
 348}
 349
 350/**
 351 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 352 * @vma: The virtual memory area
 353 * @page: The page which is about to become writable
 354 *
 355 * When the page becomes writable, we need to ensure that we have
 356 * blocks allocated on disk to back that page.
 357 */
 358
 359static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 360{
 361	struct page *page = vmf->page;
 362	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
 363	struct gfs2_inode *ip = GFS2_I(inode);
 364	struct gfs2_sbd *sdp = GFS2_SB(inode);
 365	unsigned long last_index;
 366	u64 pos = page->index << PAGE_CACHE_SHIFT;
 367	unsigned int data_blocks, ind_blocks, rblocks;
 368	struct gfs2_holder gh;
 369	struct gfs2_qadata *qa;
 370	loff_t size;
 371	int ret;
 372
 373	/* Wait if fs is frozen. This is racy so we check again later on
 374	 * and retry if the fs has been frozen after the page lock has
 375	 * been acquired
 376	 */
 377	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
 378
 379	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
 380	ret = gfs2_glock_nq(&gh);
 381	if (ret)
 382		goto out;
 383
 384	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 385	set_bit(GIF_SW_PAGED, &ip->i_flags);
 386
 387	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
 388		lock_page(page);
 389		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
 390			ret = -EAGAIN;
 391			unlock_page(page);
 392		}
 393		goto out_unlock;
 394	}
 395
 396	ret = -ENOMEM;
 397	qa = gfs2_qadata_get(ip);
 398	if (qa == NULL)
 399		goto out_unlock;
 400
 401	ret = gfs2_quota_lock_check(ip);
 402	if (ret)
 403		goto out_alloc_put;
 404	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
 405	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
 406	if (ret)
 407		goto out_quota_unlock;
 408
 409	rblocks = RES_DINODE + ind_blocks;
 410	if (gfs2_is_jdata(ip))
 411		rblocks += data_blocks ? data_blocks : 1;
 412	if (ind_blocks || data_blocks) {
 413		rblocks += RES_STATFS + RES_QUOTA;
 414		rblocks += gfs2_rg_blocks(ip);
 415	}
 416	ret = gfs2_trans_begin(sdp, rblocks, 0);
 417	if (ret)
 418		goto out_trans_fail;
 419
 420	lock_page(page);
 421	ret = -EINVAL;
 422	size = i_size_read(inode);
 423	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
 424	/* Check page index against inode size */
 425	if (size == 0 || (page->index > last_index))
 426		goto out_trans_end;
 427
 428	ret = -EAGAIN;
 429	/* If truncated, we must retry the operation, we may have raced
 430	 * with the glock demotion code.
 431	 */
 432	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
 433		goto out_trans_end;
 434
 435	/* Unstuff, if required, and allocate backing blocks for page */
 436	ret = 0;
 437	if (gfs2_is_stuffed(ip))
 438		ret = gfs2_unstuff_dinode(ip, page);
 439	if (ret == 0)
 440		ret = gfs2_allocate_page_backing(page);
 441
 442out_trans_end:
 443	if (ret)
 444		unlock_page(page);
 445	gfs2_trans_end(sdp);
 446out_trans_fail:
 447	gfs2_inplace_release(ip);
 448out_quota_unlock:
 449	gfs2_quota_unlock(ip);
 450out_alloc_put:
 451	gfs2_qadata_put(ip);
 452out_unlock:
 453	gfs2_glock_dq(&gh);
 454out:
 455	gfs2_holder_uninit(&gh);
 456	if (ret == 0) {
 457		set_page_dirty(page);
 458		/* This check must be post dropping of transaction lock */
 459		if (inode->i_sb->s_frozen == SB_UNFROZEN) {
 460			wait_on_page_writeback(page);
 461		} else {
 462			ret = -EAGAIN;
 463			unlock_page(page);
 464		}
 465	}
 466	return block_page_mkwrite_return(ret);
 467}
 468
 469static const struct vm_operations_struct gfs2_vm_ops = {
 470	.fault = filemap_fault,
 471	.page_mkwrite = gfs2_page_mkwrite,
 472};
 473
 474/**
  475 * gfs2_mmap - set up a file-backed memory mapping
 476 * @file: The file to map
 477 * @vma: The VMA which described the mapping
 478 *
 479 * There is no need to get a lock here unless we should be updating
 480 * atime. We ignore any locking errors since the only consequence is
 481 * a missed atime update (which will just be deferred until later).
 482 *
 483 * Returns: 0
 484 */
 485
 486static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
 487{
 488	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 489
 490	if (!(file->f_flags & O_NOATIME) &&
 491	    !IS_NOATIME(&ip->i_inode)) {
 492		struct gfs2_holder i_gh;
 493		int error;
 494
 495		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
 496		error = gfs2_glock_nq(&i_gh);
 497		if (error == 0) {
 498			file_accessed(file);
 499			gfs2_glock_dq(&i_gh);
 500		}
 501		gfs2_holder_uninit(&i_gh);
 502		if (error)
 503			return error;
 504	}
 505	vma->vm_ops = &gfs2_vm_ops;
 506	vma->vm_flags |= VM_CAN_NONLINEAR;
 507
 508	return 0;
 509}
 510
 511/**
 512 * gfs2_open - open a file
 513 * @inode: the inode to open
 514 * @file: the struct file for this opening
 515 *
 516 * Returns: errno
 517 */
 518
 519static int gfs2_open(struct inode *inode, struct file *file)
 520{
 521	struct gfs2_inode *ip = GFS2_I(inode);
 522	struct gfs2_holder i_gh;
 523	struct gfs2_file *fp;
 524	int error;
 525
 526	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
 527	if (!fp)
 528		return -ENOMEM;
 529
 530	mutex_init(&fp->f_fl_mutex);
 531
 532	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
 533	file->private_data = fp;
 534
 535	if (S_ISREG(ip->i_inode.i_mode)) {
 536		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
 537					   &i_gh);
 538		if (error)
 539			goto fail;
 540
 541		if (!(file->f_flags & O_LARGEFILE) &&
 542		    i_size_read(inode) > MAX_NON_LFS) {
 543			error = -EOVERFLOW;
 544			goto fail_gunlock;
 545		}
 546
  546
 547		gfs2_glock_dq_uninit(&i_gh);
 548	}
 549
 550	return 0;
 551
 552fail_gunlock:
 553	gfs2_glock_dq_uninit(&i_gh);
 554fail:
 555	file->private_data = NULL;
 556	kfree(fp);
 557	return error;
 558}
 559
 560/**
 561 * gfs2_release - called to close a struct file
 562 * @inode: the inode the struct file belongs to
 563 * @file: the struct file being closed
 564 *
 565 * Returns: errno
 566 */
 567
 568static int gfs2_release(struct inode *inode, struct file *file)
 569{
 570	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
 571	struct gfs2_file *fp;
 572
 573	fp = file->private_data;
 574	file->private_data = NULL;
 575
 576	if (gfs2_assert_warn(sdp, fp))
 577		return -EIO;
 578
 579	kfree(fp);
 580
 581	return 0;
 582}
 583
 584/**
 585 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 586 * @file: the file that points to the dentry
 587 * @start: the start position in the file to sync
 588 * @end: the end position in the file to sync
 589 * @datasync: set if we can ignore timestamp changes
 590 *
 591 * We split the data flushing here so that we don't wait for the data
 592 * until after we've also sent the metadata to disk. Note that for
 593 * data=ordered, we will write & wait for the data at the log flush
 594 * stage anyway, so this is unlikely to make much of a difference
 595 * except in the data=writeback case.
 596 *
 597 * If the fdatawrite fails due to any reason except -EIO, we will
 598 * continue the remainder of the fsync, although we'll still report
 599 * the error at the end. This is to match filemap_write_and_wait_range()
 600 * behaviour.
 601 *
 602 * Returns: errno
 603 */
 604
 605static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
 606		      int datasync)
 607{
 608	struct address_space *mapping = file->f_mapping;
 609	struct inode *inode = mapping->host;
 610	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
 611	struct gfs2_inode *ip = GFS2_I(inode);
 612	int ret = 0, ret1 = 0;
 613
 614	if (mapping->nrpages) {
 615		ret1 = filemap_fdatawrite_range(mapping, start, end);
 616		if (ret1 == -EIO)
 617			return ret1;
 618	}
 619
 620	if (datasync)
 621		sync_state &= ~I_DIRTY_SYNC;
 622
 623	if (sync_state) {
 624		ret = sync_inode_metadata(inode, 1);
 625		if (ret)
 626			return ret;
 627		if (gfs2_is_jdata(ip))
 628			filemap_write_and_wait(mapping);
 629		gfs2_ail_flush(ip->i_gl, 1);
 630	}
 631
 632	if (mapping->nrpages)
 633		ret = filemap_fdatawait_range(mapping, start, end);
 634
 635	return ret ? ret : ret1;
 636}
 637
 638/**
 639 * gfs2_file_aio_write - Perform a write to a file
 640 * @iocb: The io context
 641 * @iov: The data to write
 642 * @nr_segs: Number of @iov segments
 643 * @pos: The file position
 644 *
 645 * We have to do a lock/unlock here to refresh the inode size for
 646 * O_APPEND writes, otherwise we can land up writing at the wrong
 647 * offset. There is still a race, but provided the app is using its
 648 * own file locking, this will make O_APPEND work as expected.
 649 *
 650 */
 651
 652static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 653				   unsigned long nr_segs, loff_t pos)
 654{
 655	struct file *file = iocb->ki_filp;
 656
 657	if (file->f_flags & O_APPEND) {
 658		struct dentry *dentry = file->f_dentry;
 659		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
 660		struct gfs2_holder gh;
 661		int ret;
 662
 663		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 664		if (ret)
 665			return ret;
 666		gfs2_glock_dq_uninit(&gh);
 667	}
 668
 669	return generic_file_aio_write(iocb, iov, nr_segs, pos);
 670}
 671
 672static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
 673			   int mode)
 674{
 675	struct gfs2_inode *ip = GFS2_I(inode);
 676	struct buffer_head *dibh;
 677	int error;
 678	loff_t size = len;
 679	unsigned int nr_blks;
 680	sector_t lblock = offset >> inode->i_blkbits;
 681
 682	error = gfs2_meta_inode_buffer(ip, &dibh);
 683	if (unlikely(error))
 684		return error;
 685
 686	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 687
 688	if (gfs2_is_stuffed(ip)) {
 689		error = gfs2_unstuff_dinode(ip, NULL);
 690		if (unlikely(error))
 691			goto out;
 692	}
 693
 694	while (len) {
 695		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
 696		bh_map.b_size = len;
 697		set_buffer_zeronew(&bh_map);
 698
 699		error = gfs2_block_map(inode, lblock, &bh_map, 1);
 700		if (unlikely(error))
 701			goto out;
 702		len -= bh_map.b_size;
 703		nr_blks = bh_map.b_size >> inode->i_blkbits;
 704		lblock += nr_blks;
 705		if (!buffer_new(&bh_map))
 706			continue;
 707		if (unlikely(!buffer_zeronew(&bh_map))) {
 708			error = -EIO;
 709			goto out;
 710		}
 711	}
 712	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
 713		i_size_write(inode, offset + size);
 714
 715	mark_inode_dirty(inode);
 716
 717out:
 718	brelse(dibh);
 719	return error;
 720}
 721
 722static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
 723			    unsigned int *data_blocks, unsigned int *ind_blocks)
 724{
 725	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 726	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
 727	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
 728
 729	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
 730		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
 731		max_data -= tmp;
 732	}
 733	/* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
 734	   so it might end up with fewer data blocks */
 735	if (max_data <= *data_blocks)
 736		return;
 737	*data_blocks = max_data;
 738	*ind_blocks = max_blocks - max_data;
 739	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
 740	if (*len > max) {
 741		*len = max;
 742		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
 743	}
 744}
 745
 746static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
 747			   loff_t len)
 748{
 749	struct inode *inode = file->f_path.dentry->d_inode;
 750	struct gfs2_sbd *sdp = GFS2_SB(inode);
 751	struct gfs2_inode *ip = GFS2_I(inode);
 752	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
 753	loff_t bytes, max_bytes;
 754	struct gfs2_qadata *qa;
 755	int error;
 756	const loff_t pos = offset;
 757	const loff_t count = len;
 758	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
 759	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
 760	loff_t max_chunk_size = UINT_MAX & bsize_mask;
 761	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;
 762
 763	/* We only support the FALLOC_FL_KEEP_SIZE mode */
 764	if (mode & ~FALLOC_FL_KEEP_SIZE)
 765		return -EOPNOTSUPP;
 766
 767	offset &= bsize_mask;
 768
 769	len = next - offset;
 770	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
 771	if (!bytes)
 772		bytes = UINT_MAX;
 773	bytes &= bsize_mask;
 774	if (bytes == 0)
 775		bytes = sdp->sd_sb.sb_bsize;
 776
 777	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
 778	error = gfs2_glock_nq(&ip->i_gh);
 779	if (unlikely(error))
 780		goto out_uninit;
 781
 782	while (len > 0) {
 783		if (len < bytes)
 784			bytes = len;
 785		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
 786			len -= bytes;
 787			offset += bytes;
 788			continue;
 789		}
 790		qa = gfs2_qadata_get(ip);
 791		if (!qa) {
 792			error = -ENOMEM;
 793			goto out_unlock;
 794		}
 795
 796		error = gfs2_quota_lock_check(ip);
 797		if (error)
 798			goto out_alloc_put;
 799
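    		/*
    		 * If the resource group cannot cover the reservation,
    		 * halve the chunk and retry, down to a single block.
    		 */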
 800retry:
 801		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
 802
 803		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks);
 804		if (error) {
 805			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
 806				bytes >>= 1;
 807				bytes &= bsize_mask;
 808				if (bytes == 0)
 809					bytes = sdp->sd_sb.sb_bsize;
 810				goto retry;
 811			}
 812			goto out_qunlock;
 813		}
 814		max_bytes = bytes;
 815		calc_max_reserv(ip, (len > max_chunk_size) ? max_chunk_size : len,
 816				&max_bytes, &data_blocks, &ind_blocks);
 817
 818		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
 819			  RES_RG_HDR + gfs2_rg_blocks(ip);
 820		if (gfs2_is_jdata(ip))
 821			rblocks += data_blocks ? data_blocks : 1;
 822
 823		error = gfs2_trans_begin(sdp, rblocks,
 824					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
 825		if (error)
 826			goto out_trans_fail;
 827
 828		error = fallocate_chunk(inode, offset, max_bytes, mode);
 829		gfs2_trans_end(sdp);
 830
 831		if (error)
 832			goto out_trans_fail;
 833
 834		len -= max_bytes;
 835		offset += max_bytes;
 836		gfs2_inplace_release(ip);
 837		gfs2_quota_unlock(ip);
 838		gfs2_qadata_put(ip);
 839	}
 840
 841	if (error == 0)
 842		error = generic_write_sync(file, pos, count);
 843	goto out_unlock;
 844
 845out_trans_fail:
 846	gfs2_inplace_release(ip);
 847out_qunlock:
 848	gfs2_quota_unlock(ip);
 849out_alloc_put:
 850	gfs2_qadata_put(ip);
 851out_unlock:
 852	gfs2_glock_dq(&ip->i_gh);
 853out_uninit:
 854	gfs2_holder_uninit(&ip->i_gh);
 855	return error;
 856}
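
    /*
     * Illustrative userspace sketch (not part of this file; path and size
     * are hypothetical): preallocation with FALLOC_FL_KEEP_SIZE, the only
     * mode gfs2_fallocate() accepts, reserving space past EOF without
     * changing the file size.
     *
     *	#define _GNU_SOURCE
     *	#include <fcntl.h>
     *	#include <unistd.h>
     *
     *	int fd = open("/mnt/gfs2/data", O_RDWR | O_CREAT, 0644);
     *	if (fd >= 0) {
     *		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0)
     *			perror("fallocate");
     *		close(fd);
     *	}
     */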
 857
 858#ifdef CONFIG_GFS2_FS_LOCKING_DLM
 859
 860/**
 861 * gfs2_setlease - acquire/release a file lease
 862 * @file: the file pointer
 863 * @arg: lease type
 864 * @fl: file lock
 865 *
 866 * We don't currently have a way to enforce a lease across the whole
 867 * cluster; until we do, disable leases (by just returning -EINVAL),
 868 * unless the administrator has requested purely local locking.
 869 *
 870 * Locking: called under lock_flocks
 871 *
 872 * Returns: errno
 873 */
 874
 875static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
 876{
 877	return -EINVAL;
 878}
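
    /*
     * Illustrative userspace sketch (not part of this file; fd is assumed
     * to be an open descriptor): requesting a lease on a clustered GFS2
     * mount fails with EINVAL through the hook above.
     *
     *	#define _GNU_SOURCE
     *	#include <fcntl.h>
     *
     *	if (fcntl(fd, F_SETLEASE, F_RDLCK) < 0)
     *		perror("F_SETLEASE");
     */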
 879
 880/**
 881 * gfs2_lock - acquire/release a posix lock on a file
 882 * @file: the file pointer
 883 * @cmd: either modify or retrieve lock state, possibly wait
 884 * @fl: type and range of lock
 885 *
 886 * Returns: errno
 887 */
 888
 889static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
 890{
 891	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
 892	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
 893	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 894
 895	if (!(fl->fl_flags & FL_POSIX))
 896		return -ENOLCK;
 897	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
 898		return -ENOLCK;
 899
 900	if (cmd == F_CANCELLK) {
 901		/* Hack: */
 902		cmd = F_SETLK;
 903		fl->fl_type = F_UNLCK;
 904	}
 905	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
 906		return -EIO;
 907	if (IS_GETLK(cmd))
 908		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
 909	else if (fl->fl_type == F_UNLCK)
 910		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
 911	else
 912		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
 913}
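
    /*
     * Illustrative userspace sketch (not part of this file; fd is assumed
     * to be an open descriptor): a blocking, whole-file POSIX write lock,
     * which gfs2_lock() forwards to dlm_posix_lock() so the lock is
     * honoured cluster-wide.
     *
     *	#include <fcntl.h>
     *
     *	struct flock fl = {
     *		.l_type   = F_WRLCK,
     *		.l_whence = SEEK_SET,
     *		.l_start  = 0,
     *		.l_len    = 0,
     *	};
     *	if (fcntl(fd, F_SETLKW, &fl) < 0)
     *		perror("fcntl");
     */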
 914
 915static int do_flock(struct file *file, int cmd, struct file_lock *fl)
 916{
 917	struct gfs2_file *fp = file->private_data;
 918	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
 919	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
 920	struct gfs2_glock *gl;
 921	unsigned int state;
 922	int flags;
 923	int error = 0;
 924
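    	/*
    	 * Map the flock type onto a glock: LOCK_EX takes the glock
    	 * exclusively, LOCK_SH takes it shared; non-blocking requests
    	 * become try-locks via LM_FLAG_TRY.
    	 */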
 925	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
 926	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
 927
 928	mutex_lock(&fp->f_fl_mutex);
 929
 930	gl = fl_gh->gh_gl;
 931	if (gl) {
 932		if (fl_gh->gh_state == state)
 933			goto out;
 934		flock_lock_file_wait(file,
 935				     &(struct file_lock){.fl_type = F_UNLCK});
 936		gfs2_glock_dq_wait(fl_gh);
 937		gfs2_holder_reinit(state, flags, fl_gh);
 938	} else {
 939		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
 940				       &gfs2_flock_glops, CREATE, &gl);
 941		if (error)
 942			goto out;
 943		gfs2_holder_init(gl, state, flags, fl_gh);
 944		gfs2_glock_put(gl);
 945	}
 946	error = gfs2_glock_nq(fl_gh);
 947	if (error) {
 948		gfs2_holder_uninit(fl_gh);
 949		if (error == GLR_TRYFAILED)
 950			error = -EAGAIN;
 951	} else {
 952		error = flock_lock_file_wait(file, fl);
 953		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
 954	}
 955
 956out:
 957	mutex_unlock(&fp->f_fl_mutex);
 958	return error;
 959}
 960
 961static void do_unflock(struct file *file, struct file_lock *fl)
 962{
 963	struct gfs2_file *fp = file->private_data;
 964	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
 965
 966	mutex_lock(&fp->f_fl_mutex);
 967	flock_lock_file_wait(file, fl);
 968	if (fl_gh->gh_gl) {
 969		gfs2_glock_dq_wait(fl_gh);
 970		gfs2_holder_uninit(fl_gh);
 971	}
 972	mutex_unlock(&fp->f_fl_mutex);
 973}
 974
 975/**
 976 * gfs2_flock - acquire/release a flock lock on a file
 977 * @file: the file pointer
 978 * @cmd: either modify or retrieve lock state, possibly wait
 979 * @fl: type and range of lock
 980 *
 981 * Returns: errno
 982 */
 983
 984static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
 985{
 986	if (!(fl->fl_flags & FL_FLOCK))
 987		return -ENOLCK;
 988	if (fl->fl_type & LOCK_MAND)
 989		return -EOPNOTSUPP;
 990
 991	if (fl->fl_type == F_UNLCK) {
 992		do_unflock(file, fl);
 993		return 0;
 994	} else {
 995		return do_flock(file, cmd, fl);
 996	}
 997}
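
    /*
     * Illustrative userspace sketch (not part of this file; fd is assumed
     * open and do_work() is a hypothetical stand-in for the critical
     * section): flock() locking, which do_flock() above turns into a
     * glock request so the exclusion holds across the cluster.
     *
     *	#include <sys/file.h>
     *
     *	if (flock(fd, LOCK_EX) == 0) {
     *		do_work(fd);
     *		flock(fd, LOCK_UN);
     *	}
     */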
 998
 999const struct file_operations gfs2_file_fops = {
1000	.llseek		= gfs2_llseek,
1001	.read		= do_sync_read,
1002	.aio_read	= generic_file_aio_read,
1003	.write		= do_sync_write,
1004	.aio_write	= gfs2_file_aio_write,
1005	.unlocked_ioctl	= gfs2_ioctl,
1006	.mmap		= gfs2_mmap,
1007	.open		= gfs2_open,
1008	.release	= gfs2_release,
1009	.fsync		= gfs2_fsync,
1010	.lock		= gfs2_lock,
1011	.flock		= gfs2_flock,
1012	.splice_read	= generic_file_splice_read,
1013	.splice_write	= generic_file_splice_write,
1014	.setlease	= gfs2_setlease,
1015	.fallocate	= gfs2_fallocate,
1016};
1017
1018const struct file_operations gfs2_dir_fops = {
1019	.readdir	= gfs2_readdir,
1020	.unlocked_ioctl	= gfs2_ioctl,
1021	.open		= gfs2_open,
1022	.release	= gfs2_release,
1023	.fsync		= gfs2_fsync,
1024	.lock		= gfs2_lock,
1025	.flock		= gfs2_flock,
1026	.llseek		= default_llseek,
1027};
1028
1029#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
1030
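    /*
     * The _nolock variants are installed when the filesystem uses purely
     * local locking, so the generic VFS lease and lock paths suffice and
     * no cluster lock hooks are needed.
     */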
1031const struct file_operations gfs2_file_fops_nolock = {
1032	.llseek		= gfs2_llseek,
1033	.read		= do_sync_read,
1034	.aio_read	= generic_file_aio_read,
1035	.write		= do_sync_write,
1036	.aio_write	= gfs2_file_aio_write,
1037	.unlocked_ioctl	= gfs2_ioctl,
1038	.mmap		= gfs2_mmap,
1039	.open		= gfs2_open,
1040	.release	= gfs2_release,
1041	.fsync		= gfs2_fsync,
1042	.splice_read	= generic_file_splice_read,
1043	.splice_write	= generic_file_splice_write,
1044	.setlease	= generic_setlease,
1045	.fallocate	= gfs2_fallocate,
1046};
1047
1048const struct file_operations gfs2_dir_fops_nolock = {
1049	.readdir	= gfs2_readdir,
1050	.unlocked_ioctl	= gfs2_ioctl,
1051	.open		= gfs2_open,
1052	.release	= gfs2_release,
1053	.fsync		= gfs2_fsync,
1054	.llseek		= default_llseek,
1055};
1056