/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @origin: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	if (origin == SEEK_END) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek_unlocked(file, offset, origin);
			gfs2_glock_dq_uninit(&i_gh);
		}
	} else
		error = generic_file_llseek_unlocked(file, offset, origin);

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
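
/*
 * Worked example (illustrative only): converting FS_JOURNAL_DATA_FL
 * (bit 14, 0x00004000) with the fsflags_to_gfs2 table below shifts
 * val right until bit 14 is the low bit, then ORs in table entry
 * [14], giving GFS2_DIF_INHERIT_JDATA. Set bits with no table entry
 * contribute nothing to the result.
 */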

static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write(filp->f_path.mnt);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write(filp->f_path.mnt);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}
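
/*
 * Illustrative example (not from the original source): on a 4 KiB page
 * with 1 KiB filesystem blocks, the first gfs2_block_map() call above
 * asks for all 4096 bytes but may map, say, only the first two blocks
 * (bh.b_size comes back as 2048). The loop then retries from
 * lblock + 2 with size = 2048 until the whole page is backed.
 */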

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page which is about to
 *       become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	struct gfs2_alloc *al;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE))
		goto out_unlock;
	ret = -ENOMEM;
	al = gfs2_alloc_get(ip);
	if (al == NULL)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_alloc_put;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	al->al_requested = data_blocks + ind_blocks;
	ret = gfs2_inplace_reserve(ip);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(al);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
	if (page->index > last_index)
		goto out_unlock_page;
	ret = 0;
	if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
		goto out_unlock_page;
	if (gfs2_is_stuffed(ip)) {
		ret = gfs2_unstuff_dinode(ip, page);
		if (ret)
			goto out_unlock_page;
	}
	ret = gfs2_allocate_page_backing(page);

out_unlock_page:
	unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (ret == -ENOMEM)
		ret = VM_FAULT_OOM;
	else if (ret)
		ret = VM_FAULT_SIGBUS;
	return ret;
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
		error = gfs2_glock_nq(&i_gh);
		if (error == 0) {
			file_accessed(file);
			gfs2_glock_dq(&i_gh);
		}
		gfs2_holder_uninit(&i_gh);
		if (error)
			return error;
	}
	vma->vm_ops = &gfs2_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_close - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_close(struct inode *inode, struct file *file)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_file *fp;

	fp = file->private_data;
	file->private_data = NULL;

	if (gfs2_assert_warn(sdp, fp))
		return -EIO;

	kfree(fp);

	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * The VFS will flush data for us. We only need to worry
 * about metadata here.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
		gfs2_ail_flush(ip->i_gl);
	}

	mutex_unlock(&inode->i_mutex);
	return 0;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;

	if (file->f_flags & O_APPEND) {
		struct dentry *dentry = file->f_dentry;
		struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
		struct gfs2_holder gh;
		int ret;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

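/*
 * empty_write_end - Complete a write of zeroed blocks prepared by
 * __block_write_begin(). For pages inside the current file size (or
 * when the fallocate will extend i_size) the buffers are committed
 * through the normal, possibly journaled, write path; otherwise the
 * dirty buffers are submitted directly and then waited on in a second
 * pass over the page.
 */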
static int empty_write_end(struct page *page, unsigned from,
			   unsigned to, int mode)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	unsigned offset, blksize = 1 << inode->i_blkbits;
	pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;

	zero_user(page, from, to-from);
	mark_page_accessed(page);

	if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) {
		if (!gfs2_is_writeback(ip))
			gfs2_page_add_databufs(ip, page, from, to);

		block_commit_write(page, from, to);
		return 0;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			set_buffer_uptodate(bh);
			mark_buffer_dirty(bh);
			clear_buffer_new(bh);
			write_dirty_buffer(bh, WRITE);
		}
		offset += blksize;
		bh = bh->b_this_page;
	}

	offset = 0;
	bh = page_buffers(page);
	while (offset < to) {
		if (offset >= from) {
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				return -EIO;
		}
		offset += blksize;
		bh = bh->b_this_page;
	}
	return 0;
}

static int needs_empty_write(sector_t block, struct inode *inode)
{
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	bh_map.b_size = 1 << inode->i_blkbits;
	error = gfs2_block_map(inode, block, &bh_map, 0);
	if (unlikely(error))
		return error;
	return !buffer_mapped(&bh_map);
}

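/*
 * write_empty_blocks - Zero the unallocated blocks backing [from, to)
 * within @page. The loop scans the page block by block, batching runs
 * of unmapped blocks (found via needs_empty_write()) into [start, end)
 * ranges, which are then allocated and zeroed through
 * __block_write_begin() and empty_write_end().
 */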
static int write_empty_blocks(struct page *page, unsigned from, unsigned to,
			      int mode)
{
	struct inode *inode = page->mapping->host;
	unsigned start, end, next, blksize;
	sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
	int ret;

	blksize = 1 << inode->i_blkbits;
	next = end = 0;
	while (next < from) {
		next += blksize;
		block++;
	}
	start = next;
	do {
		next += blksize;
		ret = needs_empty_write(block, inode);
		if (unlikely(ret < 0))
			return ret;
		if (ret == 0) {
			if (end) {
				ret = __block_write_begin(page, start, end - start,
							  gfs2_block_map);
				if (unlikely(ret))
					return ret;
				ret = empty_write_end(page, start, end, mode);
				if (unlikely(ret))
					return ret;
				end = 0;
			}
			start = next;
		} else
			end = next;
		block++;
	} while (next < to);

	if (end) {
		ret = __block_write_begin(page, start, end - start, gfs2_block_map);
		if (unlikely(ret))
			return ret;
		ret = empty_write_end(page, start, end, mode);
		if (unlikely(ret))
			return ret;
	}

	return 0;
}

static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	u64 start = offset >> PAGE_CACHE_SHIFT;
	unsigned int start_offset = offset & ~PAGE_CACHE_MASK;
	u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT;
	pgoff_t curr;
	struct page *page;
	unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK;
	unsigned int from, to;

	if (!end_offset)
		end_offset = PAGE_CACHE_SIZE;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		goto out;

	gfs2_trans_add_bh(ip->i_gl, dibh, 1);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	curr = start;
	offset = start << PAGE_CACHE_SHIFT;
	from = start_offset;
	to = PAGE_CACHE_SIZE;
	while (curr <= end) {
		page = grab_cache_page_write_begin(inode->i_mapping, curr,
						   AOP_FLAG_NOFS);
		if (unlikely(!page)) {
			error = -ENOMEM;
			goto out;
		}

		if (curr == end)
			to = end_offset;
		error = write_empty_blocks(page, from, to, mode);
		if (!error && offset + to > inode->i_size &&
		    !(mode & FALLOC_FL_KEEP_SIZE)) {
			i_size_write(inode, offset + to);
		}
		unlock_page(page);
		page_cache_release(page);
		if (error)
			goto out;
		curr++;
		offset += PAGE_CACHE_SIZE;
		from = 0;
	}

	gfs2_dinode_out(ip, dibh->b_data);
	mark_inode_dirty(inode);

	brelse(dibh);

out:
	return error;
}

static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	struct gfs2_alloc *al;
	int error;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	if (!gfs2_write_alloc_required(ip, offset, len))
		goto out_unlock;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		al = gfs2_alloc_get(ip);
		if (!al) {
			error = -ENOMEM;
			goto out_unlock;
		}

		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_alloc_put;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks);
		al->al_requested = data_blocks + ind_blocks;

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(al);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
		gfs2_alloc_put(ip);
	}
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_alloc_put:
	gfs2_alloc_put(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file->f_path.dentry->d_inode);
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_close,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/delay.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Iterator for a directory
 * @file: The directory to read from
 * @ctx: What to feed directory entries to
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	int error;

	error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	if (error)
		return error;

	error = gfs2_dir_read(dir, ctx, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	return error;
}

/**
 * fsflags_cvt
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}
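
/*
 * Worked example (illustrative only): a val of FS_TOPDIR_FL (bit 17,
 * 0x00020000) walks the fsflags_to_gfs2 table below to entry [17] and
 * converts to GFS2_DIF_TOPDIR; the reverse table maps gfs2fl_TopLevel
 * back to FS_TOPDIR_FL, so the two tables are inverses of each other
 * for the bits they both define.
 */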

static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		return error;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
	gfs2_holder_uninit(&gh);
	return error;
}

void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_SYSTEM);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~(GFS2_DIF_SYSTEM | GFS2_DIF_JDATA));
}

static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	if (hint > atomic_read(&ip->i_res.rs_sizehint))
		atomic_set(&ip->i_res.rs_sizehint, hint);
}
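
/*
 * Example of the rounding above (illustrative only): with a 4096-byte
 * block size (sb_bsize_shift == 12), a 1 MiB write gives
 * blks = (1048576 + 4095) >> 12 = 256, so the reservation code is
 * hinted to expect roughly 256 data blocks.
 */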

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's ok too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_SIZE;
	u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned long last_index;
	u64 pos = page->index << PAGE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out;

	gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.target = data_blocks + ind_blocks;
	ret = gfs2_quota_lock_check(ip, &ap);
	if (ret)
		goto out_unlock;
	ret = gfs2_inplace_reserve(ip, &ap);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation, we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = gfs2_page_mkwrite,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. We ignore any locking errors since the only consequence is
 * a missed atime update (which will just be deferred until later).
 *
 * Returns: 0
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open_common - This is common to open and atomic_open
 * @inode: The inode being opened
 * @file: The file being opened
 *
 * This may be called under a glock or not, depending upon how it has
 * been called. We must always be called under a glock for regular
 * files, however. For other file types, it does not matter whether
 * we hold the glock or not.
 *
 * Returns: Error code or 0 for success
 */

int gfs2_open_common(struct inode *inode, struct file *file)
{
	struct gfs2_file *fp;
	int ret;

	if (S_ISREG(inode->i_mode)) {
		ret = generic_file_open(inode, file);
		if (ret)
			return ret;
	}

	fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;
	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * After atomic_open, this function is only used for opening files
 * which are already cached. We must still get the glock for regular
 * files to ensure that the file size is up to date for the large
 * file check, which is in the common code. That is only an issue for
 * regular files though.
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	int error;
	bool need_unlock = false;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		need_unlock = true;
	}

	error = gfs2_open_common(inode, file);

	if (need_unlock)
		gfs2_glock_dq_uninit(&i_gh);

	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rsqa_delete(ip, &inode->i_writecount);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & I_DIRTY_ALL;
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (!gfs2_is_jdata(ip))
		sync_state &= ~I_DIRTY_PAGES;
	if (datasync)
		sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME);

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_write_iter - Perform a write to a file
 * @iocb: The io context
 * @from: The data to write
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can land up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));

	if (iocb->ki_flags & IOCB_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_write_iter(iocb, from);
}

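/*
 * fallocate_chunk - Back the byte range [offset, offset + len) with
 * allocated blocks inside the current transaction. gfs2_block_map()
 * is called with create == 1 in extent-sized steps; the zeronew flag
 * is set on each request so that newly allocated extents are zeroed,
 * and a new extent that comes back without it is treated as -EIO.
 */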
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
out:
	brelse(dibh);
	return error;
}

/**
 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
 * blocks, determine how many bytes can be written.
 * @ip: The inode in question.
 * @len: Max cap of bytes. What we return in *len must be <= this.
 * @data_blocks: Compute and return the number of data blocks needed
 * @ind_blocks: Compute and return the number of indirect blocks needed
 * @max_blocks: The total blocks available to work with.
 *
 * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
 */
static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks,
			    unsigned int max_blocks)
{
	loff_t max = *len;
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}

	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}
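
/*
 * Worked example for the loop above (hypothetical geometry, for
 * illustration only): assume sd_diptrs = 483, sd_inptrs = 509,
 * sd_max_height = 10 and max_blocks = 100000. Then max_data starts at
 * 100000 - 27 = 99973; the first pass reserves
 * DIV_ROUND_UP(99973, 509) = 197 indirect blocks, leaving
 * max_data = 99776; since 197 <= 483 the loop ends, so 224 of the
 * 100000 blocks go to metadata and the rest can hold data.
 */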

static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes, max_blks = UINT_MAX;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;

	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	gfs2_size_hint(file, offset, len);

	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
	ap.min_target = data_blocks + ind_blocks;

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}

		/* We need to determine how many bytes we can actually
		 * fallocate without exceeding quota or going over the
		 * end of the fs. We start off optimistically by assuming
		 * we can write max_bytes */
		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;

		/* Since max_bytes is most likely a theoretical max, we
		 * calculate a more realistic 'bytes' to serve as a good
		 * starting point for the number of bytes we may be able
		 * to write */
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
		ap.target = data_blocks + ind_blocks;

		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			return error;
		/* ap.allowed tells us how many blocks quota will allow
		 * us to write. Check if this reduces max_blks */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;

		/* check if the selected rgrp limits our max_blks further */
		if (ap.allowed && ap.allowed < max_blks)
			max_blks = ap.allowed;

		/* Almost done. Calculate bytes that can be written using
		 * max_blks. We also recompute max_bytes, data_blocks and
		 * ind_blocks */
		calc_max_reserv(ip, &max_bytes, &data_blocks,
				&ind_blocks, max_blks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_SIZE/sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) {
		i_size_write(inode, pos + count);
		file_update_time(file);
		mark_inode_dirty(inode);
	}

	return generic_write_sync(file, pos, count);

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
	return error;
}

static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if ((mode & ~FALLOC_FL_KEEP_SIZE) || gfs2_is_jdata(ip))
		return -EOPNOTSUPP;

	inode_lock(inode);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    (offset + len) > inode->i_size) {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			goto out_unlock;
	}

	ret = get_write_access(inode);
	if (ret)
		goto out_unlock;

	ret = gfs2_rsqa_alloc(ip);
	if (ret)
		goto out_putw;

	ret = __gfs2_fallocate(file, mode, offset, len);
	if (ret)
		gfs2_rs_deltree(&ip->i_res);

out_putw:
	put_write_access(inode);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	inode_unlock(inode);
	return ret;
}

static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
				      struct file *out, loff_t *ppos,
				      size_t len, unsigned int flags)
{
	int error;
	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);

	error = gfs2_rsqa_alloc(ip);
	if (error)
		return (ssize_t)error;

	gfs2_size_hint(out, *ppos, len);

	return iter_file_splice_write(pipe, out, ppos, len, flags);
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			locks_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	u16 flags;
	int error = 0;
	int sleeptime;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY_1CB) | GL_EXACT;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		locks_lock_file_wait(file,
				     &(struct file_lock) {
					     .fl_type = F_UNLCK,
					     .fl_flags = FL_FLOCK
				     });
		gfs2_glock_dq(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	for (sleeptime = 1; sleeptime <= 4; sleeptime <<= 1) {
		error = gfs2_glock_nq(fl_gh);
		if (error != GLR_TRYFAILED)
			break;
		fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT;
		fl_gh->gh_error = 0;
		msleep(sleeptime);
	}
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = locks_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	locks_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= simple_nosetlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= gfs2_file_write_iter,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= gfs2_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.iterate	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};