/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp;

        fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;

        fp->fp_file = file;
        mutex_init(&fp->fp_mutex);
        ocfs2_file_lock_res_init(&fp->fp_flock, fp);
        file->private_data = fp;

        return 0;
}

static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
        struct ocfs2_file_private *fp = file->private_data;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (fp) {
                ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
                ocfs2_lock_res_free(&fp->fp_flock);
                kfree(fp);
                file->private_data = NULL;
        }
}

static int ocfs2_file_open(struct inode *inode, struct file *file)
{
        int status;
        int mode = file->f_flags;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        trace_ocfs2_file_open(inode, file, file->f_path.dentry,
                              (unsigned long long)OCFS2_I(inode)->ip_blkno,
                              file->f_path.dentry->d_name.len,
                              file->f_path.dentry->d_name.name, mode);

        if (file->f_mode & FMODE_WRITE) {
                status = dquot_initialize(inode);
                if (status)
                        goto leave;
        }

        spin_lock(&oi->ip_lock);

        /* Check that the inode hasn't been wiped from disk by another
         * node. If it hasn't been, we're safe as long as we hold the
         * spin lock until our increment of open count. */
        if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
                spin_unlock(&oi->ip_lock);

                status = -ENOENT;
                goto leave;
        }

        if (mode & O_DIRECT)
                oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

        oi->ip_open_count++;
        spin_unlock(&oi->ip_lock);

        status = ocfs2_init_file_private(inode, file);
        if (status) {
                /*
                 * We want to set the open count back if we're failing
                 * the open.
                 */
                spin_lock(&oi->ip_lock);
                oi->ip_open_count--;
                spin_unlock(&oi->ip_lock);
        }

leave:
        return status;
}

static int ocfs2_file_release(struct inode *inode, struct file *file)
{
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        spin_lock(&oi->ip_lock);
        if (!--oi->ip_open_count)
                oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

        trace_ocfs2_file_release(inode, file, file->f_path.dentry,
                                 oi->ip_blkno,
                                 file->f_path.dentry->d_name.len,
                                 file->f_path.dentry->d_name.name,
                                 oi->ip_open_count);
        spin_unlock(&oi->ip_lock);

        ocfs2_free_file_private(inode, file);

        return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
        return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
        ocfs2_free_file_private(inode, file);
        return 0;
}

static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
                           int datasync)
{
        int err = 0;
        struct inode *inode = file->f_mapping->host;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_inode_info *oi = OCFS2_I(inode);
        journal_t *journal = osb->journal->j_journal;
        int ret;
        tid_t commit_tid;
        bool needs_barrier = false;

        trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
                              OCFS2_I(inode)->ip_blkno,
                              file->f_path.dentry->d_name.len,
                              file->f_path.dentry->d_name.name,
                              (unsigned long long)datasync);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return -EROFS;

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (err)
                return err;

        commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_trans_will_send_data_barrier(journal, commit_tid))
                needs_barrier = true;
        err = jbd2_complete_transaction(journal, commit_tid);
        if (needs_barrier) {
                ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
                if (!err)
                        err = ret;
        }

        if (err)
                mlog_errno(err);

        return (err < 0) ? -EIO : 0;
}
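
/*
 * Note on the barrier logic above: i_datasync_tid tracks the last
 * transaction that touched data-relevant inode fields, so fdatasync()
 * can often wait on an older, already-committed transaction instead of
 * forcing a new journal commit. When that transaction has already
 * committed, jbd2 will not issue a cache flush on our behalf, hence
 * the explicit blkdev_issue_flush() fallback.
 */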

int ocfs2_should_update_atime(struct inode *inode,
                              struct vfsmount *vfsmnt)
{
        struct timespec now;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return 0;

        if ((inode->i_flags & S_NOATIME) ||
            ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        /*
         * We can be called with no vfsmnt structure - NFSD will
         * sometimes do this.
         *
         * Note that our action here is different than touch_atime() -
         * if we can't tell whether this is a noatime mount, then we
         * don't know whether to trust the value of s_atime_quantum.
         */
        if (vfsmnt == NULL)
                return 0;

        if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
            ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
                return 0;

        if (vfsmnt->mnt_flags & MNT_RELATIME) {
                if ((timespec_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
                    (timespec_compare(&inode->i_atime, &inode->i_ctime) <= 0))
                        return 1;

                return 0;
        }

        now = CURRENT_TIME;
        if ((now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum))
                return 0;
        else
                return 1;
}
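
/*
 * Worked example (illustrative): with the default atime_quantum mount
 * option of 60 seconds, a read 30 seconds after the recorded atime
 * returns 0 here and the on-disk atime is left alone; a read 61
 * seconds after returns 1 and triggers ocfs2_update_inode_atime()
 * below. This batching keeps atime updates from generating a
 * cluster-wide journal transaction on every read.
 */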

int ocfs2_update_inode_atime(struct inode *inode,
                             struct buffer_head *bh)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret) {
                mlog_errno(ret);
                goto out_commit;
        }

        /*
         * Don't use ocfs2_mark_inode_dirty() here as we don't always
         * have i_mutex to guard against concurrent changes to other
         * inode fields.
         */
        inode->i_atime = CURRENT_TIME;
        di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
        di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
        ocfs2_update_inode_fsync_trans(handle, inode, 0);
        ocfs2_journal_dirty(handle, bh);

out_commit:
        ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        return ret;
}

int ocfs2_set_inode_size(handle_t *handle,
                         struct inode *inode,
                         struct buffer_head *fe_bh,
                         u64 new_i_size)
{
        int status;

        i_size_write(inode, new_i_size);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        return status;
}

int ocfs2_simple_size_update(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_set_inode_size(handle, inode, di_bh,
                                   new_i_size);
        if (ret < 0)
                mlog_errno(ret);

        ocfs2_update_inode_fsync_trans(handle, inode, 0);
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
                              struct buffer_head *fe_bh,
                              u64 offset)
{
        int status;
        u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        /*
         * If the new offset is cluster-aligned, there is no partial
         * cluster for ocfs2_zero_range_for_truncate to fill, so there
         * is no need to CoW either.
         */
        if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
                return 0;

        status = ocfs2_get_clusters(inode, cpos, &phys,
                                    &num_clusters, &ext_flags);
        if (status) {
                mlog_errno(status);
                goto out;
        }

        if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
                goto out;

        return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);

out:
        return status;
}
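
/*
 * Worked example (illustrative): with a 1 MB cluster size,
 * s_clustersize - 1 is 0xfffff. A truncate to offset 0x300000 is
 * cluster-aligned (offset & 0xfffff == 0), so nothing is CoW'd; a
 * truncate to 0x300200 lands inside cluster 3 (cpos = offset >> 20),
 * and if that cluster is refcounted it must be CoW'd before the tail
 * of the cluster is zeroed in place.
 */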

static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
                                     struct inode *inode,
                                     struct buffer_head *fe_bh,
                                     u64 new_i_size)
{
        int status;
        handle_t *handle;
        struct ocfs2_dinode *di;
        u64 cluster_bytes;

        /*
         * We need to CoW the cluster containing the offset if it is
         * reflinked, since we will call ocfs2_zero_range_for_truncate
         * later, which will write "0" from the offset to the end of
         * the cluster.
         */
        status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
        if (status) {
                mlog_errno(status);
                return status;
        }

        /* TODO: This needs to actually orphan the inode in this
         * transaction. */

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }

        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto out_commit;
        }

        /*
         * Do this before setting i_size.
         */
        cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
        status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
                                               cluster_bytes);
        if (status) {
                mlog_errno(status);
                goto out_commit;
        }

        i_size_write(inode, new_i_size);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;

        di = (struct ocfs2_dinode *) fe_bh->b_data;
        di->i_size = cpu_to_le64(new_i_size);
        di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
        di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        ocfs2_update_inode_fsync_trans(handle, inode, 0);

        ocfs2_journal_dirty(handle, fe_bh);

out_commit:
        ocfs2_commit_trans(osb, handle);
out:
        return status;
}

int ocfs2_truncate_file(struct inode *inode,
                        struct buffer_head *di_bh,
                        u64 new_i_size)
{
        int status = 0;
        struct ocfs2_dinode *fe = NULL;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

        /* We trust di_bh because it comes from ocfs2_inode_lock(), which
         * already validated it */
        fe = (struct ocfs2_dinode *) di_bh->b_data;

        trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                  (unsigned long long)le64_to_cpu(fe->i_size),
                                  (unsigned long long)new_i_size);

        mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
                        "Inode %llu, inode i_size = %lld != di "
                        "i_size = %llu, i_flags = 0x%x\n",
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        i_size_read(inode),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        le32_to_cpu(fe->i_flags));

        if (new_i_size > le64_to_cpu(fe->i_size)) {
                trace_ocfs2_truncate_file_error(
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        (unsigned long long)new_i_size);
                status = -EINVAL;
                mlog_errno(status);
                goto bail;
        }

        down_write(&OCFS2_I(inode)->ip_alloc_sem);

        ocfs2_resv_discard(&osb->osb_la_resmap,
                           &OCFS2_I(inode)->ip_la_data_resv);

        /*
         * The inode lock forced other nodes to sync and drop their
         * pages, which (correctly) happens even if we have a truncate
         * without allocation change - ocfs2 cluster sizes can be much
         * greater than page size, so we have to truncate them
         * anyway.
         */
        unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
        truncate_inode_pages(inode->i_mapping, new_i_size);

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
                                               i_size_read(inode), 1);
                if (status)
                        mlog_errno(status);

                goto bail_unlock_sem;
        }

        /* alright, we're going to need to do a full blown alloc size
         * change. Orphan the inode so that recovery can complete the
         * truncate if necessary. This does the task of marking
         * i_size. */
        status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        status = ocfs2_commit_truncate(osb, inode, di_bh);
        if (status < 0) {
                mlog_errno(status);
                goto bail_unlock_sem;
        }

        /* TODO: orphan dir cleanup here. */
bail_unlock_sem:
        up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
        if (!status && OCFS2_I(inode)->ip_clusters == 0)
                status = ocfs2_try_remove_refcount_tree(inode, di_bh);

        return status;
}

/*
 * Extend file allocation only here.
 * We'll update all the disk stuff, and oip->alloc_size.
 *
 * Expects things to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
                         struct inode *inode,
                         u32 *logical_offset,
                         u32 clusters_to_add,
                         int mark_unwritten,
                         struct buffer_head *fe_bh,
                         handle_t *handle,
                         struct ocfs2_alloc_context *data_ac,
                         struct ocfs2_alloc_context *meta_ac,
                         enum ocfs2_alloc_restarted *reason_ret)
{
        int ret;
        struct ocfs2_extent_tree et;

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
        ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
                                          clusters_to_add, mark_unwritten,
                                          data_ac, meta_ac, reason_ret);

        return ret;
}

static int __ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
                                     u32 clusters_to_add, int mark_unwritten)
{
        int status = 0;
        int restart_func = 0;
        int credits;
        u32 prev_clusters;
        struct buffer_head *bh = NULL;
        struct ocfs2_dinode *fe = NULL;
        handle_t *handle = NULL;
        struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        enum ocfs2_alloc_restarted why = RESTART_NONE;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_extent_tree et;
        int did_quota = 0;

        /*
         * Unwritten extents only exist on file systems which
         * support holes.
         */
        BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

        status = ocfs2_read_inode_block(inode, &bh);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }
        fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
        BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
        status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
                                       &data_ac, &meta_ac);
        if (status) {
                mlog_errno(status);
                goto leave;
        }

        credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
        handle = ocfs2_start_trans(osb, credits);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                handle = NULL;
                mlog_errno(status);
                goto leave;
        }

restarted_transaction:
        trace_ocfs2_extend_allocation(
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                (unsigned long long)i_size_read(inode),
                le32_to_cpu(fe->i_clusters), clusters_to_add,
                why, restart_func);

        status = dquot_alloc_space_nodirty(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (status)
                goto leave;
        did_quota = 1;

        /* reserve a write to the file entry early on - that way if we
         * run out of credits in the allocation path, we can still
         * update i_size. */
        status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                         OCFS2_JOURNAL_ACCESS_WRITE);
        if (status < 0) {
                mlog_errno(status);
                goto leave;
        }

        prev_clusters = OCFS2_I(inode)->ip_clusters;

        status = ocfs2_add_inode_data(osb,
                                      inode,
                                      &logical_start,
                                      clusters_to_add,
                                      mark_unwritten,
                                      bh,
                                      handle,
                                      data_ac,
                                      meta_ac,
                                      &why);
        if ((status < 0) && (status != -EAGAIN)) {
                if (status != -ENOSPC)
                        mlog_errno(status);
                goto leave;
        }
        ocfs2_update_inode_fsync_trans(handle, inode, 1);
        ocfs2_journal_dirty(handle, bh);

        spin_lock(&OCFS2_I(inode)->ip_lock);
        clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
        spin_unlock(&OCFS2_I(inode)->ip_lock);
        /* Release unused quota reservation */
        dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        did_quota = 0;

        if (why != RESTART_NONE && clusters_to_add) {
                if (why == RESTART_META) {
                        restart_func = 1;
                        status = 0;
                } else {
                        BUG_ON(why != RESTART_TRANS);

                        status = ocfs2_allocate_extend_trans(handle, 1);
                        if (status < 0) {
                                /* handle still has to be committed at
                                 * this point. */
                                status = -ENOMEM;
                                mlog_errno(status);
                                goto leave;
                        }
                        goto restarted_transaction;
                }
        }

        trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
                        le32_to_cpu(fe->i_clusters),
                        (unsigned long long)le64_to_cpu(fe->i_size),
                        OCFS2_I(inode)->ip_clusters,
                        (unsigned long long)i_size_read(inode));

leave:
        if (status < 0 && did_quota)
                dquot_free_space(inode,
                        ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
        if (handle) {
                ocfs2_commit_trans(osb, handle);
                handle = NULL;
        }
        if (data_ac) {
                ocfs2_free_alloc_context(data_ac);
                data_ac = NULL;
        }
        if (meta_ac) {
                ocfs2_free_alloc_context(meta_ac);
                meta_ac = NULL;
        }
        if ((!status) && restart_func) {
                restart_func = 0;
                goto restart_all;
        }
        brelse(bh);
        bh = NULL;

        return status;
}
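
/*
 * Note on the restart protocol above: ocfs2_add_inode_data() may stop
 * early and report why via 'reason_ret'. RESTART_META means the
 * reserved metadata allocation was exhausted, so we commit, re-take
 * the allocators and start over at restart_all; RESTART_TRANS means
 * only journal credits ran out, so it is enough to extend the handle
 * and jump back to restarted_transaction. Either way i_size stays
 * consistent because the dinode write was reserved up front.
 */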

int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
                            u32 clusters_to_add, int mark_unwritten)
{
        return __ocfs2_extend_allocation(inode, logical_start,
                                         clusters_to_add, mark_unwritten);
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
                                                      struct buffer_head *di_bh)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        handle_t *handle = NULL;
        int ret = 0;

        if (!ocfs2_should_order_data(inode))
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_jbd2_file_inode(handle, inode);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret)
                mlog_errno(ret);
        ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
        if (ret) {
                if (!IS_ERR(handle))
                        ocfs2_commit_trans(osb, handle);
                handle = ERR_PTR(ret);
        }
        return handle;
}

/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
                                 u64 abs_to, struct buffer_head *di_bh)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
        unsigned long index = abs_from >> PAGE_SHIFT;
        handle_t *handle;
        int ret = 0;
        unsigned zero_from, zero_to, block_start, block_end;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

        BUG_ON(abs_from >= abs_to);
        BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
        BUG_ON(abs_from & (inode->i_blkbits - 1));

        handle = ocfs2_zero_start_ordered_transaction(inode, di_bh);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        page = find_or_create_page(mapping, index, GFP_NOFS);
        if (!page) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out_commit_trans;
        }

        /* Get the offsets within the page that we want to zero */
        zero_from = abs_from & (PAGE_SIZE - 1);
        zero_to = abs_to & (PAGE_SIZE - 1);
        if (!zero_to)
                zero_to = PAGE_SIZE;

        trace_ocfs2_write_zero_page(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)abs_from,
                        (unsigned long long)abs_to,
                        index, zero_from, zero_to);

        /* We know that zero_from is block aligned */
        for (block_start = zero_from; block_start < zero_to;
             block_start = block_end) {
                block_end = block_start + (1 << inode->i_blkbits);

                /*
                 * block_start is block-aligned. Bump it by one to force
                 * __block_write_begin and block_commit_write to zero the
                 * whole block.
                 */
                ret = __block_write_begin(page, block_start + 1, 0,
                                          ocfs2_get_block);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out_unlock;
                }


                /* must not update i_size! */
                ret = block_commit_write(page, block_start + 1,
                                         block_start + 1);
                if (ret < 0)
                        mlog_errno(ret);
                else
                        ret = 0;
        }

        /*
         * fs-writeback will release dirty pages without the page lock
         * when their offsets are beyond the inode size; the release
         * happens in block_write_full_page().
         */
        i_size_write(inode, abs_to);
        inode->i_blocks = ocfs2_inode_sector_count(inode);
        di->i_size = cpu_to_le64((u64)i_size_read(inode));
        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
        di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        di->i_mtime_nsec = di->i_ctime_nsec;
        if (handle) {
                ocfs2_journal_dirty(handle, di_bh);
                ocfs2_update_inode_fsync_trans(handle, inode, 1);
        }

out_unlock:
        unlock_page(page);
        put_page(page);
out_commit_trans:
        if (handle)
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
        return ret;
}
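
/*
 * Worked example (illustrative) of the "block_start + 1" trick above:
 * with 512-byte blocks, zeroing the block at page offsets [512, 1024)
 * would pass from == to == 512, and __block_write_begin() skips any
 * block that does not overlap [from, to), so the block would never be
 * mapped or zeroed. Passing 513 makes the empty write range fall
 * strictly inside the block; the block is then mapped, and everything
 * outside [513, 513) - i.e. the whole block - is zeroed.
 */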

/*
 * Find the next range to zero. We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache. We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed. range_start and range_end return the next zeroing
 * range. A subsequent call should pass the previous range_end as its
 * zero_start. If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over. Refcounted extents are CoW'd.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
                                       struct buffer_head *di_bh,
                                       u64 zero_start, u64 zero_end,
                                       u64 *range_start, u64 *range_end)
{
        int rc = 0, needs_cow = 0;
        u32 p_cpos, zero_clusters = 0;
        u32 zero_cpos =
                zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
        unsigned int num_clusters = 0;
        unsigned int ext_flags = 0;

        while (zero_cpos < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
                                        &num_clusters, &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
                        zero_clusters = num_clusters;
                        if (ext_flags & OCFS2_EXT_REFCOUNTED)
                                needs_cow = 1;
                        break;
                }

                zero_cpos += num_clusters;
        }
        if (!zero_clusters) {
                *range_end = 0;
                goto out;
        }

        while ((zero_cpos + zero_clusters) < last_cpos) {
                rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
                                        &p_cpos, &num_clusters,
                                        &ext_flags);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }

                if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
                        break;
                if (ext_flags & OCFS2_EXT_REFCOUNTED)
                        needs_cow = 1;
                zero_clusters += num_clusters;
        }
        if ((zero_cpos + zero_clusters) > last_cpos)
                zero_clusters = last_cpos - zero_cpos;

        if (needs_cow) {
                rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
                                        zero_clusters, UINT_MAX);
                if (rc) {
                        mlog_errno(rc);
                        goto out;
                }
        }

        *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
        *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
                                             zero_cpos + zero_clusters);

out:
        return rc;
}

/*
 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
                                   u64 range_end, struct buffer_head *di_bh)
{
        int rc = 0;
        u64 next_pos;
        u64 zero_pos = range_start;

        trace_ocfs2_zero_extend_range(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)range_start,
                        (unsigned long long)range_end);
        BUG_ON(range_start >= range_end);

        while (zero_pos < range_end) {
                next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
                if (next_pos > range_end)
                        next_pos = range_end;
                rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
                if (rc < 0) {
                        mlog_errno(rc);
                        break;
                }
                zero_pos = next_pos;

                /*
                 * Very large extends have the potential to lock up
                 * the cpu for extended periods of time.
                 */
                cond_resched();
        }

        return rc;
}

int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
                      loff_t zero_to_size)
{
        int ret = 0;
        u64 zero_start, range_start = 0, range_end = 0;
        struct super_block *sb = inode->i_sb;

        zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
        trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
                                (unsigned long long)zero_start,
                                (unsigned long long)i_size_read(inode));
        while (zero_start < zero_to_size) {
                ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
                                                  zero_to_size,
                                                  &range_start,
                                                  &range_end);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                if (!range_end)
                        break;
                /* Trim the ends */
                if (range_start < zero_start)
                        range_start = zero_start;
                if (range_end > zero_to_size)
                        range_end = zero_to_size;

                ret = ocfs2_zero_extend_range(inode, range_start,
                                              range_end, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        break;
                }
                zero_start = range_end;
        }

        return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
                          u64 new_i_size, u64 zero_to)
{
        int ret;
        u32 clusters_to_add;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        /*
         * Only quota files call this without a bh, and they can't be
         * refcounted.
         */
        BUG_ON(!di_bh && (oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
        BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

        clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
        if (clusters_to_add < oi->ip_clusters)
                clusters_to_add = 0;
        else
                clusters_to_add -= oi->ip_clusters;

        if (clusters_to_add) {
                ret = __ocfs2_extend_allocation(inode, oi->ip_clusters,
                                                clusters_to_add, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * Call this even if we don't add any clusters to the tree. We
         * still need to zero the area between the old i_size and the
         * new i_size.
         */
        ret = ocfs2_zero_extend(inode, di_bh, zero_to);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}
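
/*
 * Worked example (illustrative) of the cluster arithmetic above: with
 * 4 KB clusters, extending a file that currently owns ip_clusters == 2
 * to new_i_size == 10000 bytes gives ocfs2_clusters_for_bytes() == 3
 * (10000 rounds up to three clusters), so clusters_to_add ends up as
 * 3 - 2 = 1 and exactly one new cluster is allocated before the tail
 * is zeroed.
 */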

static int ocfs2_extend_file(struct inode *inode,
                             struct buffer_head *di_bh,
                             u64 new_i_size)
{
        int ret = 0;
        struct ocfs2_inode_info *oi = OCFS2_I(inode);

        BUG_ON(!di_bh);

        /* setattr sometimes calls us like this. */
        if (new_i_size == 0)
                goto out;

        if (i_size_read(inode) == new_i_size)
                goto out;
        BUG_ON(new_i_size < i_size_read(inode));

        /*
         * The alloc sem blocks people in read/write from reading our
         * allocation until we're done changing it. We depend on
         * i_mutex to block other extend/truncate calls while we're
         * here. We even have to hold it for sparse files because there
         * might be some tail zeroing.
         */
        down_write(&oi->ip_alloc_sem);

        if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                /*
                 * We can optimize small extends by keeping the inode's
                 * inline data.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
                        up_write(&oi->ip_alloc_sem);
                        goto out_update_size;
                }

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        up_write(&oi->ip_alloc_sem);
                        mlog_errno(ret);
                        goto out;
                }
        }

        if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
                ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
        else
                ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
                                            new_i_size);

        up_write(&oi->ip_alloc_sem);

        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

out_update_size:
        ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
        if (ret < 0)
                mlog_errno(ret);

out:
        return ret;
}

int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
{
        int status = 0, size_change;
        int inode_locked = 0;
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        struct buffer_head *bh = NULL;
        handle_t *handle = NULL;
        struct dquot *transfer_to[MAXQUOTAS] = { };
        int qtype;

        trace_ocfs2_setattr(inode, dentry,
                            (unsigned long long)OCFS2_I(inode)->ip_blkno,
                            dentry->d_name.len, dentry->d_name.name,
                            attr->ia_valid, attr->ia_mode,
                            from_kuid(&init_user_ns, attr->ia_uid),
                            from_kgid(&init_user_ns, attr->ia_gid));

        /* ensuring we don't even attempt to truncate a symlink */
        if (S_ISLNK(inode->i_mode))
                attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
                           | ATTR_GID | ATTR_UID | ATTR_MODE)
        if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
                return 0;

        status = inode_change_ok(inode, attr);
        if (status)
                return status;

        if (is_quota_modification(inode, attr)) {
                status = dquot_initialize(inode);
                if (status)
                        return status;
        }
        size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
        if (size_change) {
                status = ocfs2_rw_lock(inode, 1);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }
        }

        status = ocfs2_inode_lock(inode, &bh, 1);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
                goto bail_unlock_rw;
        }
        inode_locked = 1;

        if (size_change) {
                status = inode_newsize_ok(inode, attr->ia_size);
                if (status)
                        goto bail_unlock;

                inode_dio_wait(inode);

                if (i_size_read(inode) >= attr->ia_size) {
                        if (ocfs2_should_order_data(inode)) {
                                status = ocfs2_begin_ordered_truncate(inode,
                                                                      attr->ia_size);
                                if (status)
                                        goto bail_unlock;
                        }
                        status = ocfs2_truncate_file(inode, bh, attr->ia_size);
                } else
                        status = ocfs2_extend_file(inode, bh, attr->ia_size);
                if (status < 0) {
                        if (status != -ENOSPC)
                                mlog_errno(status);
                        status = -ENOSPC;
                        goto bail_unlock;
                }
        }

        if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
            (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
                /*
                 * Gather pointers to quota structures so that allocation /
                 * freeing of quota structures happens here and not inside
                 * dquot_transfer() where we have problems with lock ordering
                 */
                if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
                        transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
                        if (IS_ERR(transfer_to[USRQUOTA])) {
                                status = PTR_ERR(transfer_to[USRQUOTA]);
                                goto bail_unlock;
                        }
                }
                if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
                    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
                    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
                        transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
                        if (IS_ERR(transfer_to[GRPQUOTA])) {
                                status = PTR_ERR(transfer_to[GRPQUOTA]);
                                goto bail_unlock;
                        }
                }
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
                                           2 * ocfs2_quota_trans_credits(sb));
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
                status = __dquot_transfer(inode, transfer_to);
                if (status < 0)
                        goto bail_commit;
        } else {
                handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
                if (IS_ERR(handle)) {
                        status = PTR_ERR(handle);
                        mlog_errno(status);
                        goto bail_unlock;
                }
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
                mlog_errno(status);

bail_commit:
        ocfs2_commit_trans(osb, handle);
bail_unlock:
        if (status) {
                ocfs2_inode_unlock(inode, 1);
                inode_locked = 0;
        }
bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
bail:

        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
                dqput(transfer_to[qtype]);

        if (!status && attr->ia_valid & ATTR_MODE) {
                status = ocfs2_acl_chmod(inode, bh);
                if (status < 0)
                        mlog_errno(status);
        }
        if (inode_locked)
                ocfs2_inode_unlock(inode, 1);

        brelse(bh);
        return status;
}

int ocfs2_getattr(struct vfsmount *mnt,
                  struct dentry *dentry,
                  struct kstat *stat)
{
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = d_inode(dentry)->i_sb;
        struct ocfs2_super *osb = sb->s_fs_info;
        int err;

        err = ocfs2_inode_revalidate(dentry);
        if (err) {
                if (err != -ENOENT)
                        mlog_errno(err);
                goto bail;
        }

        generic_fillattr(inode, stat);
        /*
         * If there is inline data in the inode, the inode will normally not
         * have data blocks allocated (it may have an external xattr block).
         * Report at least one sector for such files, so tools like tar,
         * rsync, and others don't incorrectly think the file is completely
         * sparse.
         */
        if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
                stat->blocks += (stat->size + 511)>>9;

        /* We set the blksize from the cluster size for performance */
        stat->blksize = osb->s_clustersize;

bail:
        return err;
}

int ocfs2_permission(struct inode *inode, int mask)
{
        int ret;

        if (mask & MAY_NOT_BLOCK)
                return -ECHILD;

        ret = ocfs2_inode_lock(inode, NULL, 0);
        if (ret) {
                if (ret != -ENOENT)
                        mlog_errno(ret);
                goto out;
        }

        ret = generic_permission(inode, mask);

        ocfs2_inode_unlock(inode, 0);
out:
        return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
                                     struct buffer_head *bh)
{
        int ret;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_dinode *di;

        trace_ocfs2_write_remove_suid(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        inode->i_mode);

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
                                      OCFS2_JOURNAL_ACCESS_WRITE);
        if (ret < 0) {
                mlog_errno(ret);
                goto out_trans;
        }

        inode->i_mode &= ~S_ISUID;
        if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
                inode->i_mode &= ~S_ISGID;

        di = (struct ocfs2_dinode *) bh->b_data;
        di->i_mode = cpu_to_le16(inode->i_mode);
        ocfs2_update_inode_fsync_trans(handle, inode, 0);

        ocfs2_journal_dirty(handle, bh);

out_trans:
        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
        int ret;
        struct buffer_head *bh = NULL;

        ret = ocfs2_read_inode_block(inode, &bh);
        if (ret < 0) {
                mlog_errno(ret);
                goto out;
        }

        ret = __ocfs2_write_remove_suid(inode, bh);
out:
        brelse(bh);
        return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
                                            u64 start, u64 len)
{
        int ret;
        u32 cpos, phys_cpos, clusters, alloc_size;
        u64 end = start + len;
        struct buffer_head *di_bh = NULL;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_read_inode_block(inode, &di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Nothing to do if the requested reservation range
                 * fits within the inode.
                 */
                if (ocfs2_size_fits_inline_data(di_bh, end))
                        goto out;

                ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        /*
         * We consider both start and len to be inclusive.
         */
        cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
        clusters -= cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
                                         &alloc_size, NULL);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                /*
                 * Hole or existing extent len can be arbitrary, so
                 * cap it to our own allocation request.
                 */
                if (alloc_size > clusters)
                        alloc_size = clusters;

                if (phys_cpos) {
                        /*
                         * We already have an allocation at this
                         * region so we can safely skip it.
                         */
                        goto next;
                }

                ret = __ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
                if (ret) {
                        if (ret != -ENOSPC)
                                mlog_errno(ret);
                        goto out;
                }

next:
                cpos += alloc_size;
                clusters -= alloc_size;
        }

        ret = 0;
out:

        brelse(di_bh);
        return ret;
}

/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
                                         u64 byte_len)
{
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        loff_t start, end;
        struct address_space *mapping = inode->i_mapping;

        start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
        end = byte_start + byte_len;
        end = end & ~(osb->s_clustersize - 1);

        if (start < end) {
                unmap_mapping_range(mapping, start, end - start, 0);
                truncate_inode_pages_range(mapping, start, end - 1);
        }
}
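
/*
 * Worked example (illustrative): with 1 MB clusters, punching the byte
 * range [1000, 1000 + 3 MB) rounds start up to 1 MB and rounds end
 * (3146728) down to 3 MB, so only the pages of the two fully-covered
 * clusters are dropped here. The partial clusters at either edge keep
 * their pages so ocfs2_zero_partial_clusters() can zero them in place.
 */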

static int ocfs2_zero_partial_clusters(struct inode *inode,
                                       u64 start, u64 len)
{
        int ret = 0;
        u64 tmpend, end = start + len;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        unsigned int csize = osb->s_clustersize;
        handle_t *handle;

        /*
         * The "start" and "end" values are NOT necessarily part of
         * the range whose allocation is being deleted. Rather, this
         * is what the user passed in with the request. We must zero
         * partial clusters here. There's no need to worry about
         * physical allocation - the zeroing code knows to skip holes.
         */
        trace_ocfs2_zero_partial_clusters(
                (unsigned long long)OCFS2_I(inode)->ip_blkno,
                (unsigned long long)start, (unsigned long long)end);

        /*
         * If both edges are on a cluster boundary then there's no
         * zeroing required as the region is part of the allocation to
         * be truncated.
         */
        if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out;
        }

        /*
         * We want to get the byte offset of the end of the 1st cluster.
         */
        tmpend = (u64)osb->s_clustersize + (start & ~(osb->s_clustersize - 1));
        if (tmpend > end)
                tmpend = end;

        trace_ocfs2_zero_partial_clusters_range1((unsigned long long)start,
                                                 (unsigned long long)tmpend);

        ret = ocfs2_zero_range_for_truncate(inode, handle, start, tmpend);
        if (ret)
                mlog_errno(ret);

        if (tmpend < end) {
                /*
                 * This may make start and end equal, but the zeroing
                 * code will skip any work in that case so there's no
                 * need to catch it up here.
                 */
                start = end & ~(osb->s_clustersize - 1);

                trace_ocfs2_zero_partial_clusters_range2(
                        (unsigned long long)start, (unsigned long long)end);

                ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
                if (ret)
                        mlog_errno(ret);
        }
        ocfs2_update_inode_fsync_trans(handle, inode, 1);

        ocfs2_commit_trans(osb, handle);
out:
        return ret;
}

static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
        int i;
        struct ocfs2_extent_rec *rec = NULL;

        for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

                rec = &el->l_recs[i];

                if (le32_to_cpu(rec->e_cpos) < pos)
                        break;
        }

        return i;
}

/*
 * Helper to calculate the punching pos and length in one run, we handle the
 * following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
                                 struct ocfs2_extent_list *el,
                                 struct ocfs2_extent_rec *rec,
                                 u32 trunc_start, u32 *trunc_cpos,
                                 u32 *trunc_len, u32 *trunc_end,
                                 u64 *blkno, int *done)
{
        int ret = 0;
        u32 coff, range;

        range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

        if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
                /*
                 * remove an entire extent record.
                 */
                *trunc_cpos = le32_to_cpu(rec->e_cpos);
                /*
                 * Skip holes if any.
                 */
                if (range < *trunc_end)
                        *trunc_end = range;
                *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
                *blkno = le64_to_cpu(rec->e_blkno);
                *trunc_end = le32_to_cpu(rec->e_cpos);
        } else if (range > trunc_start) {
                /*
                 * remove a partial extent record, which means we're
                 * removing the last extent record.
                 */
                *trunc_cpos = trunc_start;
                /*
                 * skip hole if any.
                 */
                if (range < *trunc_end)
                        *trunc_end = range;
                *trunc_len = *trunc_end - trunc_start;
                coff = trunc_start - le32_to_cpu(rec->e_cpos);
                *blkno = le64_to_cpu(rec->e_blkno) +
                                ocfs2_clusters_to_blocks(inode->i_sb, coff);
                *trunc_end = trunc_start;
        } else {
                /*
                 * There are two possibilities here:
                 *
                 * - the last record has been removed
                 * - trunc_start was within a hole
                 *
                 * Both cases mean that hole punching is complete.
                 */
                ret = 1;
        }

        *done = ret;
}

static int ocfs2_remove_inode_range(struct inode *inode,
                                    struct buffer_head *di_bh, u64 byte_start,
                                    u64 byte_len)
{
        int ret = 0, flags = 0, done = 0, i;
        u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
        u32 cluster_in_el;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_cached_dealloc_ctxt dealloc;
        struct address_space *mapping = inode->i_mapping;
        struct ocfs2_extent_tree et;
        struct ocfs2_path *path = NULL;
        struct ocfs2_extent_list *el = NULL;
        struct ocfs2_extent_rec *rec = NULL;
        struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
        u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

        ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
        ocfs2_init_dealloc_ctxt(&dealloc);

        trace_ocfs2_remove_inode_range(
                        (unsigned long long)OCFS2_I(inode)->ip_blkno,
                        (unsigned long long)byte_start,
                        (unsigned long long)byte_len);

        if (byte_len == 0)
                return 0;

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
                ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
                                            byte_start + byte_len, 0);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
                /*
                 * There's no need to get fancy with the page cache
                 * truncate of an inline-data inode. We're talking
                 * about less than a page here, which will be cached
                 * in the dinode buffer anyway.
                 */
                unmap_mapping_range(mapping, 0, 0, 0);
                truncate_inode_pages(mapping, 0);
                goto out;
        }

        /*
         * For refcounted files, we may need to CoW the two clusters
         * which might be partially zeroed later, if the hole's start
         * and end offsets fall within a cluster (i.e. are not exactly
         * aligned to the cluster size).
         */

        if (OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) {

                ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }
        }

        trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
        trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
        cluster_in_el = trunc_end;

        ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        path = ocfs2_new_path_from_et(&et);
        if (!path) {
                ret = -ENOMEM;
                mlog_errno(ret);
                goto out;
        }

        while (trunc_end > trunc_start) {

                ret = ocfs2_find_path(INODE_CACHE(inode), path,
                                      cluster_in_el);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
                }

                el = path_leaf_el(path);

                i = ocfs2_find_rec(el, trunc_end);
                /*
                 * Need to go to previous extent block.
                 */
                if (i < 0) {
                        if (path->p_tree_depth == 0)
                                break;

                        ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
                                                            path,
                                                            &cluster_in_el);
                        if (ret) {
                                mlog_errno(ret);
                                goto out;
                        }

                        /*
                         * We've reached the leftmost extent block,
                         * it's safe to leave.
                         */
                        if (cluster_in_el == 0)
                                break;

                        /*
                         * The 'pos' searched for the previous extent
                         * block is always one cluster less than the
                         * actual trunc_end.
                         */
                        trunc_end = cluster_in_el + 1;

                        ocfs2_reinit_path(path, 1);

                        continue;

                } else
                        rec = &el->l_recs[i];

                ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
                                     &trunc_len, &trunc_end, &blkno, &done);
                if (done)
                        break;

                flags = rec->e_flags;
                phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

                ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
                                               phys_cpos, trunc_len, flags,
                                               &dealloc, refcount_loc, false);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                cluster_in_el = trunc_end;

                ocfs2_reinit_path(path, 1);
        }

        ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
        ocfs2_free_path(path);
        ocfs2_schedule_truncate_log_flush(osb, 1);
        ocfs2_run_deallocs(osb, &dealloc);

        return ret;
}

/*
 * Parts of this function taken from xfs_change_file_space()
 */
static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
                                     loff_t f_pos, unsigned int cmd,
                                     struct ocfs2_space_resv *sr,
                                     int change_size)
{
        int ret;
        s64 llen;
        loff_t size;
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct buffer_head *di_bh = NULL;
        handle_t *handle;
        unsigned long long max_off = inode->i_sb->s_maxbytes;

        if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
                return -EROFS;

        inode_lock(inode);

        /*
         * This prevents concurrent writes on other nodes
         */
        ret = ocfs2_rw_lock(inode, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out_rw_unlock;
        }

        if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
                ret = -EPERM;
                goto out_inode_unlock;
        }

        switch (sr->l_whence) {
        case 0: /*SEEK_SET*/
                break;
        case 1: /*SEEK_CUR*/
                sr->l_start += f_pos;
                break;
        case 2: /*SEEK_END*/
                sr->l_start += i_size_read(inode);
                break;
        default:
                ret = -EINVAL;
                goto out_inode_unlock;
        }
        sr->l_whence = 0;

        llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

        if (sr->l_start < 0
            || sr->l_start > max_off
            || (sr->l_start + llen) < 0
            || (sr->l_start + llen) > max_off) {
                ret = -EINVAL;
                goto out_inode_unlock;
        }
        size = sr->l_start + sr->l_len;

        if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
            cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
                if (sr->l_len <= 0) {
                        ret = -EINVAL;
                        goto out_inode_unlock;
                }
        }

        if (file && should_remove_suid(file->f_path.dentry)) {
                ret = __ocfs2_write_remove_suid(inode, di_bh);
                if (ret) {
                        mlog_errno(ret);
                        goto out_inode_unlock;
                }
        }

        down_write(&OCFS2_I(inode)->ip_alloc_sem);
        switch (cmd) {
        case OCFS2_IOC_RESVSP:
        case OCFS2_IOC_RESVSP64:
                /*
                 * This takes unsigned offsets, but the signed ones we
                 * pass have been checked against overflow above.
                 */
                ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
                                                       sr->l_len);
                break;
        case OCFS2_IOC_UNRESVSP:
        case OCFS2_IOC_UNRESVSP64:
                ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
                                               sr->l_len);
                break;
        default:
                ret = -EINVAL;
        }
        up_write(&OCFS2_I(inode)->ip_alloc_sem);
        if (ret) {
                mlog_errno(ret);
                goto out_inode_unlock;
        }

        /*
         * We update c/mtime for these changes
         */
        handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                mlog_errno(ret);
                goto out_inode_unlock;
        }

        if (change_size && i_size_read(inode) < size)
                i_size_write(inode, size);

        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
        if (ret < 0)
                mlog_errno(ret);

        if (file && (file->f_flags & O_SYNC))
                handle->h_sync = 1;

        ocfs2_commit_trans(osb, handle);

out_inode_unlock:
        brelse(di_bh);
        ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
        ocfs2_rw_unlock(inode, 1);

out:
        inode_unlock(inode);
        return ret;
}

int ocfs2_change_file_space(struct file *file, unsigned int cmd,
                            struct ocfs2_space_resv *sr)
{
        struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        int ret;

        if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
            !ocfs2_writes_unwritten_extents(osb))
                return -ENOTTY;
        else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
                 !ocfs2_sparse_alloc(osb))
                return -ENOTTY;

        if (!S_ISREG(inode->i_mode))
                return -EINVAL;

        if (!(file->f_mode & FMODE_WRITE))
                return -EBADF;

        ret = mnt_want_write_file(file);
        if (ret)
                return ret;
        ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
        mnt_drop_write_file(file);
        return ret;
}

static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
                            loff_t len)
{
        struct inode *inode = file_inode(file);
        struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
        struct ocfs2_space_resv sr;
        int change_size = 1;
        int cmd = OCFS2_IOC_RESVSP64;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
        if (!ocfs2_writes_unwritten_extents(osb))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_KEEP_SIZE)
                change_size = 0;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                cmd = OCFS2_IOC_UNRESVSP64;

        sr.l_whence = 0;
        sr.l_start = (s64)offset;
        sr.l_len = (s64)len;

        return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
                                         change_size);
}

int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
                                   size_t count)
{
        int ret = 0;
        unsigned int extent_flags;
        u32 cpos, clusters, extent_len, phys_cpos;
        struct super_block *sb = inode->i_sb;

        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
            !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
            OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                return 0;

        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
        clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

        while (clusters) {
                ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
                                         &extent_flags);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto out;
                }

                if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
                        ret = 1;
                        break;
                }

                if (extent_len > clusters)
                        extent_len = clusters;

                clusters -= extent_len;
                cpos += extent_len;
        }
out:
        return ret;
}

static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
        int blockmask = inode->i_sb->s_blocksize - 1;
        loff_t final_size = pos + count;

        if ((pos & blockmask) || (final_size & blockmask))
                return 1;
        return 0;
}
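
/*
 * Worked example (illustrative): with a 4096-byte block size,
 * blockmask is 0xfff. A write of 4096 bytes at pos 8192 has
 * pos & 0xfff == 0 and final_size (12288) & 0xfff == 0, so it is
 * aligned; the same write at pos 8200 is unaligned because
 * 8200 & 0xfff == 8.
 */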

static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
                                            struct file *file,
                                            loff_t pos, size_t count,
                                            int *meta_level)
{
        int ret;
        struct buffer_head *di_bh = NULL;
        u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
        u32 clusters =
                ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;

        ret = ocfs2_inode_lock(inode, &di_bh, 1);
        if (ret) {
                mlog_errno(ret);
                goto out;
        }

        *meta_level = 1;

        ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
        if (ret)
                mlog_errno(ret);
out:
        brelse(di_bh);
        return ret;
}
2091
2092static int ocfs2_prepare_inode_for_write(struct file *file,
2093 loff_t pos,
2094 size_t count)
2095{
2096 int ret = 0, meta_level = 0;
2097 struct dentry *dentry = file->f_path.dentry;
2098 struct inode *inode = d_inode(dentry);
2099 loff_t end;
2100
2101 /*
2102	 * We start with a read level meta lock and only jump to an
2103	 * exclusive (ex) lock if we need to make modifications here.
2104 */
2105 for(;;) {
2106 ret = ocfs2_inode_lock(inode, NULL, meta_level);
2107 if (ret < 0) {
2108 meta_level = -1;
2109 mlog_errno(ret);
2110 goto out;
2111 }
2112
2113 /* Clear suid / sgid if necessary. We do this here
2114 * instead of later in the write path because
2115 * remove_suid() calls ->setattr without any hint that
2116 * we may have already done our cluster locking. Since
2117 * ocfs2_setattr() *must* take cluster locks to
2118 * proceed, this will lead us to recursively lock the
2119 * inode. There's also the dinode i_size state which
2120 * can be lost via setattr during extending writes (we
2121	 * set inode->i_size at the end of a write). */
2122 if (should_remove_suid(dentry)) {
2123 if (meta_level == 0) {
2124 ocfs2_inode_unlock(inode, meta_level);
2125 meta_level = 1;
2126 continue;
2127 }
2128
2129 ret = ocfs2_write_remove_suid(inode);
2130 if (ret < 0) {
2131 mlog_errno(ret);
2132 goto out_unlock;
2133 }
2134 }
2135
2136 end = pos + count;
2137
2138 ret = ocfs2_check_range_for_refcount(inode, pos, count);
2139 if (ret == 1) {
2140 ocfs2_inode_unlock(inode, meta_level);
2141 meta_level = -1;
2142
2143 ret = ocfs2_prepare_inode_for_refcount(inode,
2144 file,
2145 pos,
2146 count,
2147 &meta_level);
2148 }
2149
2150 if (ret < 0) {
2151 mlog_errno(ret);
2152 goto out_unlock;
2153 }
2154
2155 break;
2156 }
2157
2158out_unlock:
2159 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2160 pos, count);
2161
2162 if (meta_level >= 0)
2163 ocfs2_inode_unlock(inode, meta_level);
2164
2165out:
2166 return ret;
2167}
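
/*
 * A short sketch, not in the original source, of the lock-upgrade dance
 * above: the read level (PR) meta lock cannot be upgraded in place, so the
 * loop drops it and re-takes it at meta_level 1 (EX) before clearing
 * suid/sgid, then restarts its checks from the top.
 */
#if 0	/* illustration only */
	/* meta_level 0 (PR) --drop--> meta_level 1 (EX) --> suid work;
	 * meta_level -1 means no meta lock is held on the error paths. */
#endif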
2168
2169static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2170 struct iov_iter *from)
2171{
2172 int direct_io, rw_level;
2173 ssize_t written = 0;
2174 ssize_t ret;
2175 size_t count = iov_iter_count(from);
2176 struct file *file = iocb->ki_filp;
2177 struct inode *inode = file_inode(file);
2178 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2179 int full_coherency = !(osb->s_mount_opt &
2180 OCFS2_MOUNT_COHERENCY_BUFFERED);
2181 void *saved_ki_complete = NULL;
2182 int append_write = ((iocb->ki_pos + count) >=
2183 i_size_read(inode) ? 1 : 0);
2184
2185 trace_ocfs2_file_aio_write(inode, file, file->f_path.dentry,
2186 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2187 file->f_path.dentry->d_name.len,
2188 file->f_path.dentry->d_name.name,
2189 (unsigned int)from->nr_segs); /* GRRRRR */
2190
2191 if (count == 0)
2192 return 0;
2193
2194 direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2195
2196 inode_lock(inode);
2197
2198 /*
2199	 * Concurrent O_DIRECT writes are allowed with the
2200	 * "coherency=buffered" mount option.
2201	 * For an append write, we must take the rw lock EX.
2202 */
2203 rw_level = (!direct_io || full_coherency || append_write);
2204
2205 ret = ocfs2_rw_lock(inode, rw_level);
2206 if (ret < 0) {
2207 mlog_errno(ret);
2208 goto out_mutex;
2209 }
2210
2211 /*
2212 * O_DIRECT writes with "coherency=full" need to take EX cluster
2213 * inode_lock to guarantee coherency.
2214 */
2215 if (direct_io && full_coherency) {
2216 /*
2217 * We need to take and drop the inode lock to force
2218 * other nodes to drop their caches. Buffered I/O
2219 * already does this in write_begin().
2220 */
2221 ret = ocfs2_inode_lock(inode, NULL, 1);
2222 if (ret < 0) {
2223 mlog_errno(ret);
2224 goto out;
2225 }
2226
2227 ocfs2_inode_unlock(inode, 1);
2228 }
2229
2230 ret = generic_write_checks(iocb, from);
2231 if (ret <= 0) {
2232 if (ret)
2233 mlog_errno(ret);
2234 goto out;
2235 }
2236 count = ret;
2237
2238 ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count);
2239 if (ret < 0) {
2240 mlog_errno(ret);
2241 goto out;
2242 }
2243
2244 if (direct_io && !is_sync_kiocb(iocb) &&
2245 ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2246 /*
2247	 * Make it sync I/O if it's an unaligned AIO.
2248 */
2249 saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2250 }
2251
2252 /* communicate with ocfs2_dio_end_io */
2253 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2254
2255 written = __generic_file_write_iter(iocb, from);
2256 /* buffered aio wouldn't have proper lock coverage today */
2257 BUG_ON(written == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
2258
2259 /*
2260	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an ocfs2_dio_end_io
2261	 * function pointer which is called when O_DIRECT I/O completes so that
2262	 * it can unlock our rw lock.
2263	 * Unfortunately there are error cases which call end_io and others
2264	 * that don't, so we don't have to unlock the rw_lock if either an
2265	 * async dio is going to do it in the future or an end_io after an
2266	 * error has already done it.
2267 */
2268 if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2269 rw_level = -1;
2270 }
2271
2272 if (unlikely(written <= 0))
2273 goto out;
2274
2275 if (((file->f_flags & O_DSYNC) && !direct_io) ||
2276 IS_SYNC(inode)) {
2277 ret = filemap_fdatawrite_range(file->f_mapping,
2278 iocb->ki_pos - written,
2279 iocb->ki_pos - 1);
2280 if (ret < 0)
2281 written = ret;
2282
2283 if (!ret) {
2284 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2285 if (ret < 0)
2286 written = ret;
2287 }
2288
2289 if (!ret)
2290 ret = filemap_fdatawait_range(file->f_mapping,
2291 iocb->ki_pos - written,
2292 iocb->ki_pos - 1);
2293 }
2294
2295out:
2296 if (saved_ki_complete)
2297 xchg(&iocb->ki_complete, saved_ki_complete);
2298
2299 if (rw_level != -1)
2300 ocfs2_rw_unlock(inode, rw_level);
2301
2302out_mutex:
2303 inode_unlock(inode);
2304
2305 if (written)
2306 ret = written;
2307 return ret;
2308}
2309
2310static ssize_t ocfs2_file_splice_read(struct file *in,
2311 loff_t *ppos,
2312 struct pipe_inode_info *pipe,
2313 size_t len,
2314 unsigned int flags)
2315{
2316 int ret = 0, lock_level = 0;
2317 struct inode *inode = file_inode(in);
2318
2319 trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2320 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2321 in->f_path.dentry->d_name.len,
2322 in->f_path.dentry->d_name.name, len);
2323
2324 /*
2325 * See the comment in ocfs2_file_read_iter()
2326 */
2327 ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level);
2328 if (ret < 0) {
2329 mlog_errno(ret);
2330 goto bail;
2331 }
2332 ocfs2_inode_unlock(inode, lock_level);
2333
2334 ret = generic_file_splice_read(in, ppos, pipe, len, flags);
2335
2336bail:
2337 return ret;
2338}
2339
2340static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2341 struct iov_iter *to)
2342{
2343 int ret = 0, rw_level = -1, lock_level = 0;
2344 struct file *filp = iocb->ki_filp;
2345 struct inode *inode = file_inode(filp);
2346
2347 trace_ocfs2_file_aio_read(inode, filp, filp->f_path.dentry,
2348 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2349 filp->f_path.dentry->d_name.len,
2350 filp->f_path.dentry->d_name.name,
2351 to->nr_segs); /* GRRRRR */
2352
2354 if (!inode) {
2355 ret = -EINVAL;
2356 mlog_errno(ret);
2357 goto bail;
2358 }
2359
2360 /*
2361 * buffered reads protect themselves in ->readpage(). O_DIRECT reads
2362 * need locks to protect pending reads from racing with truncate.
2363 */
2364 if (iocb->ki_flags & IOCB_DIRECT) {
2365 ret = ocfs2_rw_lock(inode, 0);
2366 if (ret < 0) {
2367 mlog_errno(ret);
2368 goto bail;
2369 }
2370 rw_level = 0;
2371 /* communicate with ocfs2_dio_end_io */
2372 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2373 }
2374
2375 /*
2376 * We're fine letting folks race truncates and extending
2377 * writes with read across the cluster, just like they can
2378 * locally. Hence no rw_lock during read.
2379 *
2380 * Take and drop the meta data lock to update inode fields
2381	 * like i_size. This gives the checks further down in
2382	 * generic_file_read_iter() a chance of actually working.
2383 */
2384 ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level);
2385 if (ret < 0) {
2386 mlog_errno(ret);
2387 goto bail;
2388 }
2389 ocfs2_inode_unlock(inode, lock_level);
2390
2391 ret = generic_file_read_iter(iocb, to);
2392 trace_generic_file_aio_read_ret(ret);
2393
2394 /* buffered aio wouldn't have proper lock coverage today */
2395 BUG_ON(ret == -EIOCBQUEUED && !(iocb->ki_flags & IOCB_DIRECT));
2396
2397 /* see ocfs2_file_write_iter */
2398 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2399 rw_level = -1;
2400 }
2401
2402bail:
2403 if (rw_level != -1)
2404 ocfs2_rw_unlock(inode, rw_level);
2405
2406 return ret;
2407}
2408
2409/* See generic_file_llseek_unlocked() */
2410static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2411{
2412 struct inode *inode = file->f_mapping->host;
2413 int ret = 0;
2414
2415 inode_lock(inode);
2416
2417 switch (whence) {
2418 case SEEK_SET:
2419 break;
2420 case SEEK_END:
2421 /* SEEK_END requires the OCFS2 inode lock for the file
2422 * because it references the file's size.
2423 */
2424 ret = ocfs2_inode_lock(inode, NULL, 0);
2425 if (ret < 0) {
2426 mlog_errno(ret);
2427 goto out;
2428 }
2429 offset += i_size_read(inode);
2430 ocfs2_inode_unlock(inode, 0);
2431 break;
2432 case SEEK_CUR:
2433 if (offset == 0) {
2434 offset = file->f_pos;
2435 goto out;
2436 }
2437 offset += file->f_pos;
2438 break;
2439 case SEEK_DATA:
2440 case SEEK_HOLE:
2441 ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2442 if (ret)
2443 goto out;
2444 break;
2445 default:
2446 ret = -EINVAL;
2447 goto out;
2448 }
2449
2450 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2451
2452out:
2453 inode_unlock(inode);
2454 if (ret)
2455 return ret;
2456 return offset;
2457}
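
/*
 * Illustrative sketch, not part of the original source: SEEK_DATA and
 * SEEK_HOLE from userspace resolve through ocfs2_seek_data_hole_offset()
 * above; seeking past EOF fails with errno set to ENXIO, as on other
 * filesystems.
 */
#if 0	/* userspace example */
#define _GNU_SOURCE
#include <unistd.h>
#include <fcntl.h>

static off_t example_next_data(int fd, off_t pos)
{
	return lseek(fd, pos, SEEK_DATA);	/* -1 with ENXIO past EOF */
}
#endif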
2458
2459const struct inode_operations ocfs2_file_iops = {
2460 .setattr = ocfs2_setattr,
2461 .getattr = ocfs2_getattr,
2462 .permission = ocfs2_permission,
2463 .setxattr = generic_setxattr,
2464 .getxattr = generic_getxattr,
2465 .listxattr = ocfs2_listxattr,
2466 .removexattr = generic_removexattr,
2467 .fiemap = ocfs2_fiemap,
2468 .get_acl = ocfs2_iop_get_acl,
2469 .set_acl = ocfs2_iop_set_acl,
2470};
2471
2472const struct inode_operations ocfs2_special_file_iops = {
2473 .setattr = ocfs2_setattr,
2474 .getattr = ocfs2_getattr,
2475 .permission = ocfs2_permission,
2476 .get_acl = ocfs2_iop_get_acl,
2477 .set_acl = ocfs2_iop_set_acl,
2478};
2479
2480/*
2481 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2482 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2483 */
2484const struct file_operations ocfs2_fops = {
2485 .llseek = ocfs2_file_llseek,
2486 .mmap = ocfs2_mmap,
2487 .fsync = ocfs2_sync_file,
2488 .release = ocfs2_file_release,
2489 .open = ocfs2_file_open,
2490 .read_iter = ocfs2_file_read_iter,
2491 .write_iter = ocfs2_file_write_iter,
2492 .unlocked_ioctl = ocfs2_ioctl,
2493#ifdef CONFIG_COMPAT
2494 .compat_ioctl = ocfs2_compat_ioctl,
2495#endif
2496 .lock = ocfs2_lock,
2497 .flock = ocfs2_flock,
2498 .splice_read = ocfs2_file_splice_read,
2499 .splice_write = iter_file_splice_write,
2500 .fallocate = ocfs2_fallocate,
2501};
2502
2503const struct file_operations ocfs2_dops = {
2504 .llseek = generic_file_llseek,
2505 .read = generic_read_dir,
2506 .iterate = ocfs2_readdir,
2507 .fsync = ocfs2_sync_file,
2508 .release = ocfs2_dir_release,
2509 .open = ocfs2_dir_open,
2510 .unlocked_ioctl = ocfs2_ioctl,
2511#ifdef CONFIG_COMPAT
2512 .compat_ioctl = ocfs2_compat_ioctl,
2513#endif
2514 .lock = ocfs2_lock,
2515 .flock = ocfs2_flock,
2516};
2517
2518/*
2519 * POSIX-lockless variants of our file_operations.
2520 *
2521 * These will be used if the underlying cluster stack does not support
2522 * posix file locking, if the user passes the "localflocks" mount
2523 * option, or if we have a local-only fs.
2524 *
2525 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2526 * so we still want it in the case of no stack support for
2527 * plocks. Internally, it will do the right thing when asked to ignore
2528 * the cluster.
2529 */
2530const struct file_operations ocfs2_fops_no_plocks = {
2531 .llseek = ocfs2_file_llseek,
2532 .mmap = ocfs2_mmap,
2533 .fsync = ocfs2_sync_file,
2534 .release = ocfs2_file_release,
2535 .open = ocfs2_file_open,
2536 .read_iter = ocfs2_file_read_iter,
2537 .write_iter = ocfs2_file_write_iter,
2538 .unlocked_ioctl = ocfs2_ioctl,
2539#ifdef CONFIG_COMPAT
2540 .compat_ioctl = ocfs2_compat_ioctl,
2541#endif
2542 .flock = ocfs2_flock,
2543 .splice_read = ocfs2_file_splice_read,
2544 .splice_write = iter_file_splice_write,
2545 .fallocate = ocfs2_fallocate,
2546};
2547
2548const struct file_operations ocfs2_dops_no_plocks = {
2549 .llseek = generic_file_llseek,
2550 .read = generic_read_dir,
2551 .iterate = ocfs2_readdir,
2552 .fsync = ocfs2_sync_file,
2553 .release = ocfs2_dir_release,
2554 .open = ocfs2_dir_open,
2555 .unlocked_ioctl = ocfs2_ioctl,
2556#ifdef CONFIG_COMPAT
2557 .compat_ioctl = ocfs2_compat_ioctl,
2558#endif
2559 .flock = ocfs2_flock,
2560};
132
133static int ocfs2_file_release(struct inode *inode, struct file *file)
134{
135 struct ocfs2_inode_info *oi = OCFS2_I(inode);
136
137 spin_lock(&oi->ip_lock);
138 if (!--oi->ip_open_count)
139 oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;
140
141 trace_ocfs2_file_release(inode, file, file->f_path.dentry,
142 oi->ip_blkno,
143 file->f_path.dentry->d_name.len,
144 file->f_path.dentry->d_name.name,
145 oi->ip_open_count);
146 spin_unlock(&oi->ip_lock);
147
148 ocfs2_free_file_private(inode, file);
149
150 return 0;
151}
152
153static int ocfs2_dir_open(struct inode *inode, struct file *file)
154{
155 return ocfs2_init_file_private(inode, file);
156}
157
158static int ocfs2_dir_release(struct inode *inode, struct file *file)
159{
160 ocfs2_free_file_private(inode, file);
161 return 0;
162}
163
164static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
165 int datasync)
166{
167 int err = 0;
168 struct inode *inode = file->f_mapping->host;
169 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
170 struct ocfs2_inode_info *oi = OCFS2_I(inode);
171 journal_t *journal = osb->journal->j_journal;
172 int ret;
173 tid_t commit_tid;
174 bool needs_barrier = false;
175
176 trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
177 oi->ip_blkno,
178 file->f_path.dentry->d_name.len,
179 file->f_path.dentry->d_name.name,
180 (unsigned long long)datasync);
181
182 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
183 return -EROFS;
184
185 err = file_write_and_wait_range(file, start, end);
186 if (err)
187 return err;
188
189 commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
190 if (journal->j_flags & JBD2_BARRIER &&
191 !jbd2_trans_will_send_data_barrier(journal, commit_tid))
192 needs_barrier = true;
193 err = jbd2_complete_transaction(journal, commit_tid);
194 if (needs_barrier) {
195 ret = blkdev_issue_flush(inode->i_sb->s_bdev);
196 if (!err)
197 err = ret;
198 }
199
200 if (err)
201 mlog_errno(err);
202
203 return (err < 0) ? -EIO : 0;
204}
205
206int ocfs2_should_update_atime(struct inode *inode,
207 struct vfsmount *vfsmnt)
208{
209 struct timespec64 now;
210 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
211
212 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
213 return 0;
214
215 if ((inode->i_flags & S_NOATIME) ||
216 ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
217 return 0;
218
219 /*
220 * We can be called with no vfsmnt structure - NFSD will
221 * sometimes do this.
222 *
223	 * Note that our action here is different from touch_atime() -
224 * if we can't tell whether this is a noatime mount, then we
225 * don't know whether to trust the value of s_atime_quantum.
226 */
227 if (vfsmnt == NULL)
228 return 0;
229
230 if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
231 ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
232 return 0;
233
234 if (vfsmnt->mnt_flags & MNT_RELATIME) {
235 struct timespec64 ctime = inode_get_ctime(inode);
236 struct timespec64 atime = inode_get_atime(inode);
237 struct timespec64 mtime = inode_get_mtime(inode);
238
239 if ((timespec64_compare(&atime, &mtime) <= 0) ||
240 (timespec64_compare(&atime, &ctime) <= 0))
241 return 1;
242
243 return 0;
244 }
245
246 now = current_time(inode);
247 if ((now.tv_sec - inode_get_atime_sec(inode) <= osb->s_atime_quantum))
248 return 0;
249 else
250 return 1;
251}
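
/*
 * A hedged sketch, not in the original source, of the relatime branch
 * above: atime is refreshed only while it lags mtime or ctime; otherwise
 * the s_atime_quantum throttle below decides.
 */
#if 0	/* illustration only */
static int example_relatime_wants_update(struct inode *inode)
{
	struct timespec64 atime = inode_get_atime(inode);
	struct timespec64 mtime = inode_get_mtime(inode);
	struct timespec64 ctime = inode_get_ctime(inode);

	return timespec64_compare(&atime, &mtime) <= 0 ||
	       timespec64_compare(&atime, &ctime) <= 0;
}
#endif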
252
253int ocfs2_update_inode_atime(struct inode *inode,
254 struct buffer_head *bh)
255{
256 int ret;
257 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
258 handle_t *handle;
259 struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;
260
261 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
262 if (IS_ERR(handle)) {
263 ret = PTR_ERR(handle);
264 mlog_errno(ret);
265 goto out;
266 }
267
268 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
269 OCFS2_JOURNAL_ACCESS_WRITE);
270 if (ret) {
271 mlog_errno(ret);
272 goto out_commit;
273 }
274
275 /*
276 * Don't use ocfs2_mark_inode_dirty() here as we don't always
277 * have i_rwsem to guard against concurrent changes to other
278 * inode fields.
279 */
280 inode_set_atime_to_ts(inode, current_time(inode));
281 di->i_atime = cpu_to_le64(inode_get_atime_sec(inode));
282 di->i_atime_nsec = cpu_to_le32(inode_get_atime_nsec(inode));
283 ocfs2_update_inode_fsync_trans(handle, inode, 0);
284 ocfs2_journal_dirty(handle, bh);
285
286out_commit:
287 ocfs2_commit_trans(osb, handle);
288out:
289 return ret;
290}
291
292int ocfs2_set_inode_size(handle_t *handle,
293 struct inode *inode,
294 struct buffer_head *fe_bh,
295 u64 new_i_size)
296{
297 int status;
298
299 i_size_write(inode, new_i_size);
300 inode->i_blocks = ocfs2_inode_sector_count(inode);
301 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
302
303 status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
304 if (status < 0) {
305 mlog_errno(status);
306 goto bail;
307 }
308
309bail:
310 return status;
311}
312
313int ocfs2_simple_size_update(struct inode *inode,
314 struct buffer_head *di_bh,
315 u64 new_i_size)
316{
317 int ret;
318 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
319 handle_t *handle = NULL;
320
321 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
322 if (IS_ERR(handle)) {
323 ret = PTR_ERR(handle);
324 mlog_errno(ret);
325 goto out;
326 }
327
328 ret = ocfs2_set_inode_size(handle, inode, di_bh,
329 new_i_size);
330 if (ret < 0)
331 mlog_errno(ret);
332
333 ocfs2_update_inode_fsync_trans(handle, inode, 0);
334 ocfs2_commit_trans(osb, handle);
335out:
336 return ret;
337}
338
339static int ocfs2_cow_file_pos(struct inode *inode,
340 struct buffer_head *fe_bh,
341 u64 offset)
342{
343 int status;
344 u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
345 unsigned int num_clusters = 0;
346 unsigned int ext_flags = 0;
347
348 /*
349	 * If the new offset is aligned to a cluster boundary, there is
350 * no space for ocfs2_zero_range_for_truncate to fill, so no need to
351 * CoW either.
352 */
353 if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
354 return 0;
355
356 status = ocfs2_get_clusters(inode, cpos, &phys,
357 &num_clusters, &ext_flags);
358 if (status) {
359 mlog_errno(status);
360 goto out;
361 }
362
363 if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
364 goto out;
365
366 return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
367
368out:
369 return status;
370}
371
372static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
373 struct inode *inode,
374 struct buffer_head *fe_bh,
375 u64 new_i_size)
376{
377 int status;
378 handle_t *handle;
379 struct ocfs2_dinode *di;
380 u64 cluster_bytes;
381
382 /*
383	 * We need to CoW the cluster that contains the offset if it is reflinked
384	 * since we will call ocfs2_zero_range_for_truncate later, which will
385	 * write zeros from the offset to the end of the cluster.
386 */
387 status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
388 if (status) {
389 mlog_errno(status);
390 return status;
391 }
392
393 /* TODO: This needs to actually orphan the inode in this
394 * transaction. */
395
396 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
397 if (IS_ERR(handle)) {
398 status = PTR_ERR(handle);
399 mlog_errno(status);
400 goto out;
401 }
402
403 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
404 OCFS2_JOURNAL_ACCESS_WRITE);
405 if (status < 0) {
406 mlog_errno(status);
407 goto out_commit;
408 }
409
410 /*
411 * Do this before setting i_size.
412 */
413 cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
414 status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
415 cluster_bytes);
416 if (status) {
417 mlog_errno(status);
418 goto out_commit;
419 }
420
421 i_size_write(inode, new_i_size);
422 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
423
424 di = (struct ocfs2_dinode *) fe_bh->b_data;
425 di->i_size = cpu_to_le64(new_i_size);
426 di->i_ctime = di->i_mtime = cpu_to_le64(inode_get_ctime_sec(inode));
427 di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
428 ocfs2_update_inode_fsync_trans(handle, inode, 0);
429
430 ocfs2_journal_dirty(handle, fe_bh);
431
432out_commit:
433 ocfs2_commit_trans(osb, handle);
434out:
435 return status;
436}
437
438int ocfs2_truncate_file(struct inode *inode,
439 struct buffer_head *di_bh,
440 u64 new_i_size)
441{
442 int status = 0;
443 struct ocfs2_dinode *fe = NULL;
444 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
445
446 /* We trust di_bh because it comes from ocfs2_inode_lock(), which
447 * already validated it */
448 fe = (struct ocfs2_dinode *) di_bh->b_data;
449
450 trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
451 (unsigned long long)le64_to_cpu(fe->i_size),
452 (unsigned long long)new_i_size);
453
454 mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
455 "Inode %llu, inode i_size = %lld != di "
456 "i_size = %llu, i_flags = 0x%x\n",
457 (unsigned long long)OCFS2_I(inode)->ip_blkno,
458 i_size_read(inode),
459 (unsigned long long)le64_to_cpu(fe->i_size),
460 le32_to_cpu(fe->i_flags));
461
462 if (new_i_size > le64_to_cpu(fe->i_size)) {
463 trace_ocfs2_truncate_file_error(
464 (unsigned long long)le64_to_cpu(fe->i_size),
465 (unsigned long long)new_i_size);
466 status = -EINVAL;
467 mlog_errno(status);
468 goto bail;
469 }
470
471 down_write(&OCFS2_I(inode)->ip_alloc_sem);
472
473 ocfs2_resv_discard(&osb->osb_la_resmap,
474 &OCFS2_I(inode)->ip_la_data_resv);
475
476 /*
477 * The inode lock forced other nodes to sync and drop their
478 * pages, which (correctly) happens even if we have a truncate
479 * without allocation change - ocfs2 cluster sizes can be much
480 * greater than page size, so we have to truncate them
481 * anyway.
482 */
483
484 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
485 unmap_mapping_range(inode->i_mapping,
486 new_i_size + PAGE_SIZE - 1, 0, 1);
487 truncate_inode_pages(inode->i_mapping, new_i_size);
488 status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
489 i_size_read(inode), 1);
490 if (status)
491 mlog_errno(status);
492
493 goto bail_unlock_sem;
494 }
495
496 /* alright, we're going to need to do a full blown alloc size
497 * change. Orphan the inode so that recovery can complete the
498 * truncate if necessary. This does the task of marking
499 * i_size. */
500 status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
501 if (status < 0) {
502 mlog_errno(status);
503 goto bail_unlock_sem;
504 }
505
506 unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
507 truncate_inode_pages(inode->i_mapping, new_i_size);
508
509 status = ocfs2_commit_truncate(osb, inode, di_bh);
510 if (status < 0) {
511 mlog_errno(status);
512 goto bail_unlock_sem;
513 }
514
515 /* TODO: orphan dir cleanup here. */
516bail_unlock_sem:
517 up_write(&OCFS2_I(inode)->ip_alloc_sem);
518
519bail:
520 if (!status && OCFS2_I(inode)->ip_clusters == 0)
521 status = ocfs2_try_remove_refcount_tree(inode, di_bh);
522
523 return status;
524}
525
526/*
527 * Extend file allocation only here.
528 * We'll update all the on-disk metadata, and oip->alloc_size.
529 *
530 * Callers are expected to hold the locks, have a transaction started, and
531 * have enough data / metadata reservations in their contexts.
532 *
533 * Will return -EAGAIN, and a reason if a restart is needed.
534 * If passed in, *reason_ret will always be set, even on error.
535 */
536int ocfs2_add_inode_data(struct ocfs2_super *osb,
537 struct inode *inode,
538 u32 *logical_offset,
539 u32 clusters_to_add,
540 int mark_unwritten,
541 struct buffer_head *fe_bh,
542 handle_t *handle,
543 struct ocfs2_alloc_context *data_ac,
544 struct ocfs2_alloc_context *meta_ac,
545 enum ocfs2_alloc_restarted *reason_ret)
546{
547 struct ocfs2_extent_tree et;
548
549 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
550 return ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
551 clusters_to_add, mark_unwritten,
552 data_ac, meta_ac, reason_ret);
553}
554
555static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
556 u32 clusters_to_add, int mark_unwritten)
557{
558 int status = 0;
559 int restart_func = 0;
560 int credits;
561 u32 prev_clusters;
562 struct buffer_head *bh = NULL;
563 struct ocfs2_dinode *fe = NULL;
564 handle_t *handle = NULL;
565 struct ocfs2_alloc_context *data_ac = NULL;
566 struct ocfs2_alloc_context *meta_ac = NULL;
567 enum ocfs2_alloc_restarted why = RESTART_NONE;
568 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
569 struct ocfs2_extent_tree et;
570 int did_quota = 0;
571
572 /*
573	 * Unwritten extents only exist on file systems which
574 * support holes.
575 */
576 BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));
577
578 status = ocfs2_read_inode_block(inode, &bh);
579 if (status < 0) {
580 mlog_errno(status);
581 goto leave;
582 }
583 fe = (struct ocfs2_dinode *) bh->b_data;
584
585restart_all:
586 BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);
587
588 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
589 status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
590 &data_ac, &meta_ac);
591 if (status) {
592 mlog_errno(status);
593 goto leave;
594 }
595
596 credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
597 handle = ocfs2_start_trans(osb, credits);
598 if (IS_ERR(handle)) {
599 status = PTR_ERR(handle);
600 handle = NULL;
601 mlog_errno(status);
602 goto leave;
603 }
604
605restarted_transaction:
606 trace_ocfs2_extend_allocation(
607 (unsigned long long)OCFS2_I(inode)->ip_blkno,
608 (unsigned long long)i_size_read(inode),
609 le32_to_cpu(fe->i_clusters), clusters_to_add,
610 why, restart_func);
611
612 status = dquot_alloc_space_nodirty(inode,
613 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
614 if (status)
615 goto leave;
616 did_quota = 1;
617
618	/* reserve a write to the file entry early on - so that if we
619 * run out of credits in the allocation path, we can still
620 * update i_size. */
621 status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
622 OCFS2_JOURNAL_ACCESS_WRITE);
623 if (status < 0) {
624 mlog_errno(status);
625 goto leave;
626 }
627
628 prev_clusters = OCFS2_I(inode)->ip_clusters;
629
630 status = ocfs2_add_inode_data(osb,
631 inode,
632 &logical_start,
633 clusters_to_add,
634 mark_unwritten,
635 bh,
636 handle,
637 data_ac,
638 meta_ac,
639 &why);
640 if ((status < 0) && (status != -EAGAIN)) {
641 if (status != -ENOSPC)
642 mlog_errno(status);
643 goto leave;
644 }
645 ocfs2_update_inode_fsync_trans(handle, inode, 1);
646 ocfs2_journal_dirty(handle, bh);
647
648 spin_lock(&OCFS2_I(inode)->ip_lock);
649 clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
650 spin_unlock(&OCFS2_I(inode)->ip_lock);
651 /* Release unused quota reservation */
652 dquot_free_space(inode,
653 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
654 did_quota = 0;
655
656 if (why != RESTART_NONE && clusters_to_add) {
657 if (why == RESTART_META) {
658 restart_func = 1;
659 status = 0;
660 } else {
661 BUG_ON(why != RESTART_TRANS);
662
663 status = ocfs2_allocate_extend_trans(handle, 1);
664 if (status < 0) {
665 /* handle still has to be committed at
666 * this point. */
667 status = -ENOMEM;
668 mlog_errno(status);
669 goto leave;
670 }
671 goto restarted_transaction;
672 }
673 }
674
675 trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
676 le32_to_cpu(fe->i_clusters),
677 (unsigned long long)le64_to_cpu(fe->i_size),
678 OCFS2_I(inode)->ip_clusters,
679 (unsigned long long)i_size_read(inode));
680
681leave:
682 if (status < 0 && did_quota)
683 dquot_free_space(inode,
684 ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
685 if (handle) {
686 ocfs2_commit_trans(osb, handle);
687 handle = NULL;
688 }
689 if (data_ac) {
690 ocfs2_free_alloc_context(data_ac);
691 data_ac = NULL;
692 }
693 if (meta_ac) {
694 ocfs2_free_alloc_context(meta_ac);
695 meta_ac = NULL;
696 }
697 if ((!status) && restart_func) {
698 restart_func = 0;
699 goto restart_all;
700 }
701 brelse(bh);
702 bh = NULL;
703
704 return status;
705}
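
/*
 * A summary sketch, not in the original source, of the restart protocol
 * above: ocfs2_add_inode_data() reports why it stopped, and the caller
 * either extends the running transaction in place or commits and starts
 * the whole allocation over with fresh reservations.
 */
#if 0	/* illustration only */
	switch (why) {
	case RESTART_NONE:	/* all requested clusters were added */
		break;
	case RESTART_TRANS:	/* out of journal credits: extend the handle
				 * and goto restarted_transaction */
		break;
	case RESTART_META:	/* out of metadata reservation: commit,
				 * re-reserve, and goto restart_all */
		break;
	}
#endif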
706
707/*
708 * While a write will already be ordering the data, a truncate will not.
709 * Thus, we need to explicitly order the zeroed pages.
710 */
711static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
712 struct buffer_head *di_bh,
713 loff_t start_byte,
714 loff_t length)
715{
716 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
717 handle_t *handle = NULL;
718 int ret = 0;
719
720 if (!ocfs2_should_order_data(inode))
721 goto out;
722
723 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
724 if (IS_ERR(handle)) {
725 ret = -ENOMEM;
726 mlog_errno(ret);
727 goto out;
728 }
729
730 ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
731 if (ret < 0) {
732 mlog_errno(ret);
733 goto out;
734 }
735
736 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
737 OCFS2_JOURNAL_ACCESS_WRITE);
738 if (ret)
739 mlog_errno(ret);
740 ocfs2_update_inode_fsync_trans(handle, inode, 1);
741
742out:
743 if (ret) {
744 if (!IS_ERR(handle))
745 ocfs2_commit_trans(osb, handle);
746 handle = ERR_PTR(ret);
747 }
748 return handle;
749}
750
751/* Some parts of this are taken from generic_cont_expand, which turned out
752 * to be too fragile to do exactly what we need without us having to
753 * worry about recursive locking in ->write_begin() and ->write_end(). */
754static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
755 u64 abs_to, struct buffer_head *di_bh)
756{
757 struct address_space *mapping = inode->i_mapping;
758 struct page *page;
759 unsigned long index = abs_from >> PAGE_SHIFT;
760 handle_t *handle;
761 int ret = 0;
762 unsigned zero_from, zero_to, block_start, block_end;
763 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
764
765 BUG_ON(abs_from >= abs_to);
766 BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
767 BUG_ON(abs_from & (inode->i_blkbits - 1));
768
769 handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
770 abs_from,
771 abs_to - abs_from);
772 if (IS_ERR(handle)) {
773 ret = PTR_ERR(handle);
774 goto out;
775 }
776
777 page = find_or_create_page(mapping, index, GFP_NOFS);
778 if (!page) {
779 ret = -ENOMEM;
780 mlog_errno(ret);
781 goto out_commit_trans;
782 }
783
784 /* Get the offsets within the page that we want to zero */
785 zero_from = abs_from & (PAGE_SIZE - 1);
786 zero_to = abs_to & (PAGE_SIZE - 1);
787 if (!zero_to)
788 zero_to = PAGE_SIZE;
789
790 trace_ocfs2_write_zero_page(
791 (unsigned long long)OCFS2_I(inode)->ip_blkno,
792 (unsigned long long)abs_from,
793 (unsigned long long)abs_to,
794 index, zero_from, zero_to);
795
796 /* We know that zero_from is block aligned */
797 for (block_start = zero_from; block_start < zero_to;
798 block_start = block_end) {
799 block_end = block_start + i_blocksize(inode);
800
801 /*
802 * block_start is block-aligned. Bump it by one to force
803 * __block_write_begin and block_commit_write to zero the
804 * whole block.
805 */
806 ret = __block_write_begin(page, block_start + 1, 0,
807 ocfs2_get_block);
808 if (ret < 0) {
809 mlog_errno(ret);
810 goto out_unlock;
811 }
812
813
814 /* must not update i_size! */
815 block_commit_write(page, block_start + 1, block_start + 1);
816 }
817
818 /*
819	 * fs-writeback will release, without holding the page lock, dirty
820	 * pages whose offsets are beyond the inode size; the release happens
821	 * in block_write_full_folio().
822 */
823 i_size_write(inode, abs_to);
824 inode->i_blocks = ocfs2_inode_sector_count(inode);
825 di->i_size = cpu_to_le64((u64)i_size_read(inode));
826 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
827 di->i_mtime = di->i_ctime = cpu_to_le64(inode_get_mtime_sec(inode));
828 di->i_ctime_nsec = cpu_to_le32(inode_get_mtime_nsec(inode));
829 di->i_mtime_nsec = di->i_ctime_nsec;
830 if (handle) {
831 ocfs2_journal_dirty(handle, di_bh);
832 ocfs2_update_inode_fsync_trans(handle, inode, 1);
833 }
834
835out_unlock:
836 unlock_page(page);
837 put_page(page);
838out_commit_trans:
839 if (handle)
840 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
841out:
842 return ret;
843}
844
845/*
846 * Find the next range to zero. We do this in terms of bytes because
847 * that's what ocfs2_zero_extend() wants, and it is dealing with the
848 * pagecache. We may return multiple extents.
849 *
850 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
851 * needs to be zeroed. range_start and range_end return the next zeroing
852 * range. A subsequent call should pass the previous range_end as its
853 * zero_start. If range_end is 0, there's nothing to do.
854 *
855 * Unwritten extents are skipped over. Refcounted extents are CoW'd.
856 */
857static int ocfs2_zero_extend_get_range(struct inode *inode,
858 struct buffer_head *di_bh,
859 u64 zero_start, u64 zero_end,
860 u64 *range_start, u64 *range_end)
861{
862 int rc = 0, needs_cow = 0;
863 u32 p_cpos, zero_clusters = 0;
864 u32 zero_cpos =
865 zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
866 u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
867 unsigned int num_clusters = 0;
868 unsigned int ext_flags = 0;
869
870 while (zero_cpos < last_cpos) {
871 rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
872 &num_clusters, &ext_flags);
873 if (rc) {
874 mlog_errno(rc);
875 goto out;
876 }
877
878 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
879 zero_clusters = num_clusters;
880 if (ext_flags & OCFS2_EXT_REFCOUNTED)
881 needs_cow = 1;
882 break;
883 }
884
885 zero_cpos += num_clusters;
886 }
887 if (!zero_clusters) {
888 *range_end = 0;
889 goto out;
890 }
891
892 while ((zero_cpos + zero_clusters) < last_cpos) {
893 rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
894 &p_cpos, &num_clusters,
895 &ext_flags);
896 if (rc) {
897 mlog_errno(rc);
898 goto out;
899 }
900
901 if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
902 break;
903 if (ext_flags & OCFS2_EXT_REFCOUNTED)
904 needs_cow = 1;
905 zero_clusters += num_clusters;
906 }
907 if ((zero_cpos + zero_clusters) > last_cpos)
908 zero_clusters = last_cpos - zero_cpos;
909
910 if (needs_cow) {
911 rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
912 zero_clusters, UINT_MAX);
913 if (rc) {
914 mlog_errno(rc);
915 goto out;
916 }
917 }
918
919 *range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
920 *range_end = ocfs2_clusters_to_bytes(inode->i_sb,
921 zero_cpos + zero_clusters);
922
923out:
924 return rc;
925}
926
927/*
928 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
929 * has made sure that the entire range needs zeroing.
930 */
931static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
932 u64 range_end, struct buffer_head *di_bh)
933{
934 int rc = 0;
935 u64 next_pos;
936 u64 zero_pos = range_start;
937
938 trace_ocfs2_zero_extend_range(
939 (unsigned long long)OCFS2_I(inode)->ip_blkno,
940 (unsigned long long)range_start,
941 (unsigned long long)range_end);
942 BUG_ON(range_start >= range_end);
943
944 while (zero_pos < range_end) {
945 next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
946 if (next_pos > range_end)
947 next_pos = range_end;
948 rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
949 if (rc < 0) {
950 mlog_errno(rc);
951 break;
952 }
953 zero_pos = next_pos;
954
955 /*
956 * Very large extends have the potential to lock up
957 * the cpu for extended periods of time.
958 */
959 cond_resched();
960 }
961
962 return rc;
963}
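
/*
 * A worked example, not in the original source, assuming 4K pages: zeroing
 * range_start = 5000 to range_end = 13000 proceeds page by page as
 * [5000, 8192), [8192, 12288), [12288, 13000), with a cond_resched()
 * between steps so large extends stay preemptible.
 */
#if 0	/* illustration only, 4K pages */
	/* zero_pos 5000  -> next_pos = (5000 & PAGE_MASK) + PAGE_SIZE = 8192 */
	/* zero_pos 8192  -> next_pos = 12288                                 */
	/* zero_pos 12288 -> next_pos clamped to range_end = 13000            */
#endif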
964
965int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
966 loff_t zero_to_size)
967{
968 int ret = 0;
969 u64 zero_start, range_start = 0, range_end = 0;
970 struct super_block *sb = inode->i_sb;
971
972 zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
973 trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
974 (unsigned long long)zero_start,
975 (unsigned long long)i_size_read(inode));
976 while (zero_start < zero_to_size) {
977 ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
978 zero_to_size,
979 &range_start,
980 &range_end);
981 if (ret) {
982 mlog_errno(ret);
983 break;
984 }
985 if (!range_end)
986 break;
987 /* Trim the ends */
988 if (range_start < zero_start)
989 range_start = zero_start;
990 if (range_end > zero_to_size)
991 range_end = zero_to_size;
992
993 ret = ocfs2_zero_extend_range(inode, range_start,
994 range_end, di_bh);
995 if (ret) {
996 mlog_errno(ret);
997 break;
998 }
999 zero_start = range_end;
1000 }
1001
1002 return ret;
1003}
1004
1005int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
1006 u64 new_i_size, u64 zero_to)
1007{
1008 int ret;
1009 u32 clusters_to_add;
1010 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1011
1012 /*
1013 * Only quota files call this without a bh, and they can't be
1014 * refcounted.
1015 */
1016 BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
1017 BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));
1018
1019 clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
1020 if (clusters_to_add < oi->ip_clusters)
1021 clusters_to_add = 0;
1022 else
1023 clusters_to_add -= oi->ip_clusters;
1024
1025 if (clusters_to_add) {
1026 ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
1027 clusters_to_add, 0);
1028 if (ret) {
1029 mlog_errno(ret);
1030 goto out;
1031 }
1032 }
1033
1034 /*
1035 * Call this even if we don't add any clusters to the tree. We
1036 * still need to zero the area between the old i_size and the
1037 * new i_size.
1038 */
1039 ret = ocfs2_zero_extend(inode, di_bh, zero_to);
1040 if (ret < 0)
1041 mlog_errno(ret);
1042
1043out:
1044 return ret;
1045}
1046
1047static int ocfs2_extend_file(struct inode *inode,
1048 struct buffer_head *di_bh,
1049 u64 new_i_size)
1050{
1051 int ret = 0;
1052 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1053
1054 BUG_ON(!di_bh);
1055
1056 /* setattr sometimes calls us like this. */
1057 if (new_i_size == 0)
1058 goto out;
1059
1060 if (i_size_read(inode) == new_i_size)
1061 goto out;
1062 BUG_ON(new_i_size < i_size_read(inode));
1063
1064 /*
1065	 * The alloc sem blocks readers and writers from seeing our
1066	 * allocation until we're done changing it. We depend on
1067 * i_rwsem to block other extend/truncate calls while we're
1068 * here. We even have to hold it for sparse files because there
1069 * might be some tail zeroing.
1070 */
1071 down_write(&oi->ip_alloc_sem);
1072
1073 if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1074 /*
1075	 * We can optimize small extends by keeping the inode's
1076	 * inline data.
1077 */
1078 if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
1079 up_write(&oi->ip_alloc_sem);
1080 goto out_update_size;
1081 }
1082
1083 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1084 if (ret) {
1085 up_write(&oi->ip_alloc_sem);
1086 mlog_errno(ret);
1087 goto out;
1088 }
1089 }
1090
1091 if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
1092 ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
1093 else
1094 ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
1095 new_i_size);
1096
1097 up_write(&oi->ip_alloc_sem);
1098
1099 if (ret < 0) {
1100 mlog_errno(ret);
1101 goto out;
1102 }
1103
1104out_update_size:
1105 ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
1106 if (ret < 0)
1107 mlog_errno(ret);
1108
1109out:
1110 return ret;
1111}
1112
1113int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
1114 struct iattr *attr)
1115{
1116 int status = 0, size_change;
1117 int inode_locked = 0;
1118 struct inode *inode = d_inode(dentry);
1119 struct super_block *sb = inode->i_sb;
1120 struct ocfs2_super *osb = OCFS2_SB(sb);
1121 struct buffer_head *bh = NULL;
1122 handle_t *handle = NULL;
1123 struct dquot *transfer_to[MAXQUOTAS] = { };
1124 int qtype;
1125 int had_lock;
1126 struct ocfs2_lock_holder oh;
1127
1128 trace_ocfs2_setattr(inode, dentry,
1129 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1130 dentry->d_name.len, dentry->d_name.name,
1131 attr->ia_valid, attr->ia_mode,
1132 from_kuid(&init_user_ns, attr->ia_uid),
1133 from_kgid(&init_user_ns, attr->ia_gid));
1134
1135 /* ensuring we don't even attempt to truncate a symlink */
1136 if (S_ISLNK(inode->i_mode))
1137 attr->ia_valid &= ~ATTR_SIZE;
1138
1139#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
1140 | ATTR_GID | ATTR_UID | ATTR_MODE)
1141 if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
1142 return 0;
1143
1144 status = setattr_prepare(&nop_mnt_idmap, dentry, attr);
1145 if (status)
1146 return status;
1147
1148 if (is_quota_modification(&nop_mnt_idmap, inode, attr)) {
1149 status = dquot_initialize(inode);
1150 if (status)
1151 return status;
1152 }
1153 size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
1154 if (size_change) {
1155 /*
1156	 * Here we should wait for dio to finish before taking the inode lock
1157 * to avoid a deadlock between ocfs2_setattr() and
1158 * ocfs2_dio_end_io_write()
1159 */
1160 inode_dio_wait(inode);
1161
1162 status = ocfs2_rw_lock(inode, 1);
1163 if (status < 0) {
1164 mlog_errno(status);
1165 goto bail;
1166 }
1167 }
1168
1169 had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
1170 if (had_lock < 0) {
1171 status = had_lock;
1172 goto bail_unlock_rw;
1173 } else if (had_lock) {
1174 /*
1175 * As far as we know, ocfs2_setattr() could only be the first
1176 * VFS entry point in the call chain of recursive cluster
1177 * locking issue.
1178 *
1179 * For instance:
1180 * chmod_common()
1181 * notify_change()
1182 * ocfs2_setattr()
1183 * posix_acl_chmod()
1184 * ocfs2_iop_get_acl()
1185 *
1186 * But, we're not 100% sure if it's always true, because the
1187 * ordering of the VFS entry points in the call chain is out
1188 * of our control. So, we'd better dump the stack here to
1189 * catch the other cases of recursive locking.
1190 */
1191 mlog(ML_ERROR, "Another case of recursive locking:\n");
1192 dump_stack();
1193 }
1194 inode_locked = 1;
1195
1196 if (size_change) {
1197 status = inode_newsize_ok(inode, attr->ia_size);
1198 if (status)
1199 goto bail_unlock;
1200
1201 if (i_size_read(inode) >= attr->ia_size) {
1202 if (ocfs2_should_order_data(inode)) {
1203 status = ocfs2_begin_ordered_truncate(inode,
1204 attr->ia_size);
1205 if (status)
1206 goto bail_unlock;
1207 }
1208 status = ocfs2_truncate_file(inode, bh, attr->ia_size);
1209 } else
1210 status = ocfs2_extend_file(inode, bh, attr->ia_size);
1211 if (status < 0) {
1212 if (status != -ENOSPC)
1213 mlog_errno(status);
1214 status = -ENOSPC;
1215 goto bail_unlock;
1216 }
1217 }
1218
1219 if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
1220 (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
1221 /*
1222 * Gather pointers to quota structures so that allocation /
1223 * freeing of quota structures happens here and not inside
1224 * dquot_transfer() where we have problems with lock ordering
1225 */
1226 if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
1227 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1228 OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
1229 transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
1230 if (IS_ERR(transfer_to[USRQUOTA])) {
1231 status = PTR_ERR(transfer_to[USRQUOTA]);
1232 transfer_to[USRQUOTA] = NULL;
1233 goto bail_unlock;
1234 }
1235 }
1236 if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
1237 && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
1238 OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
1239 transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
1240 if (IS_ERR(transfer_to[GRPQUOTA])) {
1241 status = PTR_ERR(transfer_to[GRPQUOTA]);
1242 transfer_to[GRPQUOTA] = NULL;
1243 goto bail_unlock;
1244 }
1245 }
1246 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1247 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
1248 2 * ocfs2_quota_trans_credits(sb));
1249 if (IS_ERR(handle)) {
1250 status = PTR_ERR(handle);
1251 mlog_errno(status);
1252 goto bail_unlock_alloc;
1253 }
1254 status = __dquot_transfer(inode, transfer_to);
1255 if (status < 0)
1256 goto bail_commit;
1257 } else {
1258 down_write(&OCFS2_I(inode)->ip_alloc_sem);
1259 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1260 if (IS_ERR(handle)) {
1261 status = PTR_ERR(handle);
1262 mlog_errno(status);
1263 goto bail_unlock_alloc;
1264 }
1265 }
1266
1267 setattr_copy(&nop_mnt_idmap, inode, attr);
1268 mark_inode_dirty(inode);
1269
1270 status = ocfs2_mark_inode_dirty(handle, inode, bh);
1271 if (status < 0)
1272 mlog_errno(status);
1273
1274bail_commit:
1275 ocfs2_commit_trans(osb, handle);
1276bail_unlock_alloc:
1277 up_write(&OCFS2_I(inode)->ip_alloc_sem);
1278bail_unlock:
1279 if (status && inode_locked) {
1280 ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1281 inode_locked = 0;
1282 }
1283bail_unlock_rw:
1284 if (size_change)
1285 ocfs2_rw_unlock(inode, 1);
1286bail:
1287
1288 /* Release quota pointers in case we acquired them */
1289 for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
1290 dqput(transfer_to[qtype]);
1291
1292 if (!status && attr->ia_valid & ATTR_MODE) {
1293 status = ocfs2_acl_chmod(inode, bh);
1294 if (status < 0)
1295 mlog_errno(status);
1296 }
1297 if (inode_locked)
1298 ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
1299
1300 brelse(bh);
1301 return status;
1302}
1303
1304int ocfs2_getattr(struct mnt_idmap *idmap, const struct path *path,
1305 struct kstat *stat, u32 request_mask, unsigned int flags)
1306{
1307 struct inode *inode = d_inode(path->dentry);
1308 struct super_block *sb = path->dentry->d_sb;
1309 struct ocfs2_super *osb = sb->s_fs_info;
1310 int err;
1311
1312 err = ocfs2_inode_revalidate(path->dentry);
1313 if (err) {
1314 if (err != -ENOENT)
1315 mlog_errno(err);
1316 goto bail;
1317 }
1318
1319 generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
1320 /*
1321 * If there is inline data in the inode, the inode will normally not
1322 * have data blocks allocated (it may have an external xattr block).
1323	 * Report at least one sector for such files, so tools like tar, rsync,
1324	 * and others don't incorrectly think the file is completely sparse.
1325 */
1326 if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
1327 stat->blocks += (stat->size + 511)>>9;
1328
1329 /* We set the blksize from the cluster size for performance */
1330 stat->blksize = osb->s_clustersize;
1331
1332bail:
1333 return err;
1334}
1335
1336int ocfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
1337 int mask)
1338{
1339 int ret, had_lock;
1340 struct ocfs2_lock_holder oh;
1341
1342 if (mask & MAY_NOT_BLOCK)
1343 return -ECHILD;
1344
1345 had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
1346 if (had_lock < 0) {
1347 ret = had_lock;
1348 goto out;
1349 } else if (had_lock) {
1350 /* See comments in ocfs2_setattr() for details.
1351 * The call chain of this case could be:
1352 * do_sys_open()
1353 * may_open()
1354 * inode_permission()
1355 * ocfs2_permission()
1356 * ocfs2_iop_get_acl()
1357 */
1358 mlog(ML_ERROR, "Another case of recursive locking:\n");
1359 dump_stack();
1360 }
1361
1362 ret = generic_permission(&nop_mnt_idmap, inode, mask);
1363
1364 ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
1365out:
1366 return ret;
1367}
1368
1369static int __ocfs2_write_remove_suid(struct inode *inode,
1370 struct buffer_head *bh)
1371{
1372 int ret;
1373 handle_t *handle;
1374 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1375 struct ocfs2_dinode *di;
1376
1377 trace_ocfs2_write_remove_suid(
1378 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1379 inode->i_mode);
1380
1381 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1382 if (IS_ERR(handle)) {
1383 ret = PTR_ERR(handle);
1384 mlog_errno(ret);
1385 goto out;
1386 }
1387
1388 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
1389 OCFS2_JOURNAL_ACCESS_WRITE);
1390 if (ret < 0) {
1391 mlog_errno(ret);
1392 goto out_trans;
1393 }
1394
1395 inode->i_mode &= ~S_ISUID;
1396 if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
1397 inode->i_mode &= ~S_ISGID;
1398
1399 di = (struct ocfs2_dinode *) bh->b_data;
1400 di->i_mode = cpu_to_le16(inode->i_mode);
1401 ocfs2_update_inode_fsync_trans(handle, inode, 0);
1402
1403 ocfs2_journal_dirty(handle, bh);
1404
1405out_trans:
1406 ocfs2_commit_trans(osb, handle);
1407out:
1408 return ret;
1409}
1410
1411static int ocfs2_write_remove_suid(struct inode *inode)
1412{
1413 int ret;
1414 struct buffer_head *bh = NULL;
1415
1416 ret = ocfs2_read_inode_block(inode, &bh);
1417 if (ret < 0) {
1418 mlog_errno(ret);
1419 goto out;
1420 }
1421
1422 ret = __ocfs2_write_remove_suid(inode, bh);
1423out:
1424 brelse(bh);
1425 return ret;
1426}
1427
1428/*
1429 * Allocate enough extents to cover the region starting at byte offset
1430 * start for len bytes. Existing extents are skipped, any extents
1431 * added are marked as "unwritten".
1432 */
1433static int ocfs2_allocate_unwritten_extents(struct inode *inode,
1434 u64 start, u64 len)
1435{
1436 int ret;
1437 u32 cpos, phys_cpos, clusters, alloc_size;
1438 u64 end = start + len;
1439 struct buffer_head *di_bh = NULL;
1440
1441 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1442 ret = ocfs2_read_inode_block(inode, &di_bh);
1443 if (ret) {
1444 mlog_errno(ret);
1445 goto out;
1446 }
1447
1448 /*
1449 * Nothing to do if the requested reservation range
1450		 * fits within the inode's inline data.
1451 */
1452 if (ocfs2_size_fits_inline_data(di_bh, end))
1453 goto out;
1454
1455 ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
1456 if (ret) {
1457 mlog_errno(ret);
1458 goto out;
1459 }
1460 }
1461
1462 /*
1463 * We consider both start and len to be inclusive.
1464 */
1465 cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
1466 clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
1467 clusters -= cpos;
1468
1469 while (clusters) {
1470 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
1471 &alloc_size, NULL);
1472 if (ret) {
1473 mlog_errno(ret);
1474 goto out;
1475 }
1476
1477 /*
1478 * Hole or existing extent len can be arbitrary, so
1479 * cap it to our own allocation request.
1480 */
1481 if (alloc_size > clusters)
1482 alloc_size = clusters;
1483
1484 if (phys_cpos) {
1485 /*
1486 * We already have an allocation at this
1487 * region so we can safely skip it.
1488 */
1489 goto next;
1490 }
1491
1492 ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
1493 if (ret) {
1494 if (ret != -ENOSPC)
1495 mlog_errno(ret);
1496 goto out;
1497 }
1498
1499next:
1500 cpos += alloc_size;
1501 clusters -= alloc_size;
1502 }
1503
1504 ret = 0;
1505out:
1506
1507 brelse(di_bh);
1508 return ret;
1509}
1510
1511/*
1512 * Truncate a byte range, avoiding pages within partial clusters. This
1513 * preserves those pages for the zeroing code to write to.
1514 */
1515static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
1516 u64 byte_len)
1517{
1518 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1519 loff_t start, end;
1520 struct address_space *mapping = inode->i_mapping;
1521
1522 start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
1523 end = byte_start + byte_len;
1524 end = end & ~(osb->s_clustersize - 1);
1525
1526 if (start < end) {
1527 unmap_mapping_range(mapping, start, end - start, 0);
1528 truncate_inode_pages_range(mapping, start, end - 1);
1529 }
1530}
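
/*
 * A worked example, not in the original source, assuming a 64K cluster
 * size: byte_start = 10000, byte_len = 200000 rounds start up to 65536 and
 * end down to 196608, so only whole clusters have their pages dropped and
 * the partial clusters at each edge are left for the zeroing code.
 */
#if 0	/* illustration only */
	u64 csize = 65536;
	loff_t start = (10000 + csize - 1) & ~(csize - 1);	/* 65536 */
	loff_t end = (10000 + 200000) & ~(csize - 1);		/* 196608 */
#endif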
1531
1532/*
1533 * Zero out partial blocks of one cluster.
1534 *
1535 * start: file offset where zeroing starts; will be rounded up to block
1536 * alignment. len: will be trimmed to the end of the current cluster if
1537 * "start + len" extends past it.
1538 */
1539static int ocfs2_zeroout_partial_cluster(struct inode *inode,
1540 u64 start, u64 len)
1541{
1542 int ret;
1543 u64 start_block, end_block, nr_blocks;
1544 u64 p_block, offset;
1545 u32 cluster, p_cluster, nr_clusters;
1546 struct super_block *sb = inode->i_sb;
1547 u64 end = ocfs2_align_bytes_to_clusters(sb, start);
1548
1549 if (start + len < end)
1550 end = start + len;
1551
1552 start_block = ocfs2_blocks_for_bytes(sb, start);
1553 end_block = ocfs2_blocks_for_bytes(sb, end);
1554 nr_blocks = end_block - start_block;
1555 if (!nr_blocks)
1556 return 0;
1557
1558 cluster = ocfs2_bytes_to_clusters(sb, start);
1559 ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
1560 &nr_clusters, NULL);
1561 if (ret)
1562 return ret;
1563 if (!p_cluster)
1564 return 0;
1565
1566 offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
1567 p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
1568 return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
1569}
1570
1571static int ocfs2_zero_partial_clusters(struct inode *inode,
1572 u64 start, u64 len)
1573{
1574 int ret = 0;
1575 u64 tmpend = 0;
1576 u64 end = start + len;
1577 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1578 unsigned int csize = osb->s_clustersize;
1579 handle_t *handle;
1580 loff_t isize = i_size_read(inode);
1581
1582 /*
1583 * The "start" and "end" values are NOT necessarily part of
1584 * the range whose allocation is being deleted. Rather, this
1585 * is what the user passed in with the request. We must zero
1586 * partial clusters here. There's no need to worry about
1587 * physical allocation - the zeroing code knows to skip holes.
1588 */
1589 trace_ocfs2_zero_partial_clusters(
1590 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1591 (unsigned long long)start, (unsigned long long)end);
1592
1593 /*
1594 * If both edges are on a cluster boundary then there's no
1595 * zeroing required as the region is part of the allocation to
1596 * be truncated.
1597 */
1598 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
1599 goto out;
1600
1601 /* No page cache for EOF blocks; issue a zeroout to disk. */
1602 if (end > isize) {
1603 /*
1604 * Zero out the EOF blocks in the last cluster starting
1605 * from "isize", even when "start" > "isize". Zeroing
1606 * exactly at "start" would be complicated: "start" may
1607 * not be block aligned, so a buffered write would be
1608 * needed, and buffered writes beyond EOF are not
1609 * supported.
1610 */
1611 ret = ocfs2_zeroout_partial_cluster(inode, isize,
1612 end - isize);
1613 if (ret) {
1614 mlog_errno(ret);
1615 goto out;
1616 }
1617 if (start >= isize)
1618 goto out;
1619 end = isize;
1620 }
1621 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
1622 if (IS_ERR(handle)) {
1623 ret = PTR_ERR(handle);
1624 mlog_errno(ret);
1625 goto out;
1626 }
1627
1628 /*
1629 * If start is on a cluster boundary and end is somewhere in another
1630 * cluster, we have not COWed the cluster starting at start, unless
1631 * end is also within the same cluster. So, in this case, we skip the
1632 * first call to ocfs2_zero_range_for_truncate() and move on to the
1633 * next one.
1634 */
1635 if ((start & (csize - 1)) != 0) {
1636 /*
1637 * We want to get the byte offset of the end of the 1st
1638 * cluster.
1639 */
1640 tmpend = (u64)osb->s_clustersize +
1641 (start & ~(osb->s_clustersize - 1));
1642 if (tmpend > end)
1643 tmpend = end;
1644
1645 trace_ocfs2_zero_partial_clusters_range1(
1646 (unsigned long long)start,
1647 (unsigned long long)tmpend);
1648
1649 ret = ocfs2_zero_range_for_truncate(inode, handle, start,
1650 tmpend);
1651 if (ret)
1652 mlog_errno(ret);
1653 }
1654
1655 if (tmpend < end) {
1656 /*
1657 * This may make start and end equal, but the zeroing
1658 * code will skip any work in that case so there's no
1659 * need to catch it up here.
1660 */
1661 start = end & ~(osb->s_clustersize - 1);
1662
1663 trace_ocfs2_zero_partial_clusters_range2(
1664 (unsigned long long)start, (unsigned long long)end);
1665
1666 ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
1667 if (ret)
1668 mlog_errno(ret);
1669 }
1670 ocfs2_update_inode_fsync_trans(handle, inode, 1);
1671
1672 ocfs2_commit_trans(osb, handle);
1673out:
1674 return ret;
1675}
1676
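/*
 * Find the last extent record in @el whose cpos is below @pos. Returns
 * the record's index, or -1 if every record starts at or beyond @pos.
 */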
1677static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
1678{
1679 int i;
1680 struct ocfs2_extent_rec *rec = NULL;
1681
1682 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
1683
1684 rec = &el->l_recs[i];
1685
1686 if (le32_to_cpu(rec->e_cpos) < pos)
1687 break;
1688 }
1689
1690 return i;
1691}
1692
1693/*
1694 * Helper to calculate the punching pos and length in one run, we handle the
1695 * following three cases in order:
1696 *
1697 * - remove the entire record
1698 * - remove a partial record
1699 * - no record needs to be removed (hole-punching completed)
1700*/
1701static void ocfs2_calc_trunc_pos(struct inode *inode,
1702 struct ocfs2_extent_list *el,
1703 struct ocfs2_extent_rec *rec,
1704 u32 trunc_start, u32 *trunc_cpos,
1705 u32 *trunc_len, u32 *trunc_end,
1706 u64 *blkno, int *done)
1707{
1708 int ret = 0;
1709 u32 coff, range;
1710
1711 range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
1712
1713 if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
1714 /*
1715 * remove an entire extent record.
1716 */
1717 *trunc_cpos = le32_to_cpu(rec->e_cpos);
1718 /*
1719 * Skip holes if any.
1720 */
1721 if (range < *trunc_end)
1722 *trunc_end = range;
1723 *trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
1724 *blkno = le64_to_cpu(rec->e_blkno);
1725 *trunc_end = le32_to_cpu(rec->e_cpos);
1726 } else if (range > trunc_start) {
1727 /*
1728 * remove a partial extent record, which means we're
1729 * removing the last extent record.
1730 */
1731 *trunc_cpos = trunc_start;
1732 /*
1733 * Skip holes if any.
1734 */
1735 if (range < *trunc_end)
1736 *trunc_end = range;
1737 *trunc_len = *trunc_end - trunc_start;
1738 coff = trunc_start - le32_to_cpu(rec->e_cpos);
1739 *blkno = le64_to_cpu(rec->e_blkno) +
1740 ocfs2_clusters_to_blocks(inode->i_sb, coff);
1741 *trunc_end = trunc_start;
1742 } else {
1743 /*
1744 * There are two possibilities here:
1745 *
1746 * - the last record has been removed
1747 * - trunc_start was within a hole
1748 *
1749 * Either case means hole punching is complete.
1750 */
1751 ret = 1;
1752 }
1753
1754 *done = ret;
1755}
1756
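/*
 * Punch a hole in the byte range [byte_start, byte_start + byte_len) of
 * an inode. Inline data is truncated within the dinode itself; otherwise
 * the partial clusters at the edges are zeroed and whole clusters are
 * removed from the extent tree, walking the leaf records right to left.
 */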
1757int ocfs2_remove_inode_range(struct inode *inode,
1758 struct buffer_head *di_bh, u64 byte_start,
1759 u64 byte_len)
1760{
1761 int ret = 0, flags = 0, done = 0, i;
1762 u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
1763 u32 cluster_in_el;
1764 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1765 struct ocfs2_cached_dealloc_ctxt dealloc;
1766 struct address_space *mapping = inode->i_mapping;
1767 struct ocfs2_extent_tree et;
1768 struct ocfs2_path *path = NULL;
1769 struct ocfs2_extent_list *el = NULL;
1770 struct ocfs2_extent_rec *rec = NULL;
1771 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
1772 u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);
1773
1774 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
1775 ocfs2_init_dealloc_ctxt(&dealloc);
1776
1777 trace_ocfs2_remove_inode_range(
1778 (unsigned long long)OCFS2_I(inode)->ip_blkno,
1779 (unsigned long long)byte_start,
1780 (unsigned long long)byte_len);
1781
1782 if (byte_len == 0)
1783 return 0;
1784
1785 if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
1786 ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
1787 byte_start + byte_len, 0);
1788 if (ret) {
1789 mlog_errno(ret);
1790 goto out;
1791 }
1792 /*
1793 * There's no need to get fancy with the page cache
1794 * truncate of an inline-data inode. We're talking
1795 * about less than a page here, which will be cached
1796 * in the dinode buffer anyway.
1797 */
1798 unmap_mapping_range(mapping, 0, 0, 0);
1799 truncate_inode_pages(mapping, 0);
1800 goto out;
1801 }
1802
1803 /*
1804 * For reflinks, we may need to CoW the two clusters that will be
1805 * partially zeroed later, i.e. when the hole's start or end offset
1806 * falls within a cluster (is not exactly cluster aligned).
1807 */
1808
1809 if (ocfs2_is_refcount_inode(inode)) {
1810 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
1811 if (ret) {
1812 mlog_errno(ret);
1813 goto out;
1814 }
1815
1816 ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
1817 if (ret) {
1818 mlog_errno(ret);
1819 goto out;
1820 }
1821 }
1822
1823 trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
1824 trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
1825 cluster_in_el = trunc_end;
1826
1827 ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
1828 if (ret) {
1829 mlog_errno(ret);
1830 goto out;
1831 }
1832
1833 path = ocfs2_new_path_from_et(&et);
1834 if (!path) {
1835 ret = -ENOMEM;
1836 mlog_errno(ret);
1837 goto out;
1838 }
1839
1840 while (trunc_end > trunc_start) {
1841
1842 ret = ocfs2_find_path(INODE_CACHE(inode), path,
1843 cluster_in_el);
1844 if (ret) {
1845 mlog_errno(ret);
1846 goto out;
1847 }
1848
1849 el = path_leaf_el(path);
1850
1851 i = ocfs2_find_rec(el, trunc_end);
1852 /*
1853 * Need to go to previous extent block.
1854 */
1855 if (i < 0) {
1856 if (path->p_tree_depth == 0)
1857 break;
1858
1859 ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
1860 path,
1861 &cluster_in_el);
1862 if (ret) {
1863 mlog_errno(ret);
1864 goto out;
1865 }
1866
1867 /*
1868 * We've reached the leftmost extent block;
1869 * it's safe to leave.
1870 */
1871 if (cluster_in_el == 0)
1872 break;
1873
1874 /*
1875 * The cpos found for the previous extent block is
1876 * always one cluster less than the actual trunc_end.
1877 */
1878 trunc_end = cluster_in_el + 1;
1879
1880 ocfs2_reinit_path(path, 1);
1881
1882 continue;
1883
1884 } else
1885 rec = &el->l_recs[i];
1886
1887 ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
1888 &trunc_len, &trunc_end, &blkno, &done);
1889 if (done)
1890 break;
1891
1892 flags = rec->e_flags;
1893 phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
1894
1895 ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
1896 phys_cpos, trunc_len, flags,
1897 &dealloc, refcount_loc, false);
1898 if (ret < 0) {
1899 mlog_errno(ret);
1900 goto out;
1901 }
1902
1903 cluster_in_el = trunc_end;
1904
1905 ocfs2_reinit_path(path, 1);
1906 }
1907
1908 ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);
1909
1910out:
1911 ocfs2_free_path(path);
1912 ocfs2_schedule_truncate_log_flush(osb, 1);
1913 ocfs2_run_deallocs(osb, &dealloc);
1914
1915 return ret;
1916}
1917
1918/*
1919 * Parts of this function taken from xfs_change_file_space()
1920 */
1921static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
1922 loff_t f_pos, unsigned int cmd,
1923 struct ocfs2_space_resv *sr,
1924 int change_size)
1925{
1926 int ret;
1927 s64 llen;
1928 loff_t size, orig_isize;
1929 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1930 struct buffer_head *di_bh = NULL;
1931 handle_t *handle;
1932 unsigned long long max_off = inode->i_sb->s_maxbytes;
1933
1934 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
1935 return -EROFS;
1936
1937 inode_lock(inode);
1938
1939 /*
1940 * This prevents concurrent writes on other nodes
1941 */
1942 ret = ocfs2_rw_lock(inode, 1);
1943 if (ret) {
1944 mlog_errno(ret);
1945 goto out;
1946 }
1947
1948 ret = ocfs2_inode_lock(inode, &di_bh, 1);
1949 if (ret) {
1950 mlog_errno(ret);
1951 goto out_rw_unlock;
1952 }
1953
1954 if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
1955 ret = -EPERM;
1956 goto out_inode_unlock;
1957 }
1958
1959 switch (sr->l_whence) {
1960 case 0: /*SEEK_SET*/
1961 break;
1962 case 1: /*SEEK_CUR*/
1963 sr->l_start += f_pos;
1964 break;
1965 case 2: /*SEEK_END*/
1966 sr->l_start += i_size_read(inode);
1967 break;
1968 default:
1969 ret = -EINVAL;
1970 goto out_inode_unlock;
1971 }
1972 sr->l_whence = 0;
1973
1974 llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;
1975
1976 if (sr->l_start < 0
1977 || sr->l_start > max_off
1978 || (sr->l_start + llen) < 0
1979 || (sr->l_start + llen) > max_off) {
1980 ret = -EINVAL;
1981 goto out_inode_unlock;
1982 }
1983 size = sr->l_start + sr->l_len;
1984
1985 if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
1986 cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
1987 if (sr->l_len <= 0) {
1988 ret = -EINVAL;
1989 goto out_inode_unlock;
1990 }
1991 }
1992
1993 if (file && setattr_should_drop_suidgid(&nop_mnt_idmap, file_inode(file))) {
1994 ret = __ocfs2_write_remove_suid(inode, di_bh);
1995 if (ret) {
1996 mlog_errno(ret);
1997 goto out_inode_unlock;
1998 }
1999 }
2000
2001 down_write(&OCFS2_I(inode)->ip_alloc_sem);
2002 switch (cmd) {
2003 case OCFS2_IOC_RESVSP:
2004 case OCFS2_IOC_RESVSP64:
2005 /*
2006 * This takes unsigned offsets, but the signed ones we
2007 * pass have been checked against overflow above.
2008 */
2009 ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
2010 sr->l_len);
2011 break;
2012 case OCFS2_IOC_UNRESVSP:
2013 case OCFS2_IOC_UNRESVSP64:
2014 ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
2015 sr->l_len);
2016 break;
2017 default:
2018 ret = -EINVAL;
2019 }
2020
2021 orig_isize = i_size_read(inode);
2022 /* Zero out EOF blocks in the cluster. */
2023 if (!ret && change_size && orig_isize < size) {
2024 ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
2025 size - orig_isize);
2026 if (!ret)
2027 i_size_write(inode, size);
2028 }
2029 up_write(&OCFS2_I(inode)->ip_alloc_sem);
2030 if (ret) {
2031 mlog_errno(ret);
2032 goto out_inode_unlock;
2033 }
2034
2035 /*
2036 * We update c/mtime for these changes
2037 */
2038 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
2039 if (IS_ERR(handle)) {
2040 ret = PTR_ERR(handle);
2041 mlog_errno(ret);
2042 goto out_inode_unlock;
2043 }
2044
2045 inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
2046 ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
2047 if (ret < 0)
2048 mlog_errno(ret);
2049
2050 if (file && (file->f_flags & O_SYNC))
2051 handle->h_sync = 1;
2052
2053 ocfs2_commit_trans(osb, handle);
2054
2055out_inode_unlock:
2056 brelse(di_bh);
2057 ocfs2_inode_unlock(inode, 1);
2058out_rw_unlock:
2059 ocfs2_rw_unlock(inode, 1);
2060
2061out:
2062 inode_unlock(inode);
2063 return ret;
2064}
2065
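/*
 * ioctl backend for OCFS2_IOC_RESVSP(64) and OCFS2_IOC_UNRESVSP(64):
 * preallocate or deallocate space for a file without changing i_size
 * (note change_size == 0 in the call below). For illustration only, a
 * hypothetical userspace caller might reserve 16MB like so:
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,		(0 == SEEK_SET)
 *		.l_start = 0,
 *		.l_len = 16 * 1024 * 1024,
 *	};
 *	ioctl(fd, OCFS2_IOC_RESVSP64, &sr);
 */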
2066int ocfs2_change_file_space(struct file *file, unsigned int cmd,
2067 struct ocfs2_space_resv *sr)
2068{
2069 struct inode *inode = file_inode(file);
2070 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2071 int ret;
2072
2073 if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
2074 !ocfs2_writes_unwritten_extents(osb))
2075 return -ENOTTY;
2076 else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
2077 !ocfs2_sparse_alloc(osb))
2078 return -ENOTTY;
2079
2080 if (!S_ISREG(inode->i_mode))
2081 return -EINVAL;
2082
2083 if (!(file->f_mode & FMODE_WRITE))
2084 return -EBADF;
2085
2086 ret = mnt_want_write_file(file);
2087 if (ret)
2088 return ret;
2089 ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
2090 mnt_drop_write_file(file);
2091 return ret;
2092}
2093
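/*
 * fallocate() backend. Only FALLOC_FL_KEEP_SIZE and FALLOC_FL_PUNCH_HOLE
 * are supported, and both map onto the space reservation machinery above
 * (hole punching becomes OCFS2_IOC_UNRESVSP64). For illustration, a
 * userspace caller would punch a hole with:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 */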
2094static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
2095 loff_t len)
2096{
2097 struct inode *inode = file_inode(file);
2098 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2099 struct ocfs2_space_resv sr;
2100 int change_size = 1;
2101 int cmd = OCFS2_IOC_RESVSP64;
2102 int ret = 0;
2103
2104 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2105 return -EOPNOTSUPP;
2106 if (!ocfs2_writes_unwritten_extents(osb))
2107 return -EOPNOTSUPP;
2108
2109 if (mode & FALLOC_FL_KEEP_SIZE) {
2110 change_size = 0;
2111 } else {
2112 ret = inode_newsize_ok(inode, offset + len);
2113 if (ret)
2114 return ret;
2115 }
2116
2117 if (mode & FALLOC_FL_PUNCH_HOLE)
2118 cmd = OCFS2_IOC_UNRESVSP64;
2119
2120 sr.l_whence = 0;
2121 sr.l_start = (s64)offset;
2122 sr.l_len = (s64)len;
2123
2124 return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
2125 change_size);
2126}
2127
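/*
 * Returns 1 if any cluster in the byte range [pos, pos + count) is marked
 * refcounted (shared by a reflink), 0 if none is, or a negative error code.
 */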
2128int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
2129 size_t count)
2130{
2131 int ret = 0;
2132 unsigned int extent_flags;
2133 u32 cpos, clusters, extent_len, phys_cpos;
2134 struct super_block *sb = inode->i_sb;
2135
2136 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
2137 !ocfs2_is_refcount_inode(inode) ||
2138 OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
2139 return 0;
2140
2141 cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
2142 clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;
2143
2144 while (clusters) {
2145 ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
2146 &extent_flags);
2147 if (ret < 0) {
2148 mlog_errno(ret);
2149 goto out;
2150 }
2151
2152 if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
2153 ret = 1;
2154 break;
2155 }
2156
2157 if (extent_len > clusters)
2158 extent_len = clusters;
2159
2160 clusters -= extent_len;
2161 cpos += extent_len;
2162 }
2163out:
2164 return ret;
2165}
2166
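/*
 * Returns 1 if either end of the IO is not aligned to the filesystem
 * block size.
 */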
2167static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
2168{
2169 int blockmask = inode->i_sb->s_blocksize - 1;
2170 loff_t final_size = pos + count;
2171
2172 if ((pos & blockmask) || (final_size & blockmask))
2173 return 1;
2174 return 0;
2175}
2176
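/*
 * Take the cluster inode lock at @meta_level plus ip_alloc_sem (read or
 * write as selected by @write_sem). When @wait is zero, the trylock
 * variants are used and -EAGAIN is returned instead of blocking.
 */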
2177static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
2178 struct buffer_head **di_bh,
2179 int meta_level,
2180 int write_sem,
2181 int wait)
2182{
2183 int ret = 0;
2184
2185 if (wait)
2186 ret = ocfs2_inode_lock(inode, di_bh, meta_level);
2187 else
2188 ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
2189 if (ret < 0)
2190 goto out;
2191
2192 if (wait) {
2193 if (write_sem)
2194 down_write(&OCFS2_I(inode)->ip_alloc_sem);
2195 else
2196 down_read(&OCFS2_I(inode)->ip_alloc_sem);
2197 } else {
2198 if (write_sem)
2199 ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2200 else
2201 ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
2202
2203 if (!ret) {
2204 ret = -EAGAIN;
2205 goto out_unlock;
2206 }
2207 }
2208
2209 return ret;
2210
2211out_unlock:
2212 brelse(*di_bh);
2213 *di_bh = NULL;
2214 ocfs2_inode_unlock(inode, meta_level);
2215out:
2216 return ret;
2217}
2218
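/*
 * Undo ocfs2_inode_lock_for_extent_tree(): drop ip_alloc_sem, release the
 * dinode buffer and, if one is held, the cluster inode lock.
 */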
2219static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
2220 struct buffer_head **di_bh,
2221 int meta_level,
2222 int write_sem)
2223{
2224 if (write_sem)
2225 up_write(&OCFS2_I(inode)->ip_alloc_sem);
2226 else
2227 up_read(&OCFS2_I(inode)->ip_alloc_sem);
2228
2229 brelse(*di_bh);
2230 *di_bh = NULL;
2231
2232 if (meta_level >= 0)
2233 ocfs2_inode_unlock(inode, meta_level);
2234}
2235
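/*
 * Called before the write proper: clears suid/sgid when required and CoWs
 * any refcounted clusters in the write range, escalating from a read level
 * meta lock to EX only when such modifications are needed.
 */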
2236static int ocfs2_prepare_inode_for_write(struct file *file,
2237 loff_t pos, size_t count, int wait)
2238{
2239 int ret = 0, meta_level = 0, overwrite_io = 0;
2240 int write_sem = 0;
2241 struct dentry *dentry = file->f_path.dentry;
2242 struct inode *inode = d_inode(dentry);
2243 struct buffer_head *di_bh = NULL;
2244 u32 cpos;
2245 u32 clusters;
2246
2247 /*
2248 * We start with a read level meta lock and only jump to EX
2249 * if we need to make modifications here.
2250 */
2251 for(;;) {
2252 ret = ocfs2_inode_lock_for_extent_tree(inode,
2253 &di_bh,
2254 meta_level,
2255 write_sem,
2256 wait);
2257 if (ret < 0) {
2258 if (ret != -EAGAIN)
2259 mlog_errno(ret);
2260 goto out;
2261 }
2262
2263 /*
2264 * Check if the IO will overwrite allocated blocks when the
2265 * IOCB_NOWAIT flag is set.
2266 */
2267 if (!wait && !overwrite_io) {
2268 overwrite_io = 1;
2269
2270 ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
2271 if (ret < 0) {
2272 if (ret != -EAGAIN)
2273 mlog_errno(ret);
2274 goto out_unlock;
2275 }
2276 }
2277
2278 /* Clear suid / sgid if necessary. We do this here
2279 * instead of later in the write path because
2280 * remove_suid() calls ->setattr without any hint that
2281 * we may have already done our cluster locking. Since
2282 * ocfs2_setattr() *must* take cluster locks to
2283 * proceed, this would lead us to recursively lock the
2284 * inode. There's also the dinode i_size state which
2285 * can be lost via setattr during extending writes (we
2286 * set inode->i_size at the end of a write). */
2287 if (setattr_should_drop_suidgid(&nop_mnt_idmap, inode)) {
2288 if (meta_level == 0) {
2289 ocfs2_inode_unlock_for_extent_tree(inode,
2290 &di_bh,
2291 meta_level,
2292 write_sem);
2293 meta_level = 1;
2294 continue;
2295 }
2296
2297 ret = ocfs2_write_remove_suid(inode);
2298 if (ret < 0) {
2299 mlog_errno(ret);
2300 goto out_unlock;
2301 }
2302 }
2303
2304 ret = ocfs2_check_range_for_refcount(inode, pos, count);
2305 if (ret == 1) {
2306 ocfs2_inode_unlock_for_extent_tree(inode,
2307 &di_bh,
2308 meta_level,
2309 write_sem);
2310 meta_level = 1;
2311 write_sem = 1;
2312 ret = ocfs2_inode_lock_for_extent_tree(inode,
2313 &di_bh,
2314 meta_level,
2315 write_sem,
2316 wait);
2317 if (ret < 0) {
2318 if (ret != -EAGAIN)
2319 mlog_errno(ret);
2320 goto out;
2321 }
2322
2323 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
2324 clusters =
2325 ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
2326 ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
2327 }
2328
2329 if (ret < 0) {
2330 if (ret != -EAGAIN)
2331 mlog_errno(ret);
2332 goto out_unlock;
2333 }
2334
2335 break;
2336 }
2337
2338out_unlock:
2339 trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
2340 pos, count, wait);
2341
2342 ocfs2_inode_unlock_for_extent_tree(inode,
2343 &di_bh,
2344 meta_level,
2345 write_sem);
2346
2347out:
2348 return ret;
2349}
2350
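/*
 * ->write_iter() for ocfs2 files. Works out the rw cluster lock level the
 * write needs (EX for buffered, appending or fully coherent O_DIRECT
 * writes), prepares the inode, then hands off to
 * __generic_file_write_iter(). O_DSYNC buffered writes and IS_SYNC()
 * inodes are finished off with an explicit journal commit.
 */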
2351static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
2352 struct iov_iter *from)
2353{
2354 int rw_level;
2355 ssize_t written = 0;
2356 ssize_t ret;
2357 size_t count = iov_iter_count(from);
2358 struct file *file = iocb->ki_filp;
2359 struct inode *inode = file_inode(file);
2360 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
2361 int full_coherency = !(osb->s_mount_opt &
2362 OCFS2_MOUNT_COHERENCY_BUFFERED);
2363 void *saved_ki_complete = NULL;
2364 int append_write = ((iocb->ki_pos + count) >=
2365 i_size_read(inode) ? 1 : 0);
2366 int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2367 int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2368
2369 trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
2370 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2371 file->f_path.dentry->d_name.len,
2372 file->f_path.dentry->d_name.name,
2373 (unsigned int)from->nr_segs); /* GRRRRR */
2374
2375 if (!direct_io && nowait)
2376 return -EOPNOTSUPP;
2377
2378 if (count == 0)
2379 return 0;
2380
2381 if (nowait) {
2382 if (!inode_trylock(inode))
2383 return -EAGAIN;
2384 } else
2385 inode_lock(inode);
2386
2387 /*
2388 * Concurrent O_DIRECT writes are allowed with the
2389 * mount option "coherency=buffered".
2390 * For append writes, we must take rw EX.
2391 */
2392 rw_level = (!direct_io || full_coherency || append_write);
2393
2394 if (nowait)
2395 ret = ocfs2_try_rw_lock(inode, rw_level);
2396 else
2397 ret = ocfs2_rw_lock(inode, rw_level);
2398 if (ret < 0) {
2399 if (ret != -EAGAIN)
2400 mlog_errno(ret);
2401 goto out_mutex;
2402 }
2403
2404 /*
2405 * O_DIRECT writes with "coherency=full" need to take EX cluster
2406 * inode_lock to guarantee coherency.
2407 */
2408 if (direct_io && full_coherency) {
2409 /*
2410 * We need to take and drop the inode lock to force
2411 * other nodes to drop their caches. Buffered I/O
2412 * already does this in write_begin().
2413 */
2414 if (nowait)
2415 ret = ocfs2_try_inode_lock(inode, NULL, 1);
2416 else
2417 ret = ocfs2_inode_lock(inode, NULL, 1);
2418 if (ret < 0) {
2419 if (ret != -EAGAIN)
2420 mlog_errno(ret);
2421 goto out;
2422 }
2423
2424 ocfs2_inode_unlock(inode, 1);
2425 }
2426
2427 ret = generic_write_checks(iocb, from);
2428 if (ret <= 0) {
2429 if (ret)
2430 mlog_errno(ret);
2431 goto out;
2432 }
2433 count = ret;
2434
2435 ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
2436 if (ret < 0) {
2437 if (ret != -EAGAIN)
2438 mlog_errno(ret);
2439 goto out;
2440 }
2441
2442 if (direct_io && !is_sync_kiocb(iocb) &&
2443 ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
2444 /*
2445 * Make it a sync io if it's an unaligned aio.
2446 */
2447 saved_ki_complete = xchg(&iocb->ki_complete, NULL);
2448 }
2449
2450 /* communicate with ocfs2_dio_end_io */
2451 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2452
2453 written = __generic_file_write_iter(iocb, from);
2454 /* buffered aio wouldn't have proper lock coverage today */
2455 BUG_ON(written == -EIOCBQUEUED && !direct_io);
2456
2457 /*
2458 * Deep in __generic_file_write_iter()->ocfs2_direct_IO we pass in an
2459 * ocfs2_dio_end_io function pointer, which is called when O_DIRECT io
2460 * completes so that it can unlock our rw lock.
2461 * Unfortunately there are error cases which call end_io and others
2462 * that don't, so we don't have to unlock the rw_lock if either an
2463 * async dio is going to do it in the future or an end_io after an
2464 * error has already done it.
2465 */
2466 if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
2467 rw_level = -1;
2468 }
2469
2470 if (unlikely(written <= 0))
2471 goto out;
2472
2473 if (((file->f_flags & O_DSYNC) && !direct_io) ||
2474 IS_SYNC(inode)) {
2475 ret = filemap_fdatawrite_range(file->f_mapping,
2476 iocb->ki_pos - written,
2477 iocb->ki_pos - 1);
2478 if (ret < 0)
2479 written = ret;
2480
2481 if (!ret) {
2482 ret = jbd2_journal_force_commit(osb->journal->j_journal);
2483 if (ret < 0)
2484 written = ret;
2485 }
2486
2487 if (!ret)
2488 ret = filemap_fdatawait_range(file->f_mapping,
2489 iocb->ki_pos - written,
2490 iocb->ki_pos - 1);
2491 }
2492
2493out:
2494 if (saved_ki_complete)
2495 xchg(&iocb->ki_complete, saved_ki_complete);
2496
2497 if (rw_level != -1)
2498 ocfs2_rw_unlock(inode, rw_level);
2499
2500out_mutex:
2501 inode_unlock(inode);
2502
2503 if (written)
2504 ret = written;
2505 return ret;
2506}
2507
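/*
 * ->read_iter() for ocfs2 files. Buffered reads protect themselves in
 * ->read_folio(); O_DIRECT reads additionally take the rw cluster lock at
 * read level so pending reads cannot race with a truncate.
 */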
2508static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
2509 struct iov_iter *to)
2510{
2511 int ret = 0, rw_level = -1, lock_level = 0;
2512 struct file *filp = iocb->ki_filp;
2513 struct inode *inode = file_inode(filp);
2514 int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
2515 int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;
2516
2517 trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
2518 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2519 filp->f_path.dentry->d_name.len,
2520 filp->f_path.dentry->d_name.name,
2521 to->nr_segs); /* GRRRRR */
2522
2523
2524 if (!inode) {
2525 ret = -EINVAL;
2526 mlog_errno(ret);
2527 goto bail;
2528 }
2529
2530 if (!direct_io && nowait)
2531 return -EOPNOTSUPP;
2532
2533 /*
2534 * buffered reads protect themselves in ->read_folio(). O_DIRECT reads
2535 * need locks to protect pending reads from racing with truncate.
2536 */
2537 if (direct_io) {
2538 if (nowait)
2539 ret = ocfs2_try_rw_lock(inode, 0);
2540 else
2541 ret = ocfs2_rw_lock(inode, 0);
2542
2543 if (ret < 0) {
2544 if (ret != -EAGAIN)
2545 mlog_errno(ret);
2546 goto bail;
2547 }
2548 rw_level = 0;
2549 /* communicate with ocfs2_dio_end_io */
2550 ocfs2_iocb_set_rw_locked(iocb, rw_level);
2551 }
2552
2553 /*
2554 * We're fine letting folks race truncates and extending
2555 * writes with read across the cluster, just like they can
2556 * locally. Hence no rw_lock during read.
2557 *
2558 * Take and drop the meta data lock to update inode fields
2559 * like i_size. This gives the checks down below in
2560 * generic_file_read_iter() a chance of actually working.
2561 */
2562 ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
2563 !nowait);
2564 if (ret < 0) {
2565 if (ret != -EAGAIN)
2566 mlog_errno(ret);
2567 goto bail;
2568 }
2569 ocfs2_inode_unlock(inode, lock_level);
2570
2571 ret = generic_file_read_iter(iocb, to);
2572 trace_generic_file_read_iter_ret(ret);
2573
2574 /* buffered aio wouldn't have proper lock coverage today */
2575 BUG_ON(ret == -EIOCBQUEUED && !direct_io);
2576
2577 /* see ocfs2_file_write_iter */
2578 if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
2579 rw_level = -1;
2580 }
2581
2582bail:
2583 if (rw_level != -1)
2584 ocfs2_rw_unlock(inode, rw_level);
2585
2586 return ret;
2587}
2588
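/*
 * ->splice_read() for ocfs2 files: refresh inode fields such as i_size by
 * taking and dropping the meta data lock, then let filemap_splice_read()
 * do the actual work.
 */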
2589static ssize_t ocfs2_file_splice_read(struct file *in, loff_t *ppos,
2590 struct pipe_inode_info *pipe,
2591 size_t len, unsigned int flags)
2592{
2593 struct inode *inode = file_inode(in);
2594 ssize_t ret = 0;
2595 int lock_level = 0;
2596
2597 trace_ocfs2_file_splice_read(inode, in, in->f_path.dentry,
2598 (unsigned long long)OCFS2_I(inode)->ip_blkno,
2599 in->f_path.dentry->d_name.len,
2600 in->f_path.dentry->d_name.name,
2601 flags);
2602
2603 /*
2604 * We're fine letting folks race truncates and extending writes with
2605 * read across the cluster, just like they can locally. Hence no
2606 * rw_lock during read.
2607 *
2608 * Take and drop the meta data lock to update inode fields like i_size.
2609 * This gives the checks down below in filemap_splice_read() a chance
2610 * of actually working.
2611 */
2612 ret = ocfs2_inode_lock_atime(inode, in->f_path.mnt, &lock_level, 1);
2613 if (ret < 0) {
2614 if (ret != -EAGAIN)
2615 mlog_errno(ret);
2616 goto bail;
2617 }
2618 ocfs2_inode_unlock(inode, lock_level);
2619
2620 ret = filemap_splice_read(in, ppos, pipe, len, flags);
2621 trace_filemap_splice_read_ret(ret);
2622bail:
2623 return ret;
2624}
2625
2626 /* See generic_file_llseek_unlocked(). */
2627static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
2628{
2629 struct inode *inode = file->f_mapping->host;
2630 int ret = 0;
2631
2632 inode_lock(inode);
2633
2634 switch (whence) {
2635 case SEEK_SET:
2636 break;
2637 case SEEK_END:
2638 /* SEEK_END requires the OCFS2 inode lock for the file
2639 * because it references the file's size.
2640 */
2641 ret = ocfs2_inode_lock(inode, NULL, 0);
2642 if (ret < 0) {
2643 mlog_errno(ret);
2644 goto out;
2645 }
2646 offset += i_size_read(inode);
2647 ocfs2_inode_unlock(inode, 0);
2648 break;
2649 case SEEK_CUR:
2650 if (offset == 0) {
2651 offset = file->f_pos;
2652 goto out;
2653 }
2654 offset += file->f_pos;
2655 break;
2656 case SEEK_DATA:
2657 case SEEK_HOLE:
2658 ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
2659 if (ret)
2660 goto out;
2661 break;
2662 default:
2663 ret = -EINVAL;
2664 goto out;
2665 }
2666
2667 offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
2668
2669out:
2670 inode_unlock(inode);
2671 if (ret)
2672 return ret;
2673 return offset;
2674}
2675
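/*
 * ->remap_file_range() backend for reflink and dedupe. Locks both inodes
 * against IO, zaps the destination's page cache, shares the block range
 * via the refcount tree, empties both extent maps and finally updates the
 * destination inode.
 */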
2676static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
2677 struct file *file_out, loff_t pos_out,
2678 loff_t len, unsigned int remap_flags)
2679{
2680 struct inode *inode_in = file_inode(file_in);
2681 struct inode *inode_out = file_inode(file_out);
2682 struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
2683 struct buffer_head *in_bh = NULL, *out_bh = NULL;
2684 bool same_inode = (inode_in == inode_out);
2685 loff_t remapped = 0;
2686 ssize_t ret;
2687
2688 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
2689 return -EINVAL;
2690 if (!ocfs2_refcount_tree(osb))
2691 return -EOPNOTSUPP;
2692 if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
2693 return -EROFS;
2694
2695 /* Lock both files against IO */
2696 ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
2697 if (ret)
2698 return ret;
2699
2700 /* Check file eligibility and prepare for block sharing. */
2701 ret = -EINVAL;
2702 if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
2703 (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
2704 goto out_unlock;
2705
2706 ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
2707 &len, remap_flags);
2708 if (ret < 0 || len == 0)
2709 goto out_unlock;
2710
2711 /* Lock out changes to the allocation maps and remap. */
2712 down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2713 if (!same_inode)
2714 down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
2715 SINGLE_DEPTH_NESTING);
2716
2717 /* Zap any page cache for the destination file's range. */
2718 truncate_inode_pages_range(&inode_out->i_data,
2719 round_down(pos_out, PAGE_SIZE),
2720 round_up(pos_out + len, PAGE_SIZE) - 1);
2721
2722 remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
2723 inode_out, out_bh, pos_out, len);
2724 up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
2725 if (!same_inode)
2726 up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
2727 if (remapped < 0) {
2728 ret = remapped;
2729 mlog_errno(ret);
2730 goto out_unlock;
2731 }
2732
2733 /*
2734 * Empty the extent map so that we may get the right extent
2735 * record from the disk.
2736 */
2737 ocfs2_extent_map_trunc(inode_in, 0);
2738 ocfs2_extent_map_trunc(inode_out, 0);
2739
2740 ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
2741 if (ret) {
2742 mlog_errno(ret);
2743 goto out_unlock;
2744 }
2745
2746out_unlock:
2747 ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
2748 return remapped > 0 ? remapped : ret;
2749}
2750
2751const struct inode_operations ocfs2_file_iops = {
2752 .setattr = ocfs2_setattr,
2753 .getattr = ocfs2_getattr,
2754 .permission = ocfs2_permission,
2755 .listxattr = ocfs2_listxattr,
2756 .fiemap = ocfs2_fiemap,
2757 .get_inode_acl = ocfs2_iop_get_acl,
2758 .set_acl = ocfs2_iop_set_acl,
2759 .fileattr_get = ocfs2_fileattr_get,
2760 .fileattr_set = ocfs2_fileattr_set,
2761};
2762
2763const struct inode_operations ocfs2_special_file_iops = {
2764 .setattr = ocfs2_setattr,
2765 .getattr = ocfs2_getattr,
2766 .permission = ocfs2_permission,
2767 .get_inode_acl = ocfs2_iop_get_acl,
2768 .set_acl = ocfs2_iop_set_acl,
2769};
2770
2771/*
2772 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
2773 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
2774 */
2775const struct file_operations ocfs2_fops = {
2776 .llseek = ocfs2_file_llseek,
2777 .mmap = ocfs2_mmap,
2778 .fsync = ocfs2_sync_file,
2779 .release = ocfs2_file_release,
2780 .open = ocfs2_file_open,
2781 .read_iter = ocfs2_file_read_iter,
2782 .write_iter = ocfs2_file_write_iter,
2783 .unlocked_ioctl = ocfs2_ioctl,
2784#ifdef CONFIG_COMPAT
2785 .compat_ioctl = ocfs2_compat_ioctl,
2786#endif
2787 .lock = ocfs2_lock,
2788 .flock = ocfs2_flock,
2789 .splice_read = ocfs2_file_splice_read,
2790 .splice_write = iter_file_splice_write,
2791 .fallocate = ocfs2_fallocate,
2792 .remap_file_range = ocfs2_remap_file_range,
2793};
2794
2795WRAP_DIR_ITER(ocfs2_readdir) // FIXME!
2796const struct file_operations ocfs2_dops = {
2797 .llseek = generic_file_llseek,
2798 .read = generic_read_dir,
2799 .iterate_shared = shared_ocfs2_readdir,
2800 .fsync = ocfs2_sync_file,
2801 .release = ocfs2_dir_release,
2802 .open = ocfs2_dir_open,
2803 .unlocked_ioctl = ocfs2_ioctl,
2804#ifdef CONFIG_COMPAT
2805 .compat_ioctl = ocfs2_compat_ioctl,
2806#endif
2807 .lock = ocfs2_lock,
2808 .flock = ocfs2_flock,
2809};
2810
2811/*
2812 * POSIX-lockless variants of our file_operations.
2813 *
2814 * These will be used if the underlying cluster stack does not support
2815 * posix file locking, if the user passes the "localflocks" mount
2816 * option, or if we have a local-only fs.
2817 *
2818 * ocfs2_flock is in here because all stacks handle UNIX file locks,
2819 * so we still want it in the case of no stack support for
2820 * plocks. Internally, it will do the right thing when asked to ignore
2821 * the cluster.
2822 */
2823const struct file_operations ocfs2_fops_no_plocks = {
2824 .llseek = ocfs2_file_llseek,
2825 .mmap = ocfs2_mmap,
2826 .fsync = ocfs2_sync_file,
2827 .release = ocfs2_file_release,
2828 .open = ocfs2_file_open,
2829 .read_iter = ocfs2_file_read_iter,
2830 .write_iter = ocfs2_file_write_iter,
2831 .unlocked_ioctl = ocfs2_ioctl,
2832#ifdef CONFIG_COMPAT
2833 .compat_ioctl = ocfs2_compat_ioctl,
2834#endif
2835 .flock = ocfs2_flock,
2836 .splice_read = filemap_splice_read,
2837 .splice_write = iter_file_splice_write,
2838 .fallocate = ocfs2_fallocate,
2839 .remap_file_range = ocfs2_remap_file_range,
2840};
2841
2842const struct file_operations ocfs2_dops_no_plocks = {
2843 .llseek = generic_file_llseek,
2844 .read = generic_read_dir,
2845 .iterate_shared = shared_ocfs2_readdir,
2846 .fsync = ocfs2_sync_file,
2847 .release = ocfs2_dir_release,
2848 .open = ocfs2_dir_open,
2849 .unlocked_ioctl = ocfs2_ioctl,
2850#ifdef CONFIG_COMPAT
2851 .compat_ioctl = ocfs2_compat_ioctl,
2852#endif
2853 .flock = ocfs2_flock,
2854};