/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
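
/*
 * Illustrative sketch (editorial, not part of ext4): how the 32-bit
 * checksum value is split across the two on-disk fields by the helpers
 * above.  The names "csum", "lo" and "hi" below are local to this example.
 *
 *	__u32 csum = 0xAABBCCDD;
 *	__le16 lo = cpu_to_le16(csum & 0xFFFF);	// i_checksum_lo = 0xCCDD
 *	__le16 hi = cpu_to_le16(csum >> 16);	// i_checksum_hi = 0xAABB
 *
 * Small (128-byte) inodes have no room for i_checksum_hi, which is why
 * ext4_inode_csum_verify() masks the calculated value with 0xFFFF in that
 * case before comparing.
 */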

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
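
/*
 * Hedged usage sketch (editorial, not a function in this file): a caller
 * that frees blocks in a loop might refill its credits roughly like this,
 * making sure all modified buffers are dirtied against the handle *before*
 * the restart commits the transaction.  "needed" is an assumed local:
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed)) {
 *		err = ext4_truncate_restart_trans(handle, inode,
 *					ext4_blocks_for_truncate(inode));
 *		if (err)
 *			goto out;
 *	}
 *
 * ext4_blocks_for_truncate() is the same credit estimate used by
 * ext4_evict_inode() below.
 */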

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus a user could see stale data when trying
		 * to read them before the transaction is checkpointed.  So be
		 * careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks that need to be reserved
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}
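
/*
 * Worked example (editorial, illustrative numbers only): suppose an inode
 * holds i_reserved_data_blocks = 10 and i_reserved_meta_blocks = 3, and an
 * allocation just consumed used = 4 data blocks plus
 * i_allocated_meta_blocks = 1.  ext4_da_update_reserve_space() then leaves
 * i_reserved_data_blocks = 6 and i_reserved_meta_blocks = 2, and subtracts
 * 4 + 1 = 5 from s_dirtyclusters_counter.  Only when
 * i_reserved_data_blocks finally reaches zero is the remaining metadata
 * reservation given back as well.
 */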

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given
 * map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapping
 * based files.
 *
 * On success, it returns the number of blocks that were mapped or allocated.
 * If create==0 and the blocks are pre-allocated and uninitialized, the
 * result buffer head is unmapped.  If create==1, it will make sure the
 * buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case, the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) look up */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns the create = 0 case
	 * with the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * New block allocation and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We
		 * don't support fallocate for non-extent files, so we can
		 * update the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated
		 * blocks, set the BH_Da_Mapped bit on them.  It's important
		 * to do this under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
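
/*
 * Hedged usage sketch (editorial, not part of this file): a read-only
 * lookup of one logical block.  With EXT4_GET_BLOCKS_CREATE absent, no
 * allocation happens and a NULL handle is fine; "blk" is an assumed local:
 *
 *	struct ext4_map_blocks map;
 *	int ret;
 *
 *	map.m_lblk = blk;
 *	map.m_len = 1;
 *	ret = ext4_map_blocks(NULL, inode, &map, 0);
 *	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		pr_debug("lblk %u -> pblk %llu\n", blk,
 *			 (unsigned long long) map.m_pblk);
 *	else if (ret == 0)
 *		;	// hole, or delayed/unwritten block
 *	else
 *		;	// ret < 0: error
 *
 * ext4_getblk() below follows essentially this pattern before turning the
 * physical block number into a buffer_head.
 */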

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero.
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
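
/*
 * Hedged usage sketch (editorial, illustrative only): reading logical
 * block 0 of a directory inode "dir" (an assumed local).  With create == 0
 * the handle may be NULL:
 *
 *	struct buffer_head *bh;
 *	int err = 0;
 *
 *	bh = ext4_bread(NULL, dir, 0, 0, &err);
 *	if (!bh)
 *		return err;	// -EIO on read failure, 0 on a hole
 *	...parse bh->b_data...
 *	brelse(bh);
 *
 * Note the asymmetry: ext4_getblk() may return a !uptodate buffer, while
 * ext4_bread() additionally submits and waits for the read.
 */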

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
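
/*
 * Illustrative note (editorial sketch, not new ext4 API):
 * walk_page_buffers() is this file's generic per-buffer iterator.  A
 * typical call journals every buffer overlapping bytes [from, to) of a
 * page:
 *
 *	ret = walk_page_buffers(handle, page_buffers(page), from, to,
 *				NULL, do_journal_get_write_access);
 *
 * Other callbacks used below include write_end_fn, bget_one and bput_one.
 * When non-NULL, "partial" reports whether any buffer outside the range
 * was not uptodate, which is what decides if SetPageUptodate() is safe in
 * ext4_journalled_write_end().
 */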

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to the orphan list in case
	 * we allocate blocks but the write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes.
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if
		 * new_i_size is less than inode->i_size
		 * but greater than i_disksize. (hint: delalloc)
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* if we have allocated more blocks and copied
			 * less, we will have blocks allocated outside
			 * inode->i_size.  So truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
							page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size.  So truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* if we have allocated more blocks and copied
		 * less, we will have blocks allocated outside
		 * inode->i_size.  So truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve a single cluster located at lblock.
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks.
	 * The worst case is one extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
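
/*
 * Worked example (editorial, illustrative numbers only): with 4k blocks
 * and a bigalloc cluster ratio of 16, EXT4_C2B(sbi, 1) above reserves
 * quota for 16 blocks (one 64k cluster), and EXT4_NUM_B2C() rounds the
 * estimate from ext4_calc_metadata_amount() up to whole clusters.  So if
 * the estimate is 3 metadata blocks, md_needed becomes 1 cluster and
 * ext4_claim_free_clusters() is asked for md_needed + 1 = 2 clusters:
 * one for metadata plus one for the data cluster itself.
 */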

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidate page, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}
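
/*
 * Worked example for the lblk computation above (editorial, illustrative
 * numbers only): with 4k blocks and 4k pages, PAGE_CACHE_SHIFT - i_blkbits
 * is 0, so page->index is also the page's first logical block.  For
 * page->index = 100, s_cluster_bits = 4 and num_clusters = 2, the loop
 * probes lblk = 100 + (1 << 4) = 116 and then lblk = 100, releasing one
 * reserved cluster for each probed cluster that no longer contains any
 * delayed-allocated block (ext4_find_delalloc_cluster() returns 0).
 */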

/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through an extent of pages and tries to write
 * them with the writepage() callback.
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated.  This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/*
				 * Skip the page if block allocation is
				 * undone and the block is dirty.
				 */
				if (ext4_bh_delay_or_unwritten(NULL, bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked.
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end = mpd->next_page - 1;
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(inode->i_sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 EXT4_I(inode)->i_reserved_data_blocks);
	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
		 EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}

/*
 * mpage_da_map_and_submit - go through the given space, map it
 * if necessary, and then submit it for I/O.
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 *
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appear to be free blocks, we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * A get_block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress.  The page will be redirtied by
		 * writepage and writepages will again try to write
		 * it.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err) {
				/* Only if the journal is aborted */
				mpd->retval = err;
				goto submit_io;
			}
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		  (1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * The function is used to collect contiguous blocks in the same state.
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate.
	 * This is a stopgap solution.  We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop.
	 */
	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
		goto flush_it;

	/* Check if the reserved journal credits might overflow. */
	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available.  Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks.  So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credit
			 * reserved.  So limit the new bh->b_size.
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block into our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block into our extent, so we
	 * need to flush the current extent and start a new one.
	 */
	mpage_da_map_and_submit(mpd);
	return;
}
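
/*
 * Illustrative example (editorial, numbers made up): if mpd currently
 * describes b_blocknr = 200 with b_size covering 8 blocks, then
 * nrblocks = 8 and next = 208 above.  A new buffer at logical == 208 whose
 * state matches mpd->b_state (the BH_FLAGS bits) simply grows b_size; a
 * buffer at any other logical block, or in a different delay/unwritten
 * state, forces mpage_da_map_and_submit() on the accumulated extent first.
 */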

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is from the delayed
 * write path.  This function looks up the requested blocks and sets
 * the buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;
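	/*
	 * Note (editorial): invalid_block is a sentinel "physical" block
	 * number handed to map_bh() below for delayed buffers; it is assumed
	 * never to be a valid on-disk block (falling back to ~0 on very
	 * large filesystems), so accidental use of it is easy to spot.
	 */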

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

	if (retval == 0) {
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/* If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again. */
		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
			retval = ext4_da_reserve_space(inode, iblock);
			if (retval)
				/* not enough space to reserve */
				goto out_unlock;
		}

		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is
		 * served and it should not appear on the bh->b_state.
		 */
		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}
1820
/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New and BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New and BH_Unwritten
 * set.  We also have b_blocknr = the physical block mapping the unwritten
 * extent and b_bdev initialized properly.
 */
1833static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1834 struct buffer_head *bh, int create)
1835{
1836 struct ext4_map_blocks map;
1837 int ret = 0;
1838
1839 BUG_ON(create == 0);
1840 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1841
1842 map.m_lblk = iblock;
1843 map.m_len = 1;
1844
/*
 * First, we need to know whether the block is already allocated;
 * preallocated blocks are unmapped but should be treated
 * the same as allocated blocks.
 */
1850 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1851 if (ret <= 0)
1852 return ret;
1853
1854 map_bh(bh, inode->i_sb, map.m_pblk);
1855 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1856
1857 if (buffer_unwritten(bh)) {
/* A delayed write to an unwritten bh should be marked
 * new and mapped.  Mapped ensures that we don't call
 * get_block multiple times when we write to the same
 * offset, and new ensures that we do a proper zero-out
 * for a partial write.
 */
1864 set_buffer_new(bh);
1865 set_buffer_mapped(bh);
1866 }
1867 return 0;
1868}
1869
/*
 * This function is used as a standard get_block_t callback
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_write_begin() and block_write_full_page().
 * These functions should only try to map a single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests them by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by this
 * function are either all already mapped or marked for delayed
 * allocation before calling block_write_full_page().  Otherwise,
 * b_blocknr could be left uninitialized, and the page write functions
 * will be taken by surprise.
 */
1884static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1885 struct buffer_head *bh_result, int create)
1886{
1887 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1888 return _ext4_get_block(inode, iblock, bh_result, 0);
1889}
1890
1891static int bget_one(handle_t *handle, struct buffer_head *bh)
1892{
1893 get_bh(bh);
1894 return 0;
1895}
1896
1897static int bput_one(handle_t *handle, struct buffer_head *bh)
1898{
1899 put_bh(bh);
1900 return 0;
1901}
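/*
 * bget_one()/bput_one() exist only as walk_page_buffers() callbacks;
 * the handle argument is ignored.  A sketch of the pinning pattern
 * used by __ext4_journalled_writepage() below:
 *
 *	walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
 *	unlock_page(page);	(page may go away; buffers stay pinned)
 *	... start a handle and journal the buffers ...
 *	walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
 */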
1902
1903static int __ext4_journalled_writepage(struct page *page,
1904 unsigned int len)
1905{
1906 struct address_space *mapping = page->mapping;
1907 struct inode *inode = mapping->host;
1908 struct buffer_head *page_bufs;
1909 handle_t *handle = NULL;
1910 int ret = 0;
1911 int err;
1912
1913 ClearPageChecked(page);
1914 page_bufs = page_buffers(page);
1915 BUG_ON(!page_bufs);
1916 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1917 /* As soon as we unlock the page, it can go away, but we have
1918 * references to buffers so we are safe */
1919 unlock_page(page);
1920
1921 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1922 if (IS_ERR(handle)) {
1923 ret = PTR_ERR(handle);
1924 goto out;
1925 }
1926
1927 BUG_ON(!ext4_handle_valid(handle));
1928
1929 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1930 do_journal_get_write_access);
1931
1932 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1933 write_end_fn);
1934 if (ret == 0)
1935 ret = err;
1936 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1937 err = ext4_journal_stop(handle);
1938 if (!ret)
1939 ret = err;
1940
1941 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1942 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1943out:
1944 return ret;
1945}
1946
1947static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1948static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1949
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite().  We don't
 * even need to add the inode to the transaction's list in ordered mode,
 * because if we are writing back data added by write(), the inode is already
 * there, and if we are writing back data modified via mmap(), no one
 * guarantees in which transaction the data will hit the disk.  In case we
 * are journaling data, we cannot start a transaction directly because a
 * transaction start ranks above the page lock, so we have to do some magic.
 *
 * This function can get called via...
 * - ext4_da_writepages after taking page lock (have journal handle)
 * - journal_submit_inode_data_buffers (no journal handle)
 * - shrink_page_list via pdflush (no journal handle)
 * - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function.  If we have a page with
 * multiple blocks, we need to write those buffer_heads that are mapped.  This
 * is important for mmap-based writes.  So if, with a 1K blocksize, we do
 * truncate(f, 1024);
 * a = mmap(f, 0, 4096);
 * a[0] = 'a';
 * truncate(f, 4096);
 * then the page has its first buffer_head mapped via the page_mkwrite
 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
 * do_wp_page).  So writepage should write the first block.  If we modify
 * the mmap area beyond 1024, we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if it has any buffer_heads that are either delayed or
 * unwritten.
 *
 * We can get recursively called as shown below.
 *
 * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 * ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't take the page
 * lock recursively.
 */
1991static int ext4_writepage(struct page *page,
1992 struct writeback_control *wbc)
1993{
1994 int ret = 0, commit_write = 0;
1995 loff_t size;
1996 unsigned int len;
1997 struct buffer_head *page_bufs = NULL;
1998 struct inode *inode = page->mapping->host;
1999
2000 trace_ext4_writepage(page);
2001 size = i_size_read(inode);
2002 if (page->index == size >> PAGE_CACHE_SHIFT)
2003 len = size & ~PAGE_CACHE_MASK;
2004 else
2005 len = PAGE_CACHE_SIZE;
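/*
 * Worked example for the trim above (4K pages): for a 10000-byte file
 * the last page has index 2 (10000 >> 12), and only
 * 10000 & ~PAGE_CACHE_MASK == 1808 bytes of it are valid, so len is
 * shortened accordingly; every earlier page writes out a full
 * PAGE_CACHE_SIZE.
 */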
2006
2007 /*
2008 * If the page does not have buffers (for whatever reason),
2009 * try to create them using __block_write_begin. If this
2010 * fails, redirty the page and move on.
2011 */
2012 if (!page_has_buffers(page)) {
2013 if (__block_write_begin(page, 0, len,
2014 noalloc_get_block_write)) {
2015 redirty_page:
2016 redirty_page_for_writepage(wbc, page);
2017 unlock_page(page);
2018 return 0;
2019 }
2020 commit_write = 1;
2021 }
2022 page_bufs = page_buffers(page);
2023 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2024 ext4_bh_delay_or_unwritten)) {
/*
 * We don't want to do block allocation, so redirty
 * the page and return.  We may reach here when we do
 * a journal commit via journal_submit_inode_data_buffers.
 * We can also reach here via shrink_page_list, but it
 * should never be for direct reclaim, so warn if that
 * happens.
 */
2033 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2034 PF_MEMALLOC);
2035 goto redirty_page;
2036 }
2037 if (commit_write)
2038 /* now mark the buffer_heads as dirty and uptodate */
2039 block_commit_write(page, 0, len);
2040
2041 if (PageChecked(page) && ext4_should_journal_data(inode))
2042 /*
2043 * It's mmapped pagecache. Add buffers and journal it. There
2044 * doesn't seem much point in redirtying the page here.
2045 */
2046 return __ext4_journalled_writepage(page, len);
2047
2048 if (buffer_uninit(page_bufs)) {
2049 ext4_set_bh_endio(page_bufs, inode);
2050 ret = block_write_full_page_endio(page, noalloc_get_block_write,
2051 wbc, ext4_end_io_buffer_write);
2052 } else
2053 ret = block_write_full_page(page, noalloc_get_block_write,
2054 wbc);
2055
2056 return ret;
2057}
2058
/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction.
 * ext4_da_writepages() will loop, calling this before
 * the block allocation.
 */
2066
2067static int ext4_da_writepages_trans_blocks(struct inode *inode)
2068{
2069 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2070
/*
 * With non-extent format the journal credits needed to
 * insert nrblocks contiguous blocks depend on the
 * number of contiguous blocks.  So we limit the
 * number of contiguous blocks to a sane value.
 */
2077 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2078 (max_blocks > EXT4_MAX_TRANS_DATA))
2079 max_blocks = EXT4_MAX_TRANS_DATA;
2080
2081 return ext4_chunk_trans_blocks(inode, max_blocks);
2082}
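/*
 * For illustration: if the inode currently has 1000 reserved delalloc
 * data blocks, ext4_chunk_trans_blocks() sizes the handle for a single
 * worst-case 1000-block extent allocation; on a non-extent inode the
 * count would first be clamped to EXT4_MAX_TRANS_DATA blocks.
 */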
2083
/*
 * write_cache_pages_da - walk the list of dirty pages of the given
 * address space and accumulate pages that need writing, and call
 * mpage_da_map_and_submit to map a single contiguous region of
 * logical blocks and then write them.
 */
2090static int write_cache_pages_da(struct address_space *mapping,
2091 struct writeback_control *wbc,
2092 struct mpage_da_data *mpd,
2093 pgoff_t *done_index)
2094{
2095 struct buffer_head *bh, *head;
2096 struct inode *inode = mapping->host;
2097 struct pagevec pvec;
2098 unsigned int nr_pages;
2099 sector_t logical;
2100 pgoff_t index, end;
2101 long nr_to_write = wbc->nr_to_write;
2102 int i, tag, ret = 0;
2103
2104 memset(mpd, 0, sizeof(struct mpage_da_data));
2105 mpd->wbc = wbc;
2106 mpd->inode = inode;
2107 pagevec_init(&pvec, 0);
2108 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2109 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2110
2111 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2112 tag = PAGECACHE_TAG_TOWRITE;
2113 else
2114 tag = PAGECACHE_TAG_DIRTY;
2115
2116 *done_index = index;
2117 while (index <= end) {
2118 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2119 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2120 if (nr_pages == 0)
2121 return 0;
2122
2123 for (i = 0; i < nr_pages; i++) {
2124 struct page *page = pvec.pages[i];
2125
2126 /*
2127 * At this point, the page may be truncated or
2128 * invalidated (changing page->mapping to NULL), or
2129 * even swizzled back from swapper_space to tmpfs file
2130 * mapping. However, page->index will not change
2131 * because we have a reference on the page.
2132 */
2133 if (page->index > end)
2134 goto out;
2135
2136 *done_index = page->index + 1;
2137
/*
 * If we can't merge this page, and we have
 * accumulated a contiguous region, write it.
 */
2142 if ((mpd->next_page != page->index) &&
2143 (mpd->next_page != mpd->first_page)) {
2144 mpage_da_map_and_submit(mpd);
2145 goto ret_extent_tail;
2146 }
2147
2148 lock_page(page);
2149
/*
 * If the page is no longer dirty, or its
 * mapping no longer corresponds to the inode we
 * are writing (which means it has been
 * truncated or invalidated), or the page is
 * already under writeback and we are not
 * doing a data integrity writeback, skip the page.
 */
2158 if (!PageDirty(page) ||
2159 (PageWriteback(page) &&
2160 (wbc->sync_mode == WB_SYNC_NONE)) ||
2161 unlikely(page->mapping != mapping)) {
2162 unlock_page(page);
2163 continue;
2164 }
2165
2166 wait_on_page_writeback(page);
2167 BUG_ON(PageWriteback(page));
2168
2169 if (mpd->next_page != page->index)
2170 mpd->first_page = page->index;
2171 mpd->next_page = page->index + 1;
2172 logical = (sector_t) page->index <<
2173 (PAGE_CACHE_SHIFT - inode->i_blkbits);
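/*
 * Example of the shift above: with 4K pages and 1K blocks
 * (PAGE_CACHE_SHIFT == 12, i_blkbits == 10), page index 5 starts
 * at logical block 5 << 2 == 20.
 */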
2174
2175 if (!page_has_buffers(page)) {
2176 mpage_add_bh_to_extent(mpd, logical,
2177 PAGE_CACHE_SIZE,
2178 (1 << BH_Dirty) | (1 << BH_Uptodate));
2179 if (mpd->io_done)
2180 goto ret_extent_tail;
2181 } else {
2182 /*
2183 * Page with regular buffer heads,
2184 * just add all dirty ones
2185 */
2186 head = page_buffers(page);
2187 bh = head;
2188 do {
2189 BUG_ON(buffer_locked(bh));
2190 /*
2191 * We need to try to allocate
2192 * unmapped blocks in the same page.
2193 * Otherwise we won't make progress
2194 * with the page in ext4_writepage
2195 */
2196 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2197 mpage_add_bh_to_extent(mpd, logical,
2198 bh->b_size,
2199 bh->b_state);
2200 if (mpd->io_done)
2201 goto ret_extent_tail;
2202 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2203 /*
2204 * mapped dirty buffer. We need
2205 * to update the b_state
2206 * because we look at b_state
2207 * in mpage_da_map_blocks. We
2208 * don't update b_size because
2209 * if we find an unmapped
2210 * buffer_head later we need to
2211 * use the b_state flag of that
2212 * buffer_head.
2213 */
2214 if (mpd->b_size == 0)
2215 mpd->b_state = bh->b_state & BH_FLAGS;
2216 }
2217 logical++;
2218 } while ((bh = bh->b_this_page) != head);
2219 }
2220
2221 if (nr_to_write > 0) {
2222 nr_to_write--;
2223 if (nr_to_write == 0 &&
2224 wbc->sync_mode == WB_SYNC_NONE)
2225 /*
2226 * We stop writing back only if we are
2227 * not doing integrity sync. In case of
2228 * integrity sync we have to keep going
2229 * because someone may be concurrently
2230 * dirtying pages, and we might have
2231 * synced a lot of newly appeared dirty
2232 * pages, but have not synced all of the
2233 * old dirty pages.
2234 */
2235 goto out;
2236 }
2237 }
2238 pagevec_release(&pvec);
2239 cond_resched();
2240 }
2241 return 0;
2242ret_extent_tail:
2243 ret = MPAGE_DA_EXTENT_TAIL;
2244out:
2245 pagevec_release(&pvec);
2246 cond_resched();
2247 return ret;
2248}
2249
2250
2251static int ext4_da_writepages(struct address_space *mapping,
2252 struct writeback_control *wbc)
2253{
2254 pgoff_t index;
2255 int range_whole = 0;
2256 handle_t *handle = NULL;
2257 struct mpage_da_data mpd;
2258 struct inode *inode = mapping->host;
2259 int pages_written = 0;
2260 unsigned int max_pages;
2261 int range_cyclic, cycled = 1, io_done = 0;
2262 int needed_blocks, ret = 0;
2263 long desired_nr_to_write, nr_to_writebump = 0;
2264 loff_t range_start = wbc->range_start;
2265 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2266 pgoff_t done_index = 0;
2267 pgoff_t end;
2268 struct blk_plug plug;
2269
2270 trace_ext4_da_writepages(inode, wbc);
2271
/*
 * No pages to write? This is mainly a kludge to avoid starting
 * a transaction for special inodes like the journal inode on the last
 * iput(), because that could violate lock ordering on umount.
 */
2277 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2278 return 0;
2279
2280 /*
2281 * If the filesystem has aborted, it is read-only, so return
2282 * right away instead of dumping stack traces later on that
2283 * will obscure the real source of the problem. We test
2284 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2285 * the latter could be true if the filesystem is mounted
2286 * read-only, and in that case, ext4_da_writepages should
2287 * *never* be called, so if that ever happens, we would want
2288 * the stack trace.
2289 */
2290 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2291 return -EROFS;
2292
2293 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2294 range_whole = 1;
2295
2296 range_cyclic = wbc->range_cyclic;
2297 if (wbc->range_cyclic) {
2298 index = mapping->writeback_index;
2299 if (index)
2300 cycled = 0;
2301 wbc->range_start = index << PAGE_CACHE_SHIFT;
2302 wbc->range_end = LLONG_MAX;
2303 wbc->range_cyclic = 0;
2304 end = -1;
2305 } else {
2306 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2307 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2308 }
2309
/*
 * This works around two forms of stupidity.  The first is in
 * the writeback code, which caps the maximum number of pages
 * written to be 1024 pages.  This is wrong on multiple
 * levels; different architectures have a different page size,
 * which changes the maximum amount of data which gets
 * written.  Secondly, 4 megabytes is way too small.  XFS
 * forces this value to be 16 megabytes by multiplying the
 * nr_to_write parameter by four, and then relies on its
 * allocator to allocate larger extents to make them
 * contiguous.  Unfortunately this brings us to the second
 * stupidity, which is that ext4's mballoc code only allocates
 * at most 2048 blocks.  So we force contiguous writes up to
 * the number of dirty blocks in the inode, or
 * sbi->s_max_writeback_mb_bump, whichever is smaller.
 */
2326 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2327 if (!range_cyclic && range_whole) {
2328 if (wbc->nr_to_write == LONG_MAX)
2329 desired_nr_to_write = wbc->nr_to_write;
2330 else
2331 desired_nr_to_write = wbc->nr_to_write * 8;
2332 } else
2333 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2334 max_pages);
2335 if (desired_nr_to_write > max_pages)
2336 desired_nr_to_write = max_pages;
2337
2338 if (wbc->nr_to_write < desired_nr_to_write) {
2339 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2340 wbc->nr_to_write = desired_nr_to_write;
2341 }
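/*
 * Example of the bump (assuming the default s_max_writeback_mb_bump of
 * 128MB and 4K pages): max_pages = 128 << (20 - 12) == 32768 pages, so
 * a caller asking for the usual 1024 pages has nr_to_write raised
 * toward that cap, with nr_to_writebump recording the difference so it
 * can be subtracted again at out_writepages.
 */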
2342
2343retry:
2344 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2345 tag_pages_for_writeback(mapping, index, end);
2346
2347 blk_start_plug(&plug);
2348 while (!ret && wbc->nr_to_write > 0) {
2349
/*
 * We insert one extent at a time, so we need the
 * credits for a single extent allocation.
 * Journalled mode is currently not supported
 * by delalloc.
 */
2356 BUG_ON(ext4_should_journal_data(inode));
2357 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2358
2359 /* start a new transaction*/
2360 handle = ext4_journal_start(inode, needed_blocks);
2361 if (IS_ERR(handle)) {
2362 ret = PTR_ERR(handle);
2363 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2364 "%ld pages, ino %lu; err %d", __func__,
2365 wbc->nr_to_write, inode->i_ino, ret);
2366 blk_finish_plug(&plug);
2367 goto out_writepages;
2368 }
2369
2370 /*
2371 * Now call write_cache_pages_da() to find the next
2372 * contiguous region of logical blocks that need
2373 * blocks to be allocated by ext4 and submit them.
2374 */
2375 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2376 /*
2377 * If we have a contiguous extent of pages and we
2378 * haven't done the I/O yet, map the blocks and submit
2379 * them for I/O.
2380 */
2381 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2382 mpage_da_map_and_submit(&mpd);
2383 ret = MPAGE_DA_EXTENT_TAIL;
2384 }
2385 trace_ext4_da_write_pages(inode, &mpd);
2386 wbc->nr_to_write -= mpd.pages_written;
2387
2388 ext4_journal_stop(handle);
2389
2390 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
/* Commit the transaction, which would
 * free the blocks released in the transaction,
 * and try again.
 */
2395 jbd2_journal_force_commit_nested(sbi->s_journal);
2396 ret = 0;
2397 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2398 /*
2399 * Got one extent now try with rest of the pages.
2400 * If mpd.retval is set -EIO, journal is aborted.
2401 * So we don't need to write any more.
2402 */
2403 pages_written += mpd.pages_written;
2404 ret = mpd.retval;
2405 io_done = 1;
2406 } else if (wbc->nr_to_write)
/*
 * There is no more writeout needed,
 * or we requested a non-blocking writeout
 * and found the device congested.
 */
2412 break;
2413 }
2414 blk_finish_plug(&plug);
2415 if (!io_done && !cycled) {
2416 cycled = 1;
2417 index = 0;
2418 wbc->range_start = index << PAGE_CACHE_SHIFT;
2419 wbc->range_end = mapping->writeback_index - 1;
2420 goto retry;
2421 }
2422
2423 /* Update index */
2424 wbc->range_cyclic = range_cyclic;
2425 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2426 /*
2427 * set the writeback_index so that range_cyclic
2428 * mode will write it back later
2429 */
2430 mapping->writeback_index = done_index;
2431
2432out_writepages:
2433 wbc->nr_to_write -= nr_to_writebump;
2434 wbc->range_start = range_start;
2435 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2436 return ret;
2437}
2438
2439#define FALL_BACK_TO_NONDELALLOC 1
2440static int ext4_nonda_switch(struct super_block *sb)
2441{
2442 s64 free_blocks, dirty_blocks;
2443 struct ext4_sb_info *sbi = EXT4_SB(sb);
2444
/*
 * Switch to non-delalloc mode if we are running low
 * on free blocks.  The free block accounting via percpu
 * counters can get slightly wrong with percpu_counter_batch getting
 * accumulated on each CPU without updating the global counters.
 * Delalloc needs accurate free block accounting.  So switch
 * to non-delalloc when we are near the error range.
 */
2453 free_blocks = EXT4_C2B(sbi,
2454 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2455 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2456 if (2 * free_blocks < 3 * dirty_blocks ||
2457 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
/*
 * The free block count is less than 150% of the dirty
 * block count, or free blocks are below the watermark.
 */
2462 return 1;
2463 }
2464 /*
2465 * Even if we don't switch but are nearing capacity,
2466 * start pushing delalloc when 1/2 of free blocks are dirty.
2467 */
2468 if (free_blocks < 2 * dirty_blocks)
2469 writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2470
2471 return 0;
2472}
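/*
 * The threshold above, spelled out: 2 * free < 3 * dirty is just
 * free < 1.5 * dirty kept in integer arithmetic.  E.g. free == 140,
 * dirty == 100 switches to nondelalloc (280 < 300), while free == 160
 * stays in delalloc mode (320 >= 300) unless the watermark test trips
 * instead.
 */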
2473
2474static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2475 loff_t pos, unsigned len, unsigned flags,
2476 struct page **pagep, void **fsdata)
2477{
2478 int ret, retries = 0;
2479 struct page *page;
2480 pgoff_t index;
2481 struct inode *inode = mapping->host;
2482 handle_t *handle;
2483
2484 index = pos >> PAGE_CACHE_SHIFT;
2485
2486 if (ext4_nonda_switch(inode->i_sb)) {
2487 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2488 return ext4_write_begin(file, mapping, pos,
2489 len, flags, pagep, fsdata);
2490 }
2491 *fsdata = (void *)0;
2492 trace_ext4_da_write_begin(inode, pos, len, flags);
2493retry:
/*
 * With delayed allocation, we don't log the i_disksize update
 * if there is delayed block allocation.  But we still need
 * to journal the i_disksize update for writes to the end of
 * the file that hit an already-mapped buffer.
 */
2500 handle = ext4_journal_start(inode, 1);
2501 if (IS_ERR(handle)) {
2502 ret = PTR_ERR(handle);
2503 goto out;
2504 }
2505 /* We cannot recurse into the filesystem as the transaction is already
2506 * started */
2507 flags |= AOP_FLAG_NOFS;
2508
2509 page = grab_cache_page_write_begin(mapping, index, flags);
2510 if (!page) {
2511 ext4_journal_stop(handle);
2512 ret = -ENOMEM;
2513 goto out;
2514 }
2515 *pagep = page;
2516
2517 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2518 if (ret < 0) {
2519 unlock_page(page);
2520 ext4_journal_stop(handle);
2521 page_cache_release(page);
2522 /*
2523 * block_write_begin may have instantiated a few blocks
2524 * outside i_size. Trim these off again. Don't need
2525 * i_size_read because we hold i_mutex.
2526 */
2527 if (pos + len > inode->i_size)
2528 ext4_truncate_failed_write(inode);
2529 }
2530
2531 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2532 goto retry;
2533out:
2534 return ret;
2535}
2536
/*
 * Check if we should update i_disksize when writing to the
 * end of the file without requiring block allocation.
 */
2541static int ext4_da_should_update_i_disksize(struct page *page,
2542 unsigned long offset)
2543{
2544 struct buffer_head *bh;
2545 struct inode *inode = page->mapping->host;
2546 unsigned int idx;
2547 int i;
2548
2549 bh = page_buffers(page);
2550 idx = offset >> inode->i_blkbits;
2551
2552 for (i = 0; i < idx; i++)
2553 bh = bh->b_this_page;
2554
2555 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2556 return 0;
2557 return 1;
2558}
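/*
 * Example of the index math above: with 1K blocks (i_blkbits == 10)
 * and offset == 3100 within the page, idx = 3100 >> 10 == 3, so the
 * walk stops at the fourth buffer_head on the page, the one covering
 * bytes 3072..4095.
 */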
2559
2560static int ext4_da_write_end(struct file *file,
2561 struct address_space *mapping,
2562 loff_t pos, unsigned len, unsigned copied,
2563 struct page *page, void *fsdata)
2564{
2565 struct inode *inode = mapping->host;
2566 int ret = 0, ret2;
2567 handle_t *handle = ext4_journal_current_handle();
2568 loff_t new_i_size;
2569 unsigned long start, end;
2570 int write_mode = (int)(unsigned long)fsdata;
2571
2572 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2573 switch (ext4_inode_journal_mode(inode)) {
2574 case EXT4_INODE_ORDERED_DATA_MODE:
2575 return ext4_ordered_write_end(file, mapping, pos,
2576 len, copied, page, fsdata);
2577 case EXT4_INODE_WRITEBACK_DATA_MODE:
2578 return ext4_writeback_write_end(file, mapping, pos,
2579 len, copied, page, fsdata);
2580 default:
2581 BUG();
2582 }
2583 }
2584
2585 trace_ext4_da_write_end(inode, pos, len, copied);
2586 start = pos & (PAGE_CACHE_SIZE - 1);
2587 end = start + copied - 1;
2588
2589 /*
2590 * generic_write_end() will run mark_inode_dirty() if i_size
2591 * changes. So let's piggyback the i_disksize mark_inode_dirty
2592 * into that.
2593 */
2594
2595 new_i_size = pos + copied;
2596 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2597 if (ext4_da_should_update_i_disksize(page, end)) {
2598 down_write(&EXT4_I(inode)->i_data_sem);
2599 if (new_i_size > EXT4_I(inode)->i_disksize) {
2600 /*
2601 * Updating i_disksize when extending file
2602 * without needing block allocation
2603 */
2604 if (ext4_should_order_data(inode))
2605 ret = ext4_jbd2_file_inode(handle,
2606 inode);
2607
2608 EXT4_I(inode)->i_disksize = new_i_size;
2609 }
2610 up_write(&EXT4_I(inode)->i_data_sem);
/* We need to mark the inode dirty even if
 * new_i_size is less than inode->i_size
 * but greater than i_disksize (hint: delalloc).
 */
2615 ext4_mark_inode_dirty(handle, inode);
2616 }
2617 }
2618 ret2 = generic_write_end(file, mapping, pos, len, copied,
2619 page, fsdata);
2620 copied = ret2;
2621 if (ret2 < 0)
2622 ret = ret2;
2623 ret2 = ext4_journal_stop(handle);
2624 if (!ret)
2625 ret = ret2;
2626
2627 return ret ? ret : copied;
2628}
2629
2630static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2631{
2632 /*
2633 * Drop reserved blocks
2634 */
2635 BUG_ON(!PageLocked(page));
2636 if (!page_has_buffers(page))
2637 goto out;
2638
2639 ext4_da_page_release_reservation(page, offset);
2640
2641out:
2642 ext4_invalidatepage(page, offset);
2643
2644 return;
2645}
2646
2647/*
2648 * Force all delayed allocation blocks to be allocated for a given inode.
2649 */
2650int ext4_alloc_da_blocks(struct inode *inode)
2651{
2652 trace_ext4_alloc_da_blocks(inode);
2653
2654 if (!EXT4_I(inode)->i_reserved_data_blocks &&
2655 !EXT4_I(inode)->i_reserved_meta_blocks)
2656 return 0;
2657
2658 /*
2659 * We do something simple for now. The filemap_flush() will
2660 * also start triggering a write of the data blocks, which is
2661 * not strictly speaking necessary (and for users of
2662 * laptop_mode, not even desirable). However, to do otherwise
2663 * would require replicating code paths in:
2664 *
2665 * ext4_da_writepages() ->
2666 * write_cache_pages() ---> (via passed in callback function)
2667 * __mpage_da_writepage() -->
2668 * mpage_add_bh_to_extent()
2669 * mpage_da_map_blocks()
2670 *
2671 * The problem is that write_cache_pages(), located in
2672 * mm/page-writeback.c, marks pages clean in preparation for
2673 * doing I/O, which is not desirable if we're not planning on
2674 * doing I/O at all.
2675 *
2676 * We could call write_cache_pages(), and then redirty all of
2677 * the pages by calling redirty_page_for_writepage() but that
2678 * would be ugly in the extreme. So instead we would need to
2679 * replicate parts of the code in the above functions,
2680 * simplifying them because we wouldn't actually intend to
2681 * write out the pages, but rather only collect contiguous
2682 * logical block extents, call the multi-block allocator, and
2683 * then update the buffer heads with the block allocations.
2684 *
2685 * For now, though, we'll cheat by calling filemap_flush(),
2686 * which will map the blocks, and start the I/O, but not
2687 * actually wait for the I/O to complete.
2688 */
2689 return filemap_flush(inode->i_mapping);
2690}
2691
2692/*
2693 * bmap() is special. It gets used by applications such as lilo and by
2694 * the swapper to find the on-disk block of a specific piece of data.
2695 *
2696 * Naturally, this is dangerous if the block concerned is still in the
2697 * journal. If somebody makes a swapfile on an ext4 data-journaling
2698 * filesystem and enables swap, then they may get a nasty shock when the
2699 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
2701 * awaiting writeback in the kernel's buffer cache.
2702 *
2703 * So, if we see any bmap calls here on a modified, data-journaled file,
2704 * take extra steps to flush any blocks which might be in the cache.
2705 */
2706static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2707{
2708 struct inode *inode = mapping->host;
2709 journal_t *journal;
2710 int err;
2711
2712 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2713 test_opt(inode->i_sb, DELALLOC)) {
/*
 * With delalloc we want to sync the file
 * so that we can make sure we allocate
 * blocks for the file.
 */
2719 filemap_write_and_wait(mapping);
2720 }
2721
2722 if (EXT4_JOURNAL(inode) &&
2723 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2724 /*
2725 * This is a REALLY heavyweight approach, but the use of
2726 * bmap on dirty files is expected to be extremely rare:
2727 * only if we run lilo or swapon on a freshly made file
2728 * do we expect this to happen.
2729 *
2730 * (bmap requires CAP_SYS_RAWIO so this does not
2731 * represent an unprivileged user DOS attack --- we'd be
2732 * in trouble if mortal users could trigger this path at
2733 * will.)
2734 *
2735 * NB. EXT4_STATE_JDATA is not set on files other than
2736 * regular files. If somebody wants to bmap a directory
2737 * or symlink and gets confused because the buffer
2738 * hasn't yet been flushed to disk, they deserve
2739 * everything they get.
2740 */
2741
2742 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2743 journal = EXT4_JOURNAL(inode);
2744 jbd2_journal_lock_updates(journal);
2745 err = jbd2_journal_flush(journal);
2746 jbd2_journal_unlock_updates(journal);
2747
2748 if (err)
2749 return 0;
2750 }
2751
2752 return generic_block_bmap(mapping, block, ext4_get_block);
2753}
2754
2755static int ext4_readpage(struct file *file, struct page *page)
2756{
2757 trace_ext4_readpage(page);
2758 return mpage_readpage(page, ext4_get_block);
2759}
2760
2761static int
2762ext4_readpages(struct file *file, struct address_space *mapping,
2763 struct list_head *pages, unsigned nr_pages)
2764{
2765 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2766}
2767
2768static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2769{
2770 struct buffer_head *head, *bh;
2771 unsigned int curr_off = 0;
2772
2773 if (!page_has_buffers(page))
2774 return;
2775 head = bh = page_buffers(page);
2776 do {
2777 if (offset <= curr_off && test_clear_buffer_uninit(bh)
2778 && bh->b_private) {
2779 ext4_free_io_end(bh->b_private);
2780 bh->b_private = NULL;
2781 bh->b_end_io = NULL;
2782 }
2783 curr_off = curr_off + bh->b_size;
2784 bh = bh->b_this_page;
2785 } while (bh != head);
2786}
2787
2788static void ext4_invalidatepage(struct page *page, unsigned long offset)
2789{
2790 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2791
2792 trace_ext4_invalidatepage(page, offset);
2793
2794 /*
2795 * free any io_end structure allocated for buffers to be discarded
2796 */
2797 if (ext4_should_dioread_nolock(page->mapping->host))
2798 ext4_invalidatepage_free_endio(page, offset);
2799 /*
2800 * If it's a full truncate we just forget about the pending dirtying
2801 */
2802 if (offset == 0)
2803 ClearPageChecked(page);
2804
2805 if (journal)
2806 jbd2_journal_invalidatepage(journal, page, offset);
2807 else
2808 block_invalidatepage(page, offset);
2809}
2810
2811static int ext4_releasepage(struct page *page, gfp_t wait)
2812{
2813 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2814
2815 trace_ext4_releasepage(page);
2816
2817 WARN_ON(PageChecked(page));
2818 if (!page_has_buffers(page))
2819 return 0;
2820 if (journal)
2821 return jbd2_journal_try_to_free_buffers(journal, page, wait);
2822 else
2823 return try_to_free_buffers(page);
2824}
2825
/*
 * ext4_get_block variant used when preparing for a DIO write or buffered
 * write.  We allocate an uninitialized extent if blocks haven't been
 * allocated.  The extent will be converted to initialized after the IO
 * is complete.
 */
2831static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2832 struct buffer_head *bh_result, int create)
2833{
2834 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2835 inode->i_ino, create);
2836 return _ext4_get_block(inode, iblock, bh_result,
2837 EXT4_GET_BLOCKS_IO_CREATE_EXT);
2838}
2839
2840static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2841 ssize_t size, void *private, int ret,
2842 bool is_async)
2843{
2844 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2845 ext4_io_end_t *io_end = iocb->private;
2846 struct workqueue_struct *wq;
2847 unsigned long flags;
2848 struct ext4_inode_info *ei;
2849
/* if not async direct IO or a DIO with a 0-byte write, just return */
2851 if (!io_end || !size)
2852 goto out;
2853
2854 ext_debug("ext4_end_io_dio(): io_end 0x%p "
2855 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
2856 iocb->private, io_end->inode->i_ino, iocb, offset,
2857 size);
2858
2859 iocb->private = NULL;
2860
2861 /* if not aio dio with unwritten extents, just free io and return */
2862 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2863 ext4_free_io_end(io_end);
2864out:
2865 if (is_async)
2866 aio_complete(iocb, ret, 0);
2867 inode_dio_done(inode);
2868 return;
2869 }
2870
2871 io_end->offset = offset;
2872 io_end->size = size;
2873 if (is_async) {
2874 io_end->iocb = iocb;
2875 io_end->result = ret;
2876 }
2877 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2878
2879 /* Add the io_end to per-inode completed aio dio list*/
2880 ei = EXT4_I(io_end->inode);
2881 spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2882 list_add_tail(&io_end->list, &ei->i_completed_io_list);
2883 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2884
2885 /* queue the work to convert unwritten extents to written */
2886 queue_work(wq, &io_end->work);
2887}
2888
2889static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2890{
2891 ext4_io_end_t *io_end = bh->b_private;
2892 struct workqueue_struct *wq;
2893 struct inode *inode;
2894 unsigned long flags;
2895
2896 if (!test_clear_buffer_uninit(bh) || !io_end)
2897 goto out;
2898
2899 if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2900 ext4_msg(io_end->inode->i_sb, KERN_INFO,
2901 "sb umounted, discard end_io request for inode %lu",
2902 io_end->inode->i_ino);
2903 ext4_free_io_end(io_end);
2904 goto out;
2905 }
2906
/*
 * It may be over-defensive here to check EXT4_IO_END_UNWRITTEN now,
 * but being more careful is always safe for future changes.
 */
2911 inode = io_end->inode;
2912 ext4_set_io_unwritten_flag(inode, io_end);
2913
2914 /* Add the io_end to per-inode completed io list*/
2915 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2916 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2917 spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2918
2919 wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2920 /* queue the work to convert unwritten extents to written */
2921 queue_work(wq, &io_end->work);
2922out:
2923 bh->b_private = NULL;
2924 bh->b_end_io = NULL;
2925 clear_buffer_uninit(bh);
2926 end_buffer_async_write(bh, uptodate);
2927}
2928
2929static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2930{
2931 ext4_io_end_t *io_end;
2932 struct page *page = bh->b_page;
2933 loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2934 size_t size = bh->b_size;
2935
2936retry:
2937 io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2938 if (!io_end) {
2939 pr_warn_ratelimited("%s: allocation fail\n", __func__);
2940 schedule();
2941 goto retry;
2942 }
2943 io_end->offset = offset;
2944 io_end->size = size;
/*
 * We need to hold a reference to the page to make sure it
 * doesn't get evicted before ext4_end_io_work() has a chance
 * to convert the extent from unwritten to written.
 */
2950 io_end->page = page;
2951 get_page(io_end->page);
2952
2953 bh->b_private = io_end;
2954 bh->b_end_io = ext4_end_io_buffer_write;
2955 return 0;
2956}
2957
/*
 * For ext4 extent files, ext4 will do direct-io writes to holes,
 * preallocated extents, and writes that extend the file, with no need to
 * fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when DIO is
 * completed.  For async direct IO, since the IO may still be pending
 * when we return, we set up an end_io callback which will do the
 * conversion when the async direct IO completes.
 *
 * If the O_DIRECT write will extend the file then add this inode to the
 * orphan list, so recovery will truncate it back to the original size
 * if the machine crashes during the write.
 */
2977static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
2978 const struct iovec *iov, loff_t offset,
2979 unsigned long nr_segs)
2980{
2981 struct file *file = iocb->ki_filp;
2982 struct inode *inode = file->f_mapping->host;
2983 ssize_t ret;
2984 size_t count = iov_length(iov, nr_segs);
2985
2986 loff_t final_size = offset + count;
2987 if (rw == WRITE && final_size <= inode->i_size) {
/*
 * We could direct write to holes and fallocate.
 *
 * Allocated blocks to fill the hole are marked as uninitialized
 * to prevent parallel buffered reads from exposing stale data
 * before DIO completes the data IO.
 *
 * As to previously fallocated extents, ext4 get_block
 * will simply mark the buffer mapped but still
 * keep the extents uninitialized.
 *
 * For the non-AIO case, we will convert those unwritten extents
 * to written after returning from blockdev_direct_IO.
 *
 * For async DIO, the conversion needs to be deferred until
 * the IO is completed.  The ext4 end_io callback function
 * will be called to take care of the conversion work.
 * Here, for the async case, we allocate an io_end structure to
 * hook to the iocb.
 */
3008 iocb->private = NULL;
3009 EXT4_I(inode)->cur_aio_dio = NULL;
3010 if (!is_sync_kiocb(iocb)) {
3011 ext4_io_end_t *io_end =
3012 ext4_init_io_end(inode, GFP_NOFS);
3013 if (!io_end)
3014 return -ENOMEM;
3015 io_end->flag |= EXT4_IO_END_DIRECT;
3016 iocb->private = io_end;
/*
 * We save the io structure for the current async
 * direct IO, so that a later ext4_map_blocks() can
 * flag in the io structure whether there are
 * unwritten extents that need to be converted
 * when the IO completes.
 */
3024 EXT4_I(inode)->cur_aio_dio = iocb->private;
3025 }
3026
3027 ret = __blockdev_direct_IO(rw, iocb, inode,
3028 inode->i_sb->s_bdev, iov,
3029 offset, nr_segs,
3030 ext4_get_block_write,
3031 ext4_end_io_dio,
3032 NULL,
3033 DIO_LOCKING);
3034 if (iocb->private)
3035 EXT4_I(inode)->cur_aio_dio = NULL;
/*
 * The io_end structure takes a reference to the inode;
 * that structure needs to be destroyed and the
 * reference to the inode dropped when the IO is
 * complete, even for a 0-byte or failed write.
 *
 * In the successful AIO DIO case, the io_end structure will be
 * destroyed and the reference to the inode will be dropped
 * after the end_io callback function is called.
 *
 * In the 0-byte write or error case, since the
 * VFS direct IO won't invoke the end_io callback function,
 * we need to free the io_end structure here.
 */
3050 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3051 ext4_free_io_end(iocb->private);
3052 iocb->private = NULL;
3053 } else if (ret > 0 && ext4_test_inode_state(inode,
3054 EXT4_STATE_DIO_UNWRITTEN)) {
3055 int err;
3056 /*
3057 * for non AIO case, since the IO is already
3058 * completed, we could do the conversion right here
3059 */
3060 err = ext4_convert_unwritten_extents(inode,
3061 offset, ret);
3062 if (err < 0)
3063 ret = err;
3064 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3065 }
3066 return ret;
3067 }
3068
/* for the case of a write extending the end of the file, we fall back to the old way */
3070 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3071}
3072
3073static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3074 const struct iovec *iov, loff_t offset,
3075 unsigned long nr_segs)
3076{
3077 struct file *file = iocb->ki_filp;
3078 struct inode *inode = file->f_mapping->host;
3079 ssize_t ret;
3080
3081 /*
3082 * If we are doing data journalling we don't support O_DIRECT
3083 */
3084 if (ext4_should_journal_data(inode))
3085 return 0;
3086
3087 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3088 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3089 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3090 else
3091 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3092 trace_ext4_direct_IO_exit(inode, offset,
3093 iov_length(iov, nr_segs), rw, ret);
3094 return ret;
3095}
3096
3097/*
3098 * Pages can be marked dirty completely asynchronously from ext4's journalling
3099 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3100 * much here because ->set_page_dirty is called under VFS locks. The page is
3101 * not necessarily locked.
3102 *
3103 * We cannot just dirty the page and leave attached buffers clean, because the
3104 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3105 * or jbddirty because all the journalling code will explode.
3106 *
3107 * So what we do is to mark the page "pending dirty" and next time writepage
3108 * is called, propagate that into the buffers appropriately.
3109 */
3110static int ext4_journalled_set_page_dirty(struct page *page)
3111{
3112 SetPageChecked(page);
3113 return __set_page_dirty_nobuffers(page);
3114}
3115
3116static const struct address_space_operations ext4_ordered_aops = {
3117 .readpage = ext4_readpage,
3118 .readpages = ext4_readpages,
3119 .writepage = ext4_writepage,
3120 .write_begin = ext4_write_begin,
3121 .write_end = ext4_ordered_write_end,
3122 .bmap = ext4_bmap,
3123 .invalidatepage = ext4_invalidatepage,
3124 .releasepage = ext4_releasepage,
3125 .direct_IO = ext4_direct_IO,
3126 .migratepage = buffer_migrate_page,
3127 .is_partially_uptodate = block_is_partially_uptodate,
3128 .error_remove_page = generic_error_remove_page,
3129};
3130
3131static const struct address_space_operations ext4_writeback_aops = {
3132 .readpage = ext4_readpage,
3133 .readpages = ext4_readpages,
3134 .writepage = ext4_writepage,
3135 .write_begin = ext4_write_begin,
3136 .write_end = ext4_writeback_write_end,
3137 .bmap = ext4_bmap,
3138 .invalidatepage = ext4_invalidatepage,
3139 .releasepage = ext4_releasepage,
3140 .direct_IO = ext4_direct_IO,
3141 .migratepage = buffer_migrate_page,
3142 .is_partially_uptodate = block_is_partially_uptodate,
3143 .error_remove_page = generic_error_remove_page,
3144};
3145
3146static const struct address_space_operations ext4_journalled_aops = {
3147 .readpage = ext4_readpage,
3148 .readpages = ext4_readpages,
3149 .writepage = ext4_writepage,
3150 .write_begin = ext4_write_begin,
3151 .write_end = ext4_journalled_write_end,
3152 .set_page_dirty = ext4_journalled_set_page_dirty,
3153 .bmap = ext4_bmap,
3154 .invalidatepage = ext4_invalidatepage,
3155 .releasepage = ext4_releasepage,
3156 .direct_IO = ext4_direct_IO,
3157 .is_partially_uptodate = block_is_partially_uptodate,
3158 .error_remove_page = generic_error_remove_page,
3159};
3160
3161static const struct address_space_operations ext4_da_aops = {
3162 .readpage = ext4_readpage,
3163 .readpages = ext4_readpages,
3164 .writepage = ext4_writepage,
3165 .writepages = ext4_da_writepages,
3166 .write_begin = ext4_da_write_begin,
3167 .write_end = ext4_da_write_end,
3168 .bmap = ext4_bmap,
3169 .invalidatepage = ext4_da_invalidatepage,
3170 .releasepage = ext4_releasepage,
3171 .direct_IO = ext4_direct_IO,
3172 .migratepage = buffer_migrate_page,
3173 .is_partially_uptodate = block_is_partially_uptodate,
3174 .error_remove_page = generic_error_remove_page,
3175};
3176
3177void ext4_set_aops(struct inode *inode)
3178{
3179 switch (ext4_inode_journal_mode(inode)) {
3180 case EXT4_INODE_ORDERED_DATA_MODE:
3181 if (test_opt(inode->i_sb, DELALLOC))
3182 inode->i_mapping->a_ops = &ext4_da_aops;
3183 else
3184 inode->i_mapping->a_ops = &ext4_ordered_aops;
3185 break;
3186 case EXT4_INODE_WRITEBACK_DATA_MODE:
3187 if (test_opt(inode->i_sb, DELALLOC))
3188 inode->i_mapping->a_ops = &ext4_da_aops;
3189 else
3190 inode->i_mapping->a_ops = &ext4_writeback_aops;
3191 break;
3192 case EXT4_INODE_JOURNAL_DATA_MODE:
3193 inode->i_mapping->a_ops = &ext4_journalled_aops;
3194 break;
3195 default:
3196 BUG();
3197 }
3198}
3199
3200
3201/*
3202 * ext4_discard_partial_page_buffers()
3203 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3204 * This function finds and locks the page containing the offset
3205 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3206 * Calling functions that already have the page locked should call
3207 * ext4_discard_partial_page_buffers_no_lock directly.
3208 */
3209int ext4_discard_partial_page_buffers(handle_t *handle,
3210 struct address_space *mapping, loff_t from,
3211 loff_t length, int flags)
3212{
3213 struct inode *inode = mapping->host;
3214 struct page *page;
3215 int err = 0;
3216
3217 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3218 mapping_gfp_mask(mapping) & ~__GFP_FS);
3219 if (!page)
3220 return -ENOMEM;
3221
3222 err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3223 from, length, flags);
3224
3225 unlock_page(page);
3226 page_cache_release(page);
3227 return err;
3228}
3229
/*
 * ext4_discard_partial_page_buffers_no_lock()
 * Zeros a page range of length 'length' starting from offset 'from'.
 * Buffer heads that correspond to the block-aligned regions of the
 * zeroed range will be unmapped.  Non-block-aligned regions
 * will have the corresponding buffer head mapped if needed so that
 * that region of the page can be updated with the partial zero-out.
 *
 * This function assumes that the page has already been locked.
 * The range to be discarded must be contained within the given page.
 * If the specified range exceeds the end of the page it will be shortened
 * to the end of the page that corresponds to 'from'.  This function is
 * appropriate for updating a page and its buffer heads to be unmapped and
 * zeroed for blocks that have either been released, or are going to be
 * released.
 *
 * handle: The journal handle
 * inode: The file's inode
 * page: A locked page that contains the offset "from"
 * from: The starting byte offset (from the beginning of the file)
 * to begin discarding
 * len: The length of bytes to discard
 * flags: Optional flags that may be used:
 *
 * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
 * Only zero the regions of the page whose buffer heads
 * have already been unmapped.  This flag is appropriate
 * for updating the contents of a page whose blocks may
 * have already been released, and we only want to zero
 * out the regions that correspond to those released blocks.
 *
 * Returns zero on success or negative on failure.
 */
3263static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3264 struct inode *inode, struct page *page, loff_t from,
3265 loff_t length, int flags)
3266{
3267 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3268 unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3269 unsigned int blocksize, max, pos;
3270 ext4_lblk_t iblock;
3271 struct buffer_head *bh;
3272 int err = 0;
3273
3274 blocksize = inode->i_sb->s_blocksize;
3275 max = PAGE_CACHE_SIZE - offset;
3276
3277 if (index != page->index)
3278 return -EINVAL;
3279
3280 /*
3281 * correct length if it does not fall between
3282 * 'from' and the end of the page
3283 */
3284 if (length > max || length < 0)
3285 length = max;
3286
3287 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3288
3289 if (!page_has_buffers(page))
3290 create_empty_buffers(page, blocksize, 0);
3291
3292 /* Find the buffer that contains "offset" */
3293 bh = page_buffers(page);
3294 pos = blocksize;
3295 while (offset >= pos) {
3296 bh = bh->b_this_page;
3297 iblock++;
3298 pos += blocksize;
3299 }
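/*
 * Example: with 1K blocks and offset == 2500, the loop advances past
 * pos 1024 and 2048 and stops with pos == 3072 and bh pointing at the
 * third buffer on the page (bytes 2048..3071), which is the buffer
 * containing "offset".
 */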
3300
3301 pos = offset;
3302 while (pos < offset + length) {
3303 unsigned int end_of_block, range_to_discard;
3304
3305 err = 0;
3306
3307 /* The length of space left to zero and unmap */
3308 range_to_discard = offset + length - pos;
3309
3310 /* The length of space until the end of the block */
3311 end_of_block = blocksize - (pos & (blocksize-1));
3312
3313 /*
3314 * Do not unmap or zero past end of block
3315 * for this buffer head
3316 */
3317 if (range_to_discard > end_of_block)
3318 range_to_discard = end_of_block;
3319
3320
/*
 * Skip this buffer head if we are only zeroing unmapped
 * regions of the page.
 */
3325 if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3326 buffer_mapped(bh))
3327 goto next;
3328
3329 /* If the range is block aligned, unmap */
3330 if (range_to_discard == blocksize) {
3331 clear_buffer_dirty(bh);
3332 bh->b_bdev = NULL;
3333 clear_buffer_mapped(bh);
3334 clear_buffer_req(bh);
3335 clear_buffer_new(bh);
3336 clear_buffer_delay(bh);
3337 clear_buffer_unwritten(bh);
3338 clear_buffer_uptodate(bh);
3339 zero_user(page, pos, range_to_discard);
3340 BUFFER_TRACE(bh, "Buffer discarded");
3341 goto next;
3342 }
3343
/*
 * If this block is not completely contained in the range
 * to be discarded, then it is not going to be released.  Because
 * we need to keep this block, we need to make sure this part
 * of the page is uptodate before we modify it by writing
 * partial zeros on it.
 */
3351 if (!buffer_mapped(bh)) {
3352 /*
3353 * Buffer head must be mapped before we can read
3354 * from the block
3355 */
3356 BUFFER_TRACE(bh, "unmapped");
3357 ext4_get_block(inode, iblock, bh, 0);
3358 /* unmapped? It's a hole - nothing to do */
3359 if (!buffer_mapped(bh)) {
3360 BUFFER_TRACE(bh, "still unmapped");
3361 goto next;
3362 }
3363 }
3364
3365 /* Ok, it's mapped. Make sure it's up-to-date */
3366 if (PageUptodate(page))
3367 set_buffer_uptodate(bh);
3368
3369 if (!buffer_uptodate(bh)) {
3370 err = -EIO;
3371 ll_rw_block(READ, 1, &bh);
3372 wait_on_buffer(bh);
3373 /* Uhhuh. Read error. Complain and punt.*/
3374 if (!buffer_uptodate(bh))
3375 goto next;
3376 }
3377
3378 if (ext4_should_journal_data(inode)) {
3379 BUFFER_TRACE(bh, "get write access");
3380 err = ext4_journal_get_write_access(handle, bh);
3381 if (err)
3382 goto next;
3383 }
3384
3385 zero_user(page, pos, range_to_discard);
3386
3387 err = 0;
3388 if (ext4_should_journal_data(inode)) {
3389 err = ext4_handle_dirty_metadata(handle, inode, bh);
3390 } else
3391 mark_buffer_dirty(bh);
3392
3393 BUFFER_TRACE(bh, "Partial buffer zeroed");
3394next:
3395 bh = bh->b_this_page;
3396 iblock++;
3397 pos += range_to_discard;
3398 }
3399
3400 return err;
3401}
3402
3403int ext4_can_truncate(struct inode *inode)
3404{
3405 if (S_ISREG(inode->i_mode))
3406 return 1;
3407 if (S_ISDIR(inode->i_mode))
3408 return 1;
3409 if (S_ISLNK(inode->i_mode))
3410 return !ext4_inode_is_fast_symlink(inode);
3411 return 0;
3412}
3413
/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode: File inode
 * @offset: The offset where the hole will begin
 * @len: The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */
3424
3425int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3426{
3427 struct inode *inode = file->f_path.dentry->d_inode;
3428 if (!S_ISREG(inode->i_mode))
3429 return -EOPNOTSUPP;
3430
3431 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3432 /* TODO: Add support for non extent hole punching */
3433 return -EOPNOTSUPP;
3434 }
3435
3436 if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3437 /* TODO: Add support for bigalloc file systems */
3438 return -EOPNOTSUPP;
3439 }
3440
3441 return ext4_ext_punch_hole(file, offset, length);
3442}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
void ext4_truncate(struct inode *inode)
{
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		return;

	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		ext4_ext_truncate(inode);
	else
		ext4_ind_truncate(inode);

	trace_ext4_truncate_exit(inode);
}

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success.  If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct inode *inode,
				struct ext4_iloc *iloc, int in_mem)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	ext4_fsblk_t block;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (!ext4_valid_inum(sb, inode->i_ino))
		return -EIO;

	iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((inode->i_ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
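
	/*
	 * Worked example (values depend on mkfs parameters): with 4 KiB
	 * blocks and 256-byte inodes, inodes_per_block is 16.  For inode
	 * 18 in group 0, inode_offset = 17, so the inode lives in the
	 * second itable block (17 / 16 == 1) at byte offset
	 * (17 % 16) * 256 == 256 within that block.
	 */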

	bh = sb_getblk(sb, block);
	if (!bh) {
		EXT4_ERROR_INODE_BLOCK(inode, block,
				       "unable to read itable block");
		return -EIO;
	}
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		/*
		 * If the buffer has the write error flag, we have failed
		 * to write out another inode in the same block.  In this
		 * case, we don't have to read the block because we may
		 * read the old inode data successfully.
		 */
		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
			set_buffer_uptodate(bh);

		if (buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (!bitmap_bh)
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
			if (table > b)
				b = table;
			end = b + EXT4_SB(sb)->s_inode_readahead_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				sb_breadahead(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(inode);
		get_bh(bh);
		bh->b_end_io = end_buffer_read_sync;
		submit_bh(READ | REQ_META | REQ_PRIO, bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, block,
					       "unable to read itable block");
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	/* We have all inode data except xattrs in memory here. */
	return __ext4_get_inode_loc(inode, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR));
}

void ext4_set_inode_flags(struct inode *inode)
{
	unsigned int flags = EXT4_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
	if (flags & EXT4_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
}

/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
void ext4_get_inode_flags(struct ext4_inode_info *ei)
{
	unsigned int vfs_fl;
	unsigned long old_fl, new_fl;

	do {
		vfs_fl = ei->vfs_inode.i_flags;
		old_fl = ei->i_flags;
		new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
				EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
				EXT4_DIRSYNC_FL);
		if (vfs_fl & S_SYNC)
			new_fl |= EXT4_SYNC_FL;
		if (vfs_fl & S_APPEND)
			new_fl |= EXT4_APPEND_FL;
		if (vfs_fl & S_IMMUTABLE)
			new_fl |= EXT4_IMMUTABLE_FL;
		if (vfs_fl & S_NOATIME)
			new_fl |= EXT4_NOATIME_FL;
		if (vfs_fl & S_DIRSYNC)
			new_fl |= EXT4_DIRSYNC_FL;
	} while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
}
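
/*
 * Note on the cmpxchg() loop in ext4_get_inode_flags(): ei->i_flags is
 * updated lock-free.  We snapshot old_fl, derive new_fl from it, and
 * cmpxchg() only installs new_fl if i_flags still equals old_fl; if a
 * concurrent update raced with us, cmpxchg() returns the (different)
 * current value and we simply recompute and retry.
 */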

static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
		/* we are using the combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks is stored in units of the fs block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}
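
/*
 * Worked example: with 4 KiB blocks, i_blkbits is 12, so an inode with
 * the HUGE_FILE flag set and an on-disk i_blocks of 10 reports
 * 10 << (12 - 9) == 80 to the VFS, i.e. eighty 512-byte units.
 */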

struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	int block;
	uid_t i_uid;
	gid_t i_gid;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc(inode, &iloc, 0);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
		    EXT4_INODE_SIZE(inode->i_sb)) {
			EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
				EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
				EXT4_INODE_SIZE(inode->i_sb));
			ret = -EIO;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
		EXT4_ERROR_INODE(inode, "checksum invalid");
		ret = -EIO;
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes;
	 * the test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(raw_inode);
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);

	/*
	 * Set transaction id's of transactions that have to be committed
	 * to finish f[data]sync. We set them to currently running transaction
	 * as we cannot be sure that the inode or some of its metadata isn't
	 * part of the transaction - the inode could have been reclaimed and
	 * now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			__le32 *magic = (void *)raw_inode +
					EXT4_GOOD_OLD_INODE_SIZE +
					ei->i_extra_isize;
			if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
				ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			inode->i_version |=
			(__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
		EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EIO;
		goto bad_inode;
	} else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		    (S_ISLNK(inode->i_mode) &&
		     !ext4_inode_is_fast_symlink(inode)))
			/* Validate extent which is part of inode */
			ret = ext4_ext_check_inode(inode);
	} else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
		   (S_ISLNK(inode->i_mode) &&
		    !ext4_inode_is_fast_symlink(inode))) {
		/* Validate block references which are part of inode */
		ret = ext4_ind_check_inode(inode);
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else {
		ret = -EIO;
		EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	brelse(iloc.bh);
	ext4_set_inode_flags(inode);
	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}
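
/*
 * Callers of ext4_iget() must handle both outcomes: an ERR_PTR() on
 * failure and a live inode on success.  A typical pattern (a sketch,
 * not taken from any one call site):
 *
 *	inode = ext4_iget(sb, ino);
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);	// -EIO, -ESTALE, -ENOMEM, ...
 *	// ... use inode, then drop the reference with iput(inode) ...
 */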

static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
				struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = inode->i_blocks;
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		return 0;
	}
	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
	} else {
		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		/* i_blocks is stored in units of the fs block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}
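
/*
 * The thresholds above, in plain numbers: 2^32 sectors of 512 bytes is
 * 2 TiB, so anything smaller fits in i_blocks_lo alone; 2^48 sectors is
 * 128 PiB, beyond which we switch to counting filesystem blocks and set
 * EXT4_INODE_HUGE_FILE so that readers know to reverse the shift.
 */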

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;
	uid_t i_uid;
	gid_t i_gid;

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	ext4_get_inode_flags(ei);
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	i_uid = i_uid_read(inode);
	i_gid = i_gid_read(inode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
/*
 * Fix up interoperability with old kernels. Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact
 */
		if (!ei->i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	if (ext4_inode_blocks_set(handle, raw_inode, ei))
		goto out_brelse;
	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_HURD))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	ext4_isize_set(raw_inode, ei->i_disksize);
	if (ei->i_disksize > 0x7fffffffULL) {
		struct super_block *sb = inode->i_sb;
		if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
				EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
				cpu_to_le32(EXT4_GOOD_OLD_REV)) {
			/* If this is the first large file
			 * created, add a flag to the superblock.
			 */
			err = ext4_journal_get_write_access(handle,
					EXT4_SB(sb)->s_sbh);
			if (err)
				goto out_brelse;
			ext4_update_dynamic_rev(sb);
			EXT4_SET_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
			ext4_handle_sync(handle);
			err = ext4_handle_dirty_super_now(handle, sb);
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];

	raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
	if (ei->i_extra_isize) {
		if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
			raw_inode->i_version_hi =
				cpu_to_le32(inode->i_version >> 32);
		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
	}

	ext4_inode_csum_set(inode, raw_inode, ei);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	rc = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (!err)
		err = rc;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);

	ext4_update_inode_fsync_trans(handle, inode, 0);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}
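
/*
 * Worked example of the uid/gid split above: uid 100000 is stored as
 * i_uid_low == 34464 and i_uid_high == 1, since 100000 == 1 * 65536 +
 * 34464.  Pre-32-bit-uid kernels only ever read the low half.
 */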

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running.  We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (current->flags & PF_MEMALLOC)
		return 0;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		if (wbc->sync_mode != WB_SYNC_ALL)
			return 0;

		err = ext4_force_commit(inode->i_sb);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc(inode, &iloc, 0);
		if (err)
			return err;
		if (wbc->sync_mode == WB_SYNC_ALL)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we must
 * start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr))
		dquot_initialize(inode);
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
					EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		error = dquot_transfer(inode, attr);
		if (error) {
			ext4_journal_stop(handle);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
	}

	if (attr->ia_valid & ATTR_SIZE) {
		inode_dio_wait(inode);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes)
				return -EFBIG;
		}
	}

	if (S_ISREG(inode->i_mode) &&
	    attr->ia_valid & ATTR_SIZE &&
	    (attr->ia_size < inode->i_size)) {
		handle_t *handle;

		handle = ext4_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}
		if (ext4_handle_valid(handle)) {
			error = ext4_orphan_add(handle, inode);
			orphan = 1;
		}
		EXT4_I(inode)->i_disksize = attr->ia_size;
		rc = ext4_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext4_journal_stop(handle);

		if (ext4_should_order_data(inode)) {
			error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
			if (error) {
				/* Do as much error cleanup as possible */
				handle = ext4_journal_start(inode, 3);
				if (IS_ERR(handle)) {
					ext4_orphan_del(NULL, inode);
					goto err_out;
				}
				ext4_orphan_del(handle, inode);
				orphan = 0;
				ext4_journal_stop(handle);
				goto err_out;
			}
		}
	}

	if (attr->ia_valid & ATTR_SIZE) {
		if (attr->ia_size != i_size_read(inode))
			truncate_setsize(inode, attr->ia_size);
		ext4_truncate(inode);
	}

	if (!rc) {
		setattr_copy(inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!rc && (ia_valid & ATTR_MODE))
		rc = ext4_acl_chmod(inode);

err_out:
	ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}

int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode;
	unsigned long delalloc_blocks;

	inode = dentry->d_inode;
	generic_fillattr(inode, stat);

	/*
	 * We can't update i_blocks while the block allocation is delayed;
	 * otherwise, if the system crashed before the real block
	 * allocation was done, we would have i_blocks inconsistent with
	 * the on-disk file blocks.
	 * We always keep i_blocks updated together with the real
	 * allocation.  But so as not to confuse userspace, stat
	 * will return a block count that includes the delayed allocation
	 * blocks for this file.
	 */
	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
				EXT4_I(inode)->i_reserved_data_blocks);

	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
	return 0;
}
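
/*
 * The shift pair above converts filesystem blocks to 512-byte units:
 * e.g. with 4 KiB blocks, (delalloc_blocks << 12) >> 9 multiplies the
 * reserved-but-unallocated block count by 8 before adding it to
 * stat->blocks, so `stat -c %b` already reflects delalloc data.
 */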

static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, nrblocks, chunk);
	return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
}

/*
 * Account for index blocks, block group bitmaps and block group
 * descriptor blocks if we modify data blocks and index blocks.  In the
 * worst case, the index blocks are spread over different block groups.
 *
 * If the data blocks are discontiguous, they may spread over
 * different block groups too.  Even if they are contiguous, with flexbg
 * they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to modify nrblocks?
	 * The "chunk" flag indicates whether the nrblocks are
	 * physically contiguous on disk.
	 *
	 * Direct IO and fallocate call get_block to allocate
	 * one single extent at a time, so they can set the "chunk" flag.
	 */
	idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors need
	 * to be accounted for.
	 */
	groups = idxblocks;
	if (chunk)
		groups += 1;
	else
		groups += nrblocks;

	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}
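
/*
 * Rough worked example (exact values depend on the fs geometry): for a
 * single contiguous chunk (chunk == 1) where ext4_index_trans_blocks()
 * returns 3, and assuming ngroups and s_gdb_count are at least 4, we
 * budget for 3 + 1 == 4 touched block groups, so the estimate is
 * 3 index blocks + 4 bitmaps + 4 descriptor blocks +
 * EXT4_META_TRANS_BLOCKS() for the sb/inode/quota/xattr blocks.
 */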

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, when
 * we allocate one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, 0);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}
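
/*
 * Example of the bpp term: with a 4 KiB page over a 1 KiB-block
 * filesystem, ext4_journal_blocks_per_page() is 4, so a worst-case
 * single-page write is costed as four discontiguous block allocations
 * (chunk == 0), plus those same 4 data blocks again in journalled mode.
 */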

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calls
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * Journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
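
#if 0
/*
 * Sketch of the idiomatic call sequence (not a real call site): reserve
 * write access to the raw inode, update the in-core fields you care
 * about, then mark the iloc dirty so the change lands in the journal.
 */
static int example_bump_generation(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;		/* iloc.bh already released */
	inode->i_generation++;		/* ...modify the in-core inode... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif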

/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or negative error number on failure.
 */
static int ext4_expand_extra_isize(struct inode *inode,
				   unsigned int new_extra_isize,
				   struct ext4_iloc iloc,
				   handle_t *handle)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;

	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
		return 0;

	raw_inode = ext4_raw_inode(&iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
			new_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	return ext4_expand_extra_isize_ea(inode, new_extra_isize,
					  raw_inode, handle);
}

/*
 * What we do here is to mark the in-core inode as clean with respect to inode
 * dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	static unsigned int mnt_count;
	int err, ret;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (ext4_handle_valid(handle) &&
	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
	    !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		/*
		 * We need extra buffer credits since we may write into EA block
		 * with this same handle. If journal_extend fails, then it will
		 * only result in a minor loss of functionality for that inode.
		 * If this is felt to be critical, then e2fsck should be run to
		 * force a large enough s_min_extra_isize.
		 */
		if ((jbd2_journal_extend(handle,
			     EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
			ret = ext4_expand_extra_isize(inode,
						      sbi->s_want_extra_isize,
						      iloc, handle);
			if (ret) {
				ext4_set_inode_state(inode,
						     EXT4_STATE_NO_EXPAND);
				if (mnt_count !=
					le16_to_cpu(sbi->s_es->s_mnt_count)) {
					ext4_warning(inode->i_sb,
					"Unable to expand inode %lu. Delete"
					" some EAs or run e2fsck.",
						inode->i_ino);
					mnt_count =
					  le16_to_cpu(sbi->s_es->s_mnt_count);
				}
			}
		}
	}
	if (!err)
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}

/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, 2);
	if (IS_ERR(handle))
		goto out;

	ext4_mark_inode_dirty(handle, inode);

	ext4_journal_stop(handle);
out:
	return;
}

#if 0
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext4_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static int ext4_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext4_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = jbd2_journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext4_handle_dirty_metadata(handle,
								 NULL,
								 iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;
	/* We have to allocate physical blocks for delalloc blocks
	 * before flushing the journal; otherwise delalloc blocks cannot
	 * be allocated any more.  Worse, a truncate on delalloc blocks
	 * could trigger a BUG by flushing delalloc blocks in the journal.
	 * There are no delalloc blocks in non-journal data mode.
	 */
	if (val && test_opt(inode->i_sb, DELALLOC)) {
		err = ext4_alloc_da_blocks(inode);
		if (err < 0)
			return err;
	}

	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		jbd2_journal_flush(journal);
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}
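
/*
 * For reference, this path is typically reached from the FS_IOC_SETFLAGS
 * ioctl when userspace toggles the journal-data flag.  A hedged sketch
 * (hypothetical file descriptor, error handling elided):
 *
 *	int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_JOURNAL_DATA_FL;	// ask for data journalling
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);
 */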

static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	/*
	 * This check is racy but catches the common case. We rely on
	 * __block_page_mkwrite() to do a reliable check.
	 */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = __block_page_mkwrite(vma, vmf,
						   ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
		       ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_CACHE_SHIFT)
		len = size & ~PAGE_CACHE_MASK;
	else
		len = PAGE_CACHE_SIZE;
	/*
	 * Return if we have all the buffers mapped. This avoids the need to do
	 * journal_start/journal_stop which can block and take a long time
	 */
	if (page_has_buffers(page)) {
		if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
					ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_on_page_writeback(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_write;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = __block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	return ret;
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/fs/ext4/inode.c
4 *
5 * Copyright (C) 1992, 1993, 1994, 1995
6 * Remy Card (card@masi.ibp.fr)
7 * Laboratoire MASI - Institut Blaise Pascal
8 * Universite Pierre et Marie Curie (Paris VI)
9 *
10 * from
11 *
12 * linux/fs/minix/inode.c
13 *
14 * Copyright (C) 1991, 1992 Linus Torvalds
15 *
16 * 64-bit file support on 64-bit platforms by Jakub Jelinek
17 * (jj@sunsite.ms.mff.cuni.cz)
18 *
19 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
20 */
21
22#include <linux/fs.h>
23#include <linux/mount.h>
24#include <linux/time.h>
25#include <linux/highuid.h>
26#include <linux/pagemap.h>
27#include <linux/dax.h>
28#include <linux/quotaops.h>
29#include <linux/string.h>
30#include <linux/buffer_head.h>
31#include <linux/writeback.h>
32#include <linux/pagevec.h>
33#include <linux/mpage.h>
34#include <linux/namei.h>
35#include <linux/uio.h>
36#include <linux/bio.h>
37#include <linux/workqueue.h>
38#include <linux/kernel.h>
39#include <linux/printk.h>
40#include <linux/slab.h>
41#include <linux/bitops.h>
42#include <linux/iomap.h>
43#include <linux/iversion.h>
44
45#include "ext4_jbd2.h"
46#include "xattr.h"
47#include "acl.h"
48#include "truncate.h"
49
50#include <trace/events/ext4.h>
51
52static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
53 struct ext4_inode_info *ei)
54{
55 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
56 __u32 csum;
57 __u16 dummy_csum = 0;
58 int offset = offsetof(struct ext4_inode, i_checksum_lo);
59 unsigned int csum_size = sizeof(dummy_csum);
60
61 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
62 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
63 offset += csum_size;
64 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
65 EXT4_GOOD_OLD_INODE_SIZE - offset);
66
67 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
68 offset = offsetof(struct ext4_inode, i_checksum_hi);
69 csum = ext4_chksum(sbi, csum, (__u8 *)raw +
70 EXT4_GOOD_OLD_INODE_SIZE,
71 offset - EXT4_GOOD_OLD_INODE_SIZE);
72 if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
73 csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
74 csum_size);
75 offset += csum_size;
76 }
77 csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
78 EXT4_INODE_SIZE(inode->i_sb) - offset);
79 }
80
81 return csum;
82}
83
84static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
85 struct ext4_inode_info *ei)
86{
87 __u32 provided, calculated;
88
89 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
90 cpu_to_le32(EXT4_OS_LINUX) ||
91 !ext4_has_metadata_csum(inode->i_sb))
92 return 1;
93
94 provided = le16_to_cpu(raw->i_checksum_lo);
95 calculated = ext4_inode_csum(inode, raw, ei);
96 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
97 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
98 provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
99 else
100 calculated &= 0xFFFF;
101
102 return provided == calculated;
103}
104
105void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
106 struct ext4_inode_info *ei)
107{
108 __u32 csum;
109
110 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
111 cpu_to_le32(EXT4_OS_LINUX) ||
112 !ext4_has_metadata_csum(inode->i_sb))
113 return;
114
115 csum = ext4_inode_csum(inode, raw, ei);
116 raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
117 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
118 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
119 raw->i_checksum_hi = cpu_to_le16(csum >> 16);
120}
121
122static inline int ext4_begin_ordered_truncate(struct inode *inode,
123 loff_t new_size)
124{
125 trace_ext4_begin_ordered_truncate(inode, new_size);
126 /*
127 * If jinode is zero, then we never opened the file for
128 * writing, so there's no need to call
129 * jbd2_journal_begin_ordered_truncate() since there's no
130 * outstanding writes we need to flush.
131 */
132 if (!EXT4_I(inode)->jinode)
133 return 0;
134 return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
135 EXT4_I(inode)->jinode,
136 new_size);
137}
138
139static void ext4_invalidatepage(struct page *page, unsigned int offset,
140 unsigned int length);
141static int __ext4_journalled_writepage(struct page *page, unsigned int len);
142static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
143static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
144 int pextents);
145
146/*
147 * Test whether an inode is a fast symlink.
148 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
149 */
150int ext4_inode_is_fast_symlink(struct inode *inode)
151{
152 if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
153 int ea_blocks = EXT4_I(inode)->i_file_acl ?
154 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
155
156 if (ext4_has_inline_data(inode))
157 return 0;
158
159 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
160 }
161 return S_ISLNK(inode->i_mode) && inode->i_size &&
162 (inode->i_size < EXT4_N_BLOCKS * 4);
163}
164
165/*
166 * Called at the last iput() if i_nlink is zero.
167 */
168void ext4_evict_inode(struct inode *inode)
169{
170 handle_t *handle;
171 int err;
172 /*
173 * Credits for final inode cleanup and freeing:
174 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
175 * (xattr block freeing), bitmap, group descriptor (inode freeing)
176 */
177 int extra_credits = 6;
178 struct ext4_xattr_inode_array *ea_inode_array = NULL;
179 bool freeze_protected = false;
180
181 trace_ext4_evict_inode(inode);
182
183 if (inode->i_nlink) {
184 /*
185 * When journalling data dirty buffers are tracked only in the
186 * journal. So although mm thinks everything is clean and
187 * ready for reaping the inode might still have some pages to
188 * write in the running transaction or waiting to be
189 * checkpointed. Thus calling jbd2_journal_invalidatepage()
190 * (via truncate_inode_pages()) to discard these buffers can
191 * cause data loss. Also even if we did not discard these
192 * buffers, we would have no way to find them after the inode
193 * is reaped and thus user could see stale data if he tries to
194 * read them before the transaction is checkpointed. So be
195 * careful and force everything to disk here... We use
196 * ei->i_datasync_tid to store the newest transaction
197 * containing inode's data.
198 *
199 * Note that directories do not have this problem because they
200 * don't use page cache.
201 */
202 if (inode->i_ino != EXT4_JOURNAL_INO &&
203 ext4_should_journal_data(inode) &&
204 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
205 inode->i_data.nrpages) {
206 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
207 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
208
209 jbd2_complete_transaction(journal, commit_tid);
210 filemap_write_and_wait(&inode->i_data);
211 }
212 truncate_inode_pages_final(&inode->i_data);
213
214 goto no_delete;
215 }
216
217 if (is_bad_inode(inode))
218 goto no_delete;
219 dquot_initialize(inode);
220
221 if (ext4_should_order_data(inode))
222 ext4_begin_ordered_truncate(inode, 0);
223 truncate_inode_pages_final(&inode->i_data);
224
225 /*
226 * For inodes with journalled data, transaction commit could have
227 * dirtied the inode. Flush worker is ignoring it because of I_FREEING
228 * flag but we still need to remove the inode from the writeback lists.
229 */
230 if (!list_empty_careful(&inode->i_io_list)) {
231 WARN_ON_ONCE(!ext4_should_journal_data(inode));
232 inode_io_list_del(inode);
233 }
234
235 /*
236 * Protect us against freezing - iput() caller didn't have to have any
237 * protection against it. When we are in a running transaction though,
238 * we are already protected against freezing and we cannot grab further
239 * protection due to lock ordering constraints.
240 */
241 if (!ext4_journal_current_handle()) {
242 sb_start_intwrite(inode->i_sb);
243 freeze_protected = true;
244 }
245
246 if (!IS_NOQUOTA(inode))
247 extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
248
249 /*
250 * Block bitmap, group descriptor, and inode are accounted in both
251 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
252 */
253 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
254 ext4_blocks_for_truncate(inode) + extra_credits - 3);
255 if (IS_ERR(handle)) {
256 ext4_std_error(inode->i_sb, PTR_ERR(handle));
257 /*
258 * If we're going to skip the normal cleanup, we still need to
259 * make sure that the in-core orphan linked list is properly
260 * cleaned up.
261 */
262 ext4_orphan_del(NULL, inode);
263 if (freeze_protected)
264 sb_end_intwrite(inode->i_sb);
265 goto no_delete;
266 }
267
268 if (IS_SYNC(inode))
269 ext4_handle_sync(handle);
270
271 /*
272 * Set inode->i_size to 0 before calling ext4_truncate(). We need
273 * special handling of symlinks here because i_size is used to
274 * determine whether ext4_inode_info->i_data contains symlink data or
275 * block mappings. Setting i_size to 0 will remove its fast symlink
276 * status. Erase i_data so that it becomes a valid empty block map.
277 */
278 if (ext4_inode_is_fast_symlink(inode))
279 memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
280 inode->i_size = 0;
281 err = ext4_mark_inode_dirty(handle, inode);
282 if (err) {
283 ext4_warning(inode->i_sb,
284 "couldn't mark inode dirty (err %d)", err);
285 goto stop_handle;
286 }
287 if (inode->i_blocks) {
288 err = ext4_truncate(inode);
289 if (err) {
290 ext4_error_err(inode->i_sb, -err,
291 "couldn't truncate inode %lu (err %d)",
292 inode->i_ino, err);
293 goto stop_handle;
294 }
295 }
296
297 /* Remove xattr references. */
298 err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
299 extra_credits);
300 if (err) {
301 ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
302stop_handle:
303 ext4_journal_stop(handle);
304 ext4_orphan_del(NULL, inode);
305 if (freeze_protected)
306 sb_end_intwrite(inode->i_sb);
307 ext4_xattr_inode_array_free(ea_inode_array);
308 goto no_delete;
309 }
310
311 /*
312 * Kill off the orphan record which ext4_truncate created.
313 * AKPM: I think this can be inside the above `if'.
314 * Note that ext4_orphan_del() has to be able to cope with the
315 * deletion of a non-existent orphan - this is because we don't
316 * know if ext4_truncate() actually created an orphan record.
317 * (Well, we could do this if we need to, but heck - it works)
318 */
319 ext4_orphan_del(handle, inode);
320 EXT4_I(inode)->i_dtime = (__u32)ktime_get_real_seconds();
321
322 /*
323 * One subtle ordering requirement: if anything has gone wrong
324 * (transaction abort, IO errors, whatever), then we can still
325 * do these next steps (the fs will already have been marked as
326 * having errors), but we can't free the inode if the mark_dirty
327 * fails.
328 */
329 if (ext4_mark_inode_dirty(handle, inode))
330 /* If that failed, just do the required in-core inode clear. */
331 ext4_clear_inode(inode);
332 else
333 ext4_free_inode(handle, inode);
334 ext4_journal_stop(handle);
335 if (freeze_protected)
336 sb_end_intwrite(inode->i_sb);
337 ext4_xattr_inode_array_free(ea_inode_array);
338 return;
339no_delete:
340 if (!list_empty(&EXT4_I(inode)->i_fc_list))
341 ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
342 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
343}
344
345#ifdef CONFIG_QUOTA
346qsize_t *ext4_get_reserved_space(struct inode *inode)
347{
348 return &EXT4_I(inode)->i_reserved_quota;
349}
350#endif
351
352/*
353 * Called with i_data_sem down, which is important since we can call
354 * ext4_discard_preallocations() from here.
355 */
356void ext4_da_update_reserve_space(struct inode *inode,
357 int used, int quota_claim)
358{
359 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
360 struct ext4_inode_info *ei = EXT4_I(inode);
361
362 spin_lock(&ei->i_block_reservation_lock);
363 trace_ext4_da_update_reserve_space(inode, used, quota_claim);
364 if (unlikely(used > ei->i_reserved_data_blocks)) {
365 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
366 "with only %d reserved data blocks",
367 __func__, inode->i_ino, used,
368 ei->i_reserved_data_blocks);
369 WARN_ON(1);
370 used = ei->i_reserved_data_blocks;
371 }
372
373 /* Update per-inode reservations */
374 ei->i_reserved_data_blocks -= used;
375 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
376
377 spin_unlock(&ei->i_block_reservation_lock);
378
379 /* Update quota subsystem for data blocks */
380 if (quota_claim)
381 dquot_claim_block(inode, EXT4_C2B(sbi, used));
382 else {
383 /*
384 * We did fallocate with an offset that is already delayed
385 * allocated. So on delayed allocated writeback we should
386 * not re-claim the quota for fallocated blocks.
387 */
388 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
389 }
390
391 /*
392 * If we have done all the pending block allocations and if
393 * there aren't any writers on the inode, we can discard the
394 * inode's preallocations.
395 */
396 if ((ei->i_reserved_data_blocks == 0) &&
397 !inode_is_open_for_write(inode))
398 ext4_discard_preallocations(inode, 0);
399}
400
401static int __check_block_validity(struct inode *inode, const char *func,
402 unsigned int line,
403 struct ext4_map_blocks *map)
404{
405 if (ext4_has_feature_journal(inode->i_sb) &&
406 (inode->i_ino ==
407 le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
408 return 0;
409 if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
410 ext4_error_inode(inode, func, line, map->m_pblk,
411 "lblock %lu mapped to illegal pblock %llu "
412 "(length %d)", (unsigned long) map->m_lblk,
413 map->m_pblk, map->m_len);
414 return -EFSCORRUPTED;
415 }
416 return 0;
417}
418
int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the result may differ, e.g.
	 * xfstests #223 when dioread_nolock is enabled. The reason is that
	 * we look up a block mapping in the extent status tree without
	 * taking i_data_sem, so the unwritten extent may have been
	 * converted in the meantime.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read(&EXT4_I(inode)->i_data_sem);

	/*
	 * We don't check m_len because the extent will be collapsed in the
	 * status tree, so the lengths might not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks
 * and returns the result if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and unwritten, the
 * resulting @map is marked as unwritten. If create == 1, it will mark
 * @map as mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been
 * allocated); in that case, @map is returned as unmapped but we still
 * fill map->m_len to indicate the length of the hole starting at
 * map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
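/*
 * For example (a sketch, not a caller in this file): a lookup-only
 * request for a single logical block, with no allocation, could be
 * issued as:
 *
 *	struct ext4_map_blocks map = { .m_lblk = lblk, .m_len = 1 };
 *	int ret = ext4_map_blocks(NULL, inode, &map, 0);
 *
 * where ret > 0 means map.m_pblk is valid (mapped or unwritten),
 * ret == 0 means a hole of map.m_len blocks at map.m_lblk, and
 * ret < 0 is an error.
 */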
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Look up the extent status tree first */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read(&EXT4_I(inode)->i_data_sem);

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If this is only a block(s) lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with the create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check the extents flag again here because a migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing. Force the migrate
			 * to fail by clearing the migrate flag.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into the
		 * extent status tree. Otherwise someone could look them up
		 * there and use them before they are really zeroed. We also
		 * have to unmap metadata before zeroing as otherwise
		 * writeback can overwrite zeros with stale data from the
		 * block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * the extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write(&EXT4_I(inode)->i_data_sem);
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on the
		 * transaction's ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
		ext4_fc_track_range(handle, inode, map->m_lblk,
				    map->m_lblk + map->m_len - 1);
	}

	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

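/*
 * get_block_t wrapper around _ext4_get_block(): map the blocks at
 * @iblock, allocating them only when @create is set.
 */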
int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated. The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		    || handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		ASSERT(create != 0);
		ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			    || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata. For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

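/*
 * Like ext4_getblk(), but additionally reads the buffer from disk if it
 * is not already uptodate. Returns NULL when the block is a hole and no
 * allocation was requested, or an ERR_PTR() on failure.
 */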
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}

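/*
 * Walk the buffers of a page that overlap the byte range [from, to) and
 * apply @fn (e.g. do_journal_get_write_access() or write_end_fn()) to
 * each of them. Buffers outside the range only set *partial when they
 * are not uptodate. The first error returned by @fn stops the walk and
 * is returned to the caller.
 */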
int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write(). So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage(). In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page. So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

#ifdef CONFIG_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2];
	int nr_wait = 0;
	int i;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ext4_read_bh_lock(bh, 0, false);
			wait[nr_wait++] = bh;
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	for (i = 0; i < nr_wait; i++) {
		wait_on_buffer(wait[i]);
		if (!buffer_uptodate(wait[i]))
			err = -EIO;
	}
	if (unlikely(err)) {
		page_zero_new_buffers(page, from, to);
	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
		for (i = 0; i < nr_wait; i++) {
			int err2;

			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
								bh_offset(wait[i]));
			if (err2) {
				clear_buffer_uptodate(wait[i]);
				err = err2;
			}
		}
	}

	return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to the orphan list in case
	 * we allocate blocks but the write fails for some reason.
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back. So grab it first before we start
	 * the transaction handle. This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		bool extended = (pos + len > inode->i_size) &&
				!ext4_verity_in_progress(inode);

		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size. Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add the inode to the orphan list in case we crash before
		 * truncate finishes.
		 */
		if (extended && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (extended) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - e.g., when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list. Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_write_end(inode, pos, len, copied);
	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
		ret = 0;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * It's important to update i_size while still holding the page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 *
	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
	 * blocks are being written past EOF, so skip the i_size update.
	 */
	if (!verity)
		i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed || inline_data)
		ret = ext4_mark_inode_dirty(handle, inode);

errout:
	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffers to be dirty, since in data=journal mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;
	int inline_data = ext4_has_inline_data(inode);
	bool verity = ext4_verity_in_progress(inode);

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (inline_data) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
		ret = 0;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	if (!verity)
		size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos && !verity)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed || inline_data) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

errout:
	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than we copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size && !verity) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end. Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}

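/*
 * Undo ext4_da_reserve_space() for @to_free clusters: drop the inode's
 * reserved-block count, the global dirty-clusters counter, and the
 * corresponding quota reservation.
 */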
void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere. Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped. We somewhat abuse m_flags to store whether the extent
	 * is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
	unsigned int do_map:1;
	unsigned int scanned_until_end:1;
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	mpd->scanned_until_end = 0;
	index = mpd->first_page;
	end = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec);
	while (index <= end) {
		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		 percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
}

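/*
 * Callback for ext4_walk_page_buffers(): true for dirty buffers that
 * still need block allocation (delayed) or unwritten extent conversion.
 */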
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * ext4_insert_delayed_block - adds a delayed block to the extents status
 *                             tree, incrementing the reserved cluster/block
 *                             count or making a pending reservation
 *                             where needed
 *
 * @inode - file containing the newly added block
 * @lblk - logical block to be added
 *
 * Returns 0 on success, negative error code on failure.
 */
static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int ret;
	bool allocated = false;
	bool reserved = false;

	/*
	 * If the cluster containing lblk is shared with a delayed,
	 * written, or unwritten extent in a bigalloc file system, it's
	 * already been accounted for and does not need to be reserved.
	 * A pending reservation must be made for the cluster if it's
	 * shared with a written or unwritten extent and doesn't already
	 * have one. Written and unwritten extents can be purged from the
	 * extents status tree if the system is under memory pressure, so
	 * it's necessary to examine the extent tree if a search of the
	 * extents status tree doesn't get a match.
	 */
	if (sbi->s_cluster_ratio == 1) {
		ret = ext4_da_reserve_space(inode);
		if (ret != 0)   /* ENOSPC */
			goto errout;
		reserved = true;
	} else {   /* bigalloc */
		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
			if (!ext4_es_scan_clu(inode,
					      &ext4_es_is_mapped, lblk)) {
				ret = ext4_clu_mapped(inode,
						      EXT4_B2C(sbi, lblk));
				if (ret < 0)
					goto errout;
				if (ret == 0) {
					ret = ext4_da_reserve_space(inode);
					if (ret != 0)   /* ENOSPC */
						goto errout;
					reserved = true;
				} else {
					allocated = true;
				}
			} else {
				allocated = true;
			}
		}
	}

	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
	if (ret && reserved)
		ext4_da_release_space(inode, 1);

errout:
	return ret;
}

/*
 * This function grabs code from the very beginning of
 * ext4_map_blocks, but assumes that the caller is coming from the
 * delayed write path. It looks up the requested blocks and sets the
 * buffer delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
		  (unsigned long) map->m_lblk);

	/* Look up the extent status tree first */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * A delayed extent could have been allocated by fallocate
		 * in the meantime, so we need to check for that.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG();

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;

		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */

		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read(&EXT4_I(inode)->i_data_sem);

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin(). It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block mapping the unwritten extent
 * and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is already allocated;
	 * preallocated blocks are unmapped but should be treated the same
	 * as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/*
		 * A delayed write to an unwritten bh should be marked new
		 * and mapped. Mapped ensures that we don't do get_block
		 * multiple times when we write to the same offset, and new
		 * ensures that we do proper zeroout for partial writes.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}

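/*
 * Reference helpers used to pin a page's buffers across the page-lock
 * drop in __ext4_journalled_writepage().
 */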
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

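/*
 * Write out a page in data=journal mode: instead of submitting block IO,
 * the page's data buffers (or the inline data) are journalled as
 * metadata in the running transaction.
 */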
static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
	 */
	get_page(page);
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		put_page(page);
		goto out_no_pagelock;
	}
	BUG_ON(!ext4_handle_valid(handle));

	lock_page(page);
	put_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		ext4_journal_stop(handle);
		ret = 0;
		goto out;
	}

	if (inline_data) {
		ret = ext4_mark_inode_dirty(handle, inode);
	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	unlock_page(page);
out_no_pagelock:
	if (!inline_data && page_bufs)
		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
				       NULL, bput_one);
	brelse(inode_bh);
	return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We even don't
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * page lock so we have to do some magic.
 *
 * This function can get called via...
 * - ext4_writepages after taking page lock (have journal handle)
 * - journal_submit_inode_data_buffers (no journal handle)
 * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 * - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have a page with
 * multiple blocks we need to write those buffer_heads that are mapped. This
 * is important for mmap-based writes. So if we do, with blocksize 1K,
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but the other buffer_heads would be unmapped but dirty (dirtied
 * via do_wp_page). So writepage should write the first block. If we modify
 * the mmap area beyond 1024 we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below:
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *	ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't get recursive
 * page locking.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;
	struct ext4_io_submit io_submit;
	bool keep_towrite = false;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		unlock_page(page);
		return -EIO;
	}

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_SHIFT &&
	    !ext4_verity_in_progress(inode))
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;

	page_bufs = page_buffers(page);
	/*
	 * We cannot do block allocation or other extent handling in this
	 * function. If there are buffers needing that, we have to redirty
	 * the page. But we may reach here when we do a journal commit via
	 * journal_submit_inode_data_buffers() and in that case we must write
	 * allocated buffers to achieve data=ordered mode guarantees.
	 *
	 * Also, if there is only one buffer per page (the fs block
	 * size == the page size), if one buffer needs block
	 * allocation or needs to modify the extent tree to clear the
	 * unwritten flag, we know that the page can't be written at
	 * all, so we might as well refuse the write immediately.
	 * Unfortunately if the block size != page size, we can't as
	 * easily detect this case using ext4_walk_page_buffers(), but
	 * for the extremely common case, this is an optimization that
	 * skips a useless round trip through ext4_bio_write_page().
	 */
	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				   ext4_bh_delay_or_unwritten)) {
		redirty_page_for_writepage(wbc, page);
		if ((current->flags & PF_MEMALLOC) ||
		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
			/*
			 * For memory cleaning there's no point in writing only
			 * some buffers. So just bail out. Warn if we came here
			 * from direct reclaim.
			 */
			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
							== PF_MEMALLOC);
			unlock_page(page);
			return 0;
		}
		keep_towrite = true;
	}

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache. Add buffers and journal it. There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	ext4_io_submit_init(&io_submit, wbc);
	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_submit.io_end) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return -ENOMEM;
	}
	ret = ext4_bio_write_page(&io_submit, page, len, keep_towrite);
	ext4_io_submit(&io_submit);
	/* Drop the io_end reference we got from init */
	ext4_put_io_end_defer(io_submit.io_end);
	return ret;
}

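/*
 * Submit a single, fully mapped page for IO, trimming the write to
 * i_size (unless fs-verity is writing Merkle tree blocks past EOF) and
 * advancing mpd->first_page on the way out.
 */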
static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
	int len;
	loff_t size;
	int err;

	BUG_ON(page->index != mpd->first_page);
	clear_page_dirty_for_io(page);
	/*
	 * We have to be very careful here! Nothing protects the writeback
	 * path against i_size changes and the page can be writeably mapped
	 * into page tables. So an application can be growing i_size and
	 * writing data through mmap while writeback runs.
	 * clear_page_dirty_for_io() write-protects our page in page tables
	 * and the page cannot get written to again until we release the
	 * page lock. So only after clear_page_dirty_for_io() are we safe to
	 * sample i_size for ext4_bio_write_page() to zero-out the tail of
	 * the written page. We rely on the barrier provided by
	 * TestClearPageDirty in clear_page_dirty_for_io() to make sure
	 * i_size is really sampled only after page tables are updated.
	 */
	size = i_size_read(mpd->inode);
	if (page->index == size >> PAGE_SHIFT &&
	    !ext4_verity_in_progress(mpd->inode))
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	err = ext4_bio_write_page(&mpd->io_submit, page, len, false);
	if (!err)
		mpd->wbc->nr_to_write--;
	mpd->first_page++;

	return err;
}

#define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048

/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contiguous blocks in the same state. If
 * the buffer doesn't require mapping for writeback and we haven't started
 * the extent of buffers to map yet, the function returns 'true' immediately
 * - the caller can write the buffer right away. Otherwise the function
 * returns true if the block has been added to the extent, false if the block
 * couldn't be added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		/* We cannot map unless handle is started... */
		if (!mpd->do_map)
			return false;
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}

/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh up to @head (exclusive) and either
 * submit the page for IO if all buffers in this page were mapped and there's
 * no accumulated extent of buffers to map, or add buffers in the page to the
 * extent of buffers to map. The function returns 1 if the caller can continue
 * by processing the next page, 0 if it should stop adding buffers to the
 * extent to map because we cannot extend it anymore. It can also return a
 * value < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
							>> inode->i_blkbits;

	if (ext4_verity_in_progress(inode))
		blocks = EXT_MAX_BLOCKS;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map? */
			if (mpd->map.m_len)
				return 0;
			/* Buffer needs mapping and handle is not started? */
			if (!mpd->do_map)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_page(mpd, head->b_page);
		if (err < 0)
			return err;
	}
	if (lblk >= blocks) {
		mpd->scanned_until_end = 1;
		return 0;
	}
	return 1;
}

/*
 * mpage_process_page - update page buffers corresponding to changed extent
 *			and may submit fully mapped page for IO
 *
 * @mpd - description of extent to map, on return next extent to map
 * @m_lblk - logical block mapping.
 * @m_pblk - corresponding physical mapping.
 * @map_bh - determines on return whether this page requires any further
 *	     mapping or not.
 *
 * Scan given page buffers corresponding to changed extent and update buffer
 * state according to new extent state.
 * We map delalloc buffers to their physical location, clear unwritten bits.
 * If the given page is not fully mapped, we update @map to the next extent in
 * the given page that needs mapping & return @map_bh as true.
 */
static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
			      bool *map_bh)
{
	struct buffer_head *head, *bh;
	ext4_io_end_t *io_end = mpd->io_submit.io_end;
	ext4_lblk_t lblk = *m_lblk;
	ext4_fsblk_t pblock = *m_pblk;
	int err = 0;
	int blkbits = mpd->inode->i_blkbits;
	ssize_t io_end_size = 0;
	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);

	bh = head = page_buffers(page);
	do {
		if (lblk < mpd->map.m_lblk)
			continue;
		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
			/*
			 * Buffer after end of mapped extent.
			 * Find next buffer in the page to map.
			 */
			mpd->map.m_len = 0;
			mpd->map.m_flags = 0;
			io_end_vec->size += io_end_size;
			io_end_size = 0;

			err = mpage_process_page_bufs(mpd, head, bh, lblk);
			if (err > 0)
				err = 0;
			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
				io_end_vec = ext4_alloc_io_end_vec(io_end);
				if (IS_ERR(io_end_vec)) {
					err = PTR_ERR(io_end_vec);
					goto out;
				}
				io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
			}
			*map_bh = true;
			goto out;
		}
		if (buffer_delay(bh)) {
			clear_buffer_delay(bh);
			bh->b_blocknr = pblock++;
		}
		clear_buffer_unwritten(bh);
		io_end_size += (1 << blkbits);
	} while (lblk++, (bh = bh->b_this_page) != head);

	io_end_vec->size += io_end_size;
	io_end_size = 0;
	*map_bh = false;
out:
	*m_lblk = lblk;
	*m_pblk = pblock;
	return err;
}

2306/*
2307 * mpage_map_buffers - update buffers corresponding to changed extent and
2308 * submit fully mapped pages for IO
2309 *
2310 * @mpd - description of extent to map, on return next extent to map
2311 *
2312 * Scan buffers corresponding to changed extent (we expect corresponding pages
2313 * to be already locked) and update buffer state according to new extent state.
2314 * We map delalloc buffers to their physical location, clear unwritten bits,
2315 * and mark buffers as uninit when we perform writes to unwritten extents
2316 * and do extent conversion after IO is finished. If the last page is not fully
2317 * mapped, we update @map to the next extent in the last page that needs
2318 * mapping. Otherwise we submit the page for IO.
2319 */
2320static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2321{
2322 struct pagevec pvec;
2323 int nr_pages, i;
2324 struct inode *inode = mpd->inode;
2325 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2326 pgoff_t start, end;
2327 ext4_lblk_t lblk;
2328 ext4_fsblk_t pblock;
2329 int err;
2330 bool map_bh = false;
2331
2332 start = mpd->map.m_lblk >> bpp_bits;
2333 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2334 lblk = start << bpp_bits;
2335 pblock = mpd->map.m_pblk;
2336
2337 pagevec_init(&pvec);
2338 while (start <= end) {
2339 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
2340 &start, end);
2341 if (nr_pages == 0)
2342 break;
2343 for (i = 0; i < nr_pages; i++) {
2344 struct page *page = pvec.pages[i];
2345
2346 err = mpage_process_page(mpd, page, &lblk, &pblock,
2347 &map_bh);
2348 /*
2349			 * If map_bh is true, the page may require further bh
2350			 * mapping, or the page may already have been submitted
2351			 * for IO, so return to the caller for further extent mapping.
2352 */
2353 if (err < 0 || map_bh)
2354 goto out;
2355 /* Page fully mapped - let IO run! */
2356 err = mpage_submit_page(mpd, page);
2357 if (err < 0)
2358 goto out;
2359 }
2360 pagevec_release(&pvec);
2361 }
2362	/* Extent fully mapped and matches the page boundary. We are done. */
2363 mpd->map.m_len = 0;
2364 mpd->map.m_flags = 0;
2365 return 0;
2366out:
2367 pagevec_release(&pvec);
2368 return err;
2369}
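
/*
 * Editorial note (an illustrative case for the page arithmetic above): with
 * 4K pages and a 1K block size, bpp_bits == PAGE_SHIFT - i_blkbits == 2, so
 * an extent starting at map->m_lblk == 13 begins in page index 13 >> 2 == 3,
 * and lblk = 3 << 2 == 12 is the first block of that page; the walk then
 * advances lblk and pblock buffer by buffer from there.
 */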
2370
2371static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2372{
2373 struct inode *inode = mpd->inode;
2374 struct ext4_map_blocks *map = &mpd->map;
2375 int get_blocks_flags;
2376 int err, dioread_nolock;
2377
2378 trace_ext4_da_write_pages_extent(inode, map);
2379 /*
2380 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2381 * to convert an unwritten extent to be initialized (in the case
2382 * where we have written into one or more preallocated blocks). It is
2383 * possible that we're going to need more metadata blocks than
2384	 * previously reserved. However, we must not fail because we're in
2385	 * writeback and there is nothing we can do about it, so failing might
2386	 * result in data loss. So use reserved blocks to allocate metadata if
2387	 * possible.
2388 *
2389 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2390 * the blocks in question are delalloc blocks. This indicates
2391	 * that the blocks and quotas have already been checked when
2392 * the data was copied into the page cache.
2393 */
2394 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2395 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2396 EXT4_GET_BLOCKS_IO_SUBMIT;
2397 dioread_nolock = ext4_should_dioread_nolock(inode);
2398 if (dioread_nolock)
2399 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2400 if (map->m_flags & BIT(BH_Delay))
2401 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2402
2403 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2404 if (err < 0)
2405 return err;
2406 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2407 if (!mpd->io_submit.io_end->handle &&
2408 ext4_handle_valid(handle)) {
2409 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2410 handle->h_rsv_handle = NULL;
2411 }
2412 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2413 }
2414
2415 BUG_ON(map->m_len == 0);
2416 return 0;
2417}
2418
2419/*
2420 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2421 * mpd->len and submit pages underlying it for IO
2422 *
2423 * @handle - handle for journal operations
2424 * @mpd - extent to map
2425 * @give_up_on_write - we set this to true iff there is a fatal error and there
2426 * is no hope of writing the data. The caller should discard
2427 * dirty pages to avoid infinite loops.
2428 *
2429 * The function maps the extent starting at mpd->lblk of length mpd->len. If
2430 * it is delayed, blocks are allocated; if it is unwritten, we may need to
2431 * convert them to initialized or split the described range from a larger
2432 * unwritten extent. We need not map all of the described range, since
2433 * allocation can return fewer blocks or the range may be covered by more
2434 * unwritten extents; we cannot map more because we are limited by reserved
2435 * transaction credits. On the other hand, we always make sure that the last
2436 * touched page is fully mapped so that it can be written out (and thus
2437 * forward progress is guaranteed). After mapping we submit all mapped pages for IO.
2438 */
2439static int mpage_map_and_submit_extent(handle_t *handle,
2440 struct mpage_da_data *mpd,
2441 bool *give_up_on_write)
2442{
2443 struct inode *inode = mpd->inode;
2444 struct ext4_map_blocks *map = &mpd->map;
2445 int err;
2446 loff_t disksize;
2447 int progress = 0;
2448 ext4_io_end_t *io_end = mpd->io_submit.io_end;
2449 struct ext4_io_end_vec *io_end_vec;
2450
2451 io_end_vec = ext4_alloc_io_end_vec(io_end);
2452 if (IS_ERR(io_end_vec))
2453 return PTR_ERR(io_end_vec);
2454 io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2455 do {
2456 err = mpage_map_one_extent(handle, mpd);
2457 if (err < 0) {
2458 struct super_block *sb = inode->i_sb;
2459
2460 if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2461 ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
2462 goto invalidate_dirty_pages;
2463 /*
2464			 * Let the upper layers retry transient errors.
2465			 * In the case of ENOSPC, if ext4_count_free_clusters()
2466 * is non-zero, a commit should free up blocks.
2467 */
2468 if ((err == -ENOMEM) ||
2469 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2470 if (progress)
2471 goto update_disksize;
2472 return err;
2473 }
2474 ext4_msg(sb, KERN_CRIT,
2475 "Delayed block allocation failed for "
2476 "inode %lu at logical offset %llu with"
2477 " max blocks %u with error %d",
2478 inode->i_ino,
2479 (unsigned long long)map->m_lblk,
2480 (unsigned)map->m_len, -err);
2481 ext4_msg(sb, KERN_CRIT,
2482 "This should not happen!! Data will "
2483 "be lost\n");
2484 if (err == -ENOSPC)
2485 ext4_print_free_blocks(inode);
2486 invalidate_dirty_pages:
2487 *give_up_on_write = true;
2488 return err;
2489 }
2490 progress = 1;
2491 /*
2492 * Update buffer state, submit mapped pages, and get us new
2493 * extent to map
2494 */
2495 err = mpage_map_and_submit_buffers(mpd);
2496 if (err < 0)
2497 goto update_disksize;
2498 } while (map->m_len);
2499
2500update_disksize:
2501 /*
2502 * Update on-disk size after IO is submitted. Races with
2503 * truncate are avoided by checking i_size under i_data_sem.
2504 */
2505 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2506 if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2507 int err2;
2508 loff_t i_size;
2509
2510 down_write(&EXT4_I(inode)->i_data_sem);
2511 i_size = i_size_read(inode);
2512 if (disksize > i_size)
2513 disksize = i_size;
2514 if (disksize > EXT4_I(inode)->i_disksize)
2515 EXT4_I(inode)->i_disksize = disksize;
2516 up_write(&EXT4_I(inode)->i_data_sem);
2517 err2 = ext4_mark_inode_dirty(handle, inode);
2518 if (err2) {
2519 ext4_error_err(inode->i_sb, -err2,
2520 "Failed to mark inode %lu dirty",
2521 inode->i_ino);
2522 }
2523 if (!err)
2524 err = err2;
2525 }
2526 return err;
2527}
2528
2529/*
2530 * Calculate the total number of credits to reserve for one writepages
2531 * iteration. This is called from ext4_writepages(). We map an extent of
2532 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2533 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2534 * bpp - 1 blocks in bpp different extents.
2535 */
2536static int ext4_da_writepages_trans_blocks(struct inode *inode)
2537{
2538 int bpp = ext4_journal_blocks_per_page(inode);
2539
2540 return ext4_meta_trans_blocks(inode,
2541 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2542}
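
/*
 * Editorial note (an illustrative case for the credit estimate above): with
 * 4K pages and a 1K block size, ext4_journal_blocks_per_page() gives
 * bpp == 4, so one writepages iteration reserves credits for mapping up to
 * MAX_WRITEPAGES_EXTENT_LEN + 3 blocks spread over at most 4 separate
 * extents, as described in the comment above.
 */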
2543
2544/*
2545 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2546 * and underlying extent to map
2547 *
2548 * @mpd - where to look for pages
2549 *
2550 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2551 * IO immediately. When we find a page which isn't mapped we start
2552 * accumulating an extent of buffers underlying these pages that need mapping
2553 * (formed by either delayed or unwritten buffers). We also lock the pages
2554 * containing these buffers. The extent found is returned in the @mpd
2555 * structure (starting at mpd->lblk with length mpd->len blocks).
2556 *
2557 * Note that this function can attach bios to one io_end structure which are
2558 * neither logically nor physically contiguous. Though it may seem an
2559 * unnecessary complication, it is inevitable in the blocksize < pagesize
2560 * case, as we need to track IO to all buffers underlying a page in one io_end.
2561 */
2562static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2563{
2564 struct address_space *mapping = mpd->inode->i_mapping;
2565 struct pagevec pvec;
2566 unsigned int nr_pages;
2567 long left = mpd->wbc->nr_to_write;
2568 pgoff_t index = mpd->first_page;
2569 pgoff_t end = mpd->last_page;
2570 xa_mark_t tag;
2571 int i, err = 0;
2572 int blkbits = mpd->inode->i_blkbits;
2573 ext4_lblk_t lblk;
2574 struct buffer_head *head;
2575
2576 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2577 tag = PAGECACHE_TAG_TOWRITE;
2578 else
2579 tag = PAGECACHE_TAG_DIRTY;
2580
2581 pagevec_init(&pvec);
2582 mpd->map.m_len = 0;
2583 mpd->next_page = index;
2584 while (index <= end) {
2585 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2586 tag);
2587 if (nr_pages == 0)
2588 break;
2589
2590 for (i = 0; i < nr_pages; i++) {
2591 struct page *page = pvec.pages[i];
2592
2593 /*
2594 * Accumulated enough dirty pages? This doesn't apply
2595 * to WB_SYNC_ALL mode. For integrity sync we have to
2596 * keep going because someone may be concurrently
2597 * dirtying pages, and we might have synced a lot of
2598 * newly appeared dirty pages, but have not synced all
2599 * of the old dirty pages.
2600 */
2601 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2602 goto out;
2603
2604 /* If we can't merge this page, we are done. */
2605 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2606 goto out;
2607
2608 lock_page(page);
2609 /*
2610			 * If the page is no longer dirty, or its mapping no
2611			 * longer corresponds to the inode we are writing (which
2612			 * means it has been truncated or invalidated), or the
2613			 * page is already under writeback and we are not doing
2614			 * a data integrity writeback, skip the page.
2615 */
2616 if (!PageDirty(page) ||
2617 (PageWriteback(page) &&
2618 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2619 unlikely(page->mapping != mapping)) {
2620 unlock_page(page);
2621 continue;
2622 }
2623
2624 wait_on_page_writeback(page);
2625 BUG_ON(PageWriteback(page));
2626
2627 if (mpd->map.m_len == 0)
2628 mpd->first_page = page->index;
2629 mpd->next_page = page->index + 1;
2630 /* Add all dirty buffers to mpd */
2631 lblk = ((ext4_lblk_t)page->index) <<
2632 (PAGE_SHIFT - blkbits);
2633 head = page_buffers(page);
2634 err = mpage_process_page_bufs(mpd, head, head, lblk);
2635 if (err <= 0)
2636 goto out;
2637 err = 0;
2638 left--;
2639 }
2640 pagevec_release(&pvec);
2641 cond_resched();
2642 }
2643 mpd->scanned_until_end = 1;
2644 return 0;
2645out:
2646 pagevec_release(&pvec);
2647 return err;
2648}
2649
2650static int ext4_writepages(struct address_space *mapping,
2651 struct writeback_control *wbc)
2652{
2653 pgoff_t writeback_index = 0;
2654 long nr_to_write = wbc->nr_to_write;
2655 int range_whole = 0;
2656 int cycled = 1;
2657 handle_t *handle = NULL;
2658 struct mpage_da_data mpd;
2659 struct inode *inode = mapping->host;
2660 int needed_blocks, rsv_blocks = 0, ret = 0;
2661 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2662 struct blk_plug plug;
2663 bool give_up_on_write = false;
2664
2665 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2666 return -EIO;
2667
2668 percpu_down_read(&sbi->s_writepages_rwsem);
2669 trace_ext4_writepages(inode, wbc);
2670
2671 /*
2672	 * No pages to write? This is mainly a kludge to avoid starting
2673	 * a transaction for special inodes like the journal inode on the last
2674	 * iput(), because that could violate lock ordering on umount.
2675 */
2676 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2677 goto out_writepages;
2678
2679 if (ext4_should_journal_data(inode)) {
2680 ret = generic_writepages(mapping, wbc);
2681 goto out_writepages;
2682 }
2683
2684 /*
2685 * If the filesystem has aborted, it is read-only, so return
2686 * right away instead of dumping stack traces later on that
2687 * will obscure the real source of the problem. We test
2688	 * EXT4_MF_FS_ABORTED instead of sb->s_flags' SB_RDONLY because
2689 * the latter could be true if the filesystem is mounted
2690 * read-only, and in that case, ext4_writepages should
2691 * *never* be called, so if that ever happens, we would want
2692 * the stack trace.
2693 */
2694 if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2695 ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
2696 ret = -EROFS;
2697 goto out_writepages;
2698 }
2699
2700 /*
2701 * If we have inline data and arrive here, it means that
2702 * we will soon create the block for the 1st page, so
2703 * we'd better clear the inline data here.
2704 */
2705 if (ext4_has_inline_data(inode)) {
2706 /* Just inode will be modified... */
2707 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2708 if (IS_ERR(handle)) {
2709 ret = PTR_ERR(handle);
2710 goto out_writepages;
2711 }
2712 BUG_ON(ext4_test_inode_state(inode,
2713 EXT4_STATE_MAY_INLINE_DATA));
2714 ext4_destroy_inline_data(handle, inode);
2715 ext4_journal_stop(handle);
2716 }
2717
2718 if (ext4_should_dioread_nolock(inode)) {
2719 /*
2720 * We may need to convert up to one extent per block in
2721 * the page and we may dirty the inode.
2722 */
2723 rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2724 PAGE_SIZE >> inode->i_blkbits);
2725 }
2726
2727 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2728 range_whole = 1;
2729
2730 if (wbc->range_cyclic) {
2731 writeback_index = mapping->writeback_index;
2732 if (writeback_index)
2733 cycled = 0;
2734 mpd.first_page = writeback_index;
2735 mpd.last_page = -1;
2736 } else {
2737 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2738 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2739 }
2740
2741 mpd.inode = inode;
2742 mpd.wbc = wbc;
2743 ext4_io_submit_init(&mpd.io_submit, wbc);
2744retry:
2745 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2746 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2747 blk_start_plug(&plug);
2748
2749 /*
2750	 * First, write back pages that don't need mapping - we can avoid
2751	 * starting a transaction unnecessarily and also avoid being blocked
2752	 * in the block layer on device congestion while having a transaction
2753	 * started.
2754 */
2755 mpd.do_map = 0;
2756 mpd.scanned_until_end = 0;
2757 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2758 if (!mpd.io_submit.io_end) {
2759 ret = -ENOMEM;
2760 goto unplug;
2761 }
2762 ret = mpage_prepare_extent_to_map(&mpd);
2763 /* Unlock pages we didn't use */
2764 mpage_release_unused_pages(&mpd, false);
2765 /* Submit prepared bio */
2766 ext4_io_submit(&mpd.io_submit);
2767 ext4_put_io_end_defer(mpd.io_submit.io_end);
2768 mpd.io_submit.io_end = NULL;
2769 if (ret < 0)
2770 goto unplug;
2771
2772 while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
2773		/* For each extent of pages we use a new io_end */
2774 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2775 if (!mpd.io_submit.io_end) {
2776 ret = -ENOMEM;
2777 break;
2778 }
2779
2780 /*
2781		 * We have two constraints: we find one extent to map and we
2782		 * must always write out the whole page (this makes a difference
2783		 * when blocksize < pagesize) so that we don't block on IO when
2784		 * we try to write out the rest of the page. Journalled mode is
2785		 * not supported by delalloc.
2786 */
2787 BUG_ON(ext4_should_journal_data(inode));
2788 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2789
2790 /* start a new transaction */
2791 handle = ext4_journal_start_with_reserve(inode,
2792 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2793 if (IS_ERR(handle)) {
2794 ret = PTR_ERR(handle);
2795 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2796 "%ld pages, ino %lu; err %d", __func__,
2797 wbc->nr_to_write, inode->i_ino, ret);
2798 /* Release allocated io_end */
2799 ext4_put_io_end(mpd.io_submit.io_end);
2800 mpd.io_submit.io_end = NULL;
2801 break;
2802 }
2803 mpd.do_map = 1;
2804
2805 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2806 ret = mpage_prepare_extent_to_map(&mpd);
2807 if (!ret && mpd.map.m_len)
2808 ret = mpage_map_and_submit_extent(handle, &mpd,
2809 &give_up_on_write);
2810 /*
2811 * Caution: If the handle is synchronous,
2812 * ext4_journal_stop() can wait for transaction commit
2813		 * to finish, which may depend on writeback of pages to
2814 * complete or on page lock to be released. In that
2815 * case, we have to wait until after we have
2816 * submitted all the IO, released page locks we hold,
2817 * and dropped io_end reference (for extent conversion
2818 * to be able to complete) before stopping the handle.
2819 */
2820 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2821 ext4_journal_stop(handle);
2822 handle = NULL;
2823 mpd.do_map = 0;
2824 }
2825 /* Unlock pages we didn't use */
2826 mpage_release_unused_pages(&mpd, give_up_on_write);
2827 /* Submit prepared bio */
2828 ext4_io_submit(&mpd.io_submit);
2829
2830 /*
2831		 * Drop our io_end reference that we got from init. We have
2832 * to be careful and use deferred io_end finishing if
2833 * we are still holding the transaction as we can
2834 * release the last reference to io_end which may end
2835 * up doing unwritten extent conversion.
2836 */
2837 if (handle) {
2838 ext4_put_io_end_defer(mpd.io_submit.io_end);
2839 ext4_journal_stop(handle);
2840 } else
2841 ext4_put_io_end(mpd.io_submit.io_end);
2842 mpd.io_submit.io_end = NULL;
2843
2844 if (ret == -ENOSPC && sbi->s_journal) {
2845 /*
2846			 * Commit the transaction, which would free the
2847			 * blocks released in the transaction,
2848			 * and try again.
2849 */
2850 jbd2_journal_force_commit_nested(sbi->s_journal);
2851 ret = 0;
2852 continue;
2853 }
2854 /* Fatal error - ENOMEM, EIO... */
2855 if (ret)
2856 break;
2857 }
2858unplug:
2859 blk_finish_plug(&plug);
2860 if (!ret && !cycled && wbc->nr_to_write > 0) {
2861 cycled = 1;
2862 mpd.last_page = writeback_index - 1;
2863 mpd.first_page = 0;
2864 goto retry;
2865 }
2866
2867 /* Update index */
2868 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2869 /*
2870 * Set the writeback_index so that range_cyclic
2871 * mode will write it back later
2872 */
2873 mapping->writeback_index = mpd.first_page;
2874
2875out_writepages:
2876 trace_ext4_writepages_result(inode, wbc, ret,
2877 nr_to_write - wbc->nr_to_write);
2878 percpu_up_read(&sbi->s_writepages_rwsem);
2879 return ret;
2880}
2881
2882static int ext4_dax_writepages(struct address_space *mapping,
2883 struct writeback_control *wbc)
2884{
2885 int ret;
2886 long nr_to_write = wbc->nr_to_write;
2887 struct inode *inode = mapping->host;
2888 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2889
2890 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2891 return -EIO;
2892
2893 percpu_down_read(&sbi->s_writepages_rwsem);
2894 trace_ext4_writepages(inode, wbc);
2895
2896 ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
2897 trace_ext4_writepages_result(inode, wbc, ret,
2898 nr_to_write - wbc->nr_to_write);
2899 percpu_up_read(&sbi->s_writepages_rwsem);
2900 return ret;
2901}
2902
2903static int ext4_nonda_switch(struct super_block *sb)
2904{
2905 s64 free_clusters, dirty_clusters;
2906 struct ext4_sb_info *sbi = EXT4_SB(sb);
2907
2908 /*
2909	 * Switch to non-delalloc mode if we are running low
2910	 * on free blocks. The free block accounting via percpu
2911	 * counters can get slightly wrong, with percpu_counter_batch getting
2912	 * accumulated on each CPU without updating the global counters.
2913	 * Delalloc needs accurate free block accounting, so switch
2914	 * to non-delalloc mode when we are near the error range.
2915 */
2916 free_clusters =
2917 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2918 dirty_clusters =
2919 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2920 /*
2921 * Start pushing delalloc when 1/2 of free blocks are dirty.
2922 */
2923 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2924 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2925
2926 if (2 * free_clusters < 3 * dirty_clusters ||
2927 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2928 /*
2929		 * The free block count is less than 150% of the dirty block
2930		 * count, or the free block count is below the watermark.
2931 */
2932 return 1;
2933 }
2934 return 0;
2935}
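
/*
 * Editorial note (illustrative numbers for the thresholds above, ignoring
 * the EXT4_FREECLUSTERS_WATERMARK term): with free_clusters == 100 and
 * dirty_clusters == 70, 100 < 2 * 70 kicks off background writeback, and
 * 2 * 100 < 3 * 70 makes the function return 1, i.e. callers fall back to
 * non-delalloc writes until the pressure eases.
 */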
2936
2937/* We always reserve for an inode update; the superblock could be there too */
2938static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2939{
2940 if (likely(ext4_has_feature_large_file(inode->i_sb)))
2941 return 1;
2942
2943 if (pos + len <= 0x7fffffffULL)
2944 return 1;
2945
2946 /* We might need to update the superblock to set LARGE_FILE */
2947 return 2;
2948}
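
/*
 * Editorial note (an illustrative case for the credit choice above): on a
 * filesystem without the LARGE_FILE feature, a write with
 * pos + len == 0x80000010 exceeds 0x7fffffff, so two credits are reserved
 * because the superblock may need updating to set LARGE_FILE; otherwise a
 * single credit for the inode update suffices.
 */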
2949
2950static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2951 loff_t pos, unsigned len, unsigned flags,
2952 struct page **pagep, void **fsdata)
2953{
2954 int ret, retries = 0;
2955 struct page *page;
2956 pgoff_t index;
2957 struct inode *inode = mapping->host;
2958 handle_t *handle;
2959
2960 if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2961 return -EIO;
2962
2963 index = pos >> PAGE_SHIFT;
2964
2965 if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
2966 ext4_verity_in_progress(inode)) {
2967 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2968 return ext4_write_begin(file, mapping, pos,
2969 len, flags, pagep, fsdata);
2970 }
2971 *fsdata = (void *)0;
2972 trace_ext4_da_write_begin(inode, pos, len, flags);
2973
2974 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2975 ret = ext4_da_write_inline_data_begin(mapping, inode,
2976 pos, len, flags,
2977 pagep, fsdata);
2978 if (ret < 0)
2979 return ret;
2980 if (ret == 1)
2981 return 0;
2982 }
2983
2984 /*
2985 * grab_cache_page_write_begin() can take a long time if the
2986 * system is thrashing due to memory pressure, or if the page
2987 * is being written back. So grab it first before we start
2988 * the transaction handle. This also allows us to allocate
2989 * the page (if needed) without using GFP_NOFS.
2990 */
2991retry_grab:
2992 page = grab_cache_page_write_begin(mapping, index, flags);
2993 if (!page)
2994 return -ENOMEM;
2995 unlock_page(page);
2996
2997 /*
2998	 * With delayed allocation, we don't log the i_disksize update
2999	 * if there is delayed block allocation. But we still need
3000	 * to journal the i_disksize update if the write extends the end
3001	 * of file and lands in an already mapped buffer.
3002 */
3003retry_journal:
3004 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3005 ext4_da_write_credits(inode, pos, len));
3006 if (IS_ERR(handle)) {
3007 put_page(page);
3008 return PTR_ERR(handle);
3009 }
3010
3011 lock_page(page);
3012 if (page->mapping != mapping) {
3013 /* The page got truncated from under us */
3014 unlock_page(page);
3015 put_page(page);
3016 ext4_journal_stop(handle);
3017 goto retry_grab;
3018 }
3019 /* In case writeback began while the page was unlocked */
3020 wait_for_stable_page(page);
3021
3022#ifdef CONFIG_FS_ENCRYPTION
3023 ret = ext4_block_write_begin(page, pos, len,
3024 ext4_da_get_block_prep);
3025#else
3026 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3027#endif
3028 if (ret < 0) {
3029 unlock_page(page);
3030 ext4_journal_stop(handle);
3031 /*
3032 * block_write_begin may have instantiated a few blocks
3033		 * outside i_size. Trim these off again. We don't need
3034 * i_size_read because we hold i_mutex.
3035 */
3036 if (pos + len > inode->i_size)
3037 ext4_truncate_failed_write(inode);
3038
3039 if (ret == -ENOSPC &&
3040 ext4_should_retry_alloc(inode->i_sb, &retries))
3041 goto retry_journal;
3042
3043 put_page(page);
3044 return ret;
3045 }
3046
3047 *pagep = page;
3048 return ret;
3049}
3050
3051/*
3052 * Check whether we should update i_disksize when a write to the end of
3053 * the file does not require block allocation
3054 */
3055static int ext4_da_should_update_i_disksize(struct page *page,
3056 unsigned long offset)
3057{
3058 struct buffer_head *bh;
3059 struct inode *inode = page->mapping->host;
3060 unsigned int idx;
3061 int i;
3062
3063 bh = page_buffers(page);
3064 idx = offset >> inode->i_blkbits;
3065
3066 for (i = 0; i < idx; i++)
3067 bh = bh->b_this_page;
3068
3069 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3070 return 0;
3071 return 1;
3072}
3073
3074static int ext4_da_write_end(struct file *file,
3075 struct address_space *mapping,
3076 loff_t pos, unsigned len, unsigned copied,
3077 struct page *page, void *fsdata)
3078{
3079 struct inode *inode = mapping->host;
3080 int ret = 0, ret2;
3081 handle_t *handle = ext4_journal_current_handle();
3082 loff_t new_i_size;
3083 unsigned long start, end;
3084 int write_mode = (int)(unsigned long)fsdata;
3085
3086 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3087 return ext4_write_end(file, mapping, pos,
3088 len, copied, page, fsdata);
3089
3090 trace_ext4_da_write_end(inode, pos, len, copied);
3091 start = pos & (PAGE_SIZE - 1);
3092 end = start + copied - 1;
3093
3094 /*
3095	 * Since we are holding the inode lock, we are sure i_disksize <=
3096	 * i_size. We also know that if i_disksize < i_size, there are
3097	 * delalloc writes pending in the range up to i_size. If the end of
3098	 * the current write is <= i_size, there's no need to touch
3099	 * i_disksize since writeback will push i_disksize up to i_size
3100	 * eventually. If the end of the current write is > i_size and
3101	 * inside an allocated block (the ext4_da_should_update_i_disksize()
3102	 * check), we need to update i_disksize here, since neither
3103	 * ext4_writepage() nor the ext4_writepages() paths that do not
3104	 * allocate blocks update i_disksize.
3105 *
3106 * Note that we defer inode dirtying to generic_write_end() /
3107 * ext4_da_write_inline_data_end().
3108 */
3109 new_i_size = pos + copied;
3110 if (copied && new_i_size > inode->i_size) {
3111 if (ext4_has_inline_data(inode) ||
3112 ext4_da_should_update_i_disksize(page, end))
3113 ext4_update_i_disksize(inode, new_i_size);
3114 }
3115
3116 if (write_mode != CONVERT_INLINE_DATA &&
3117 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3118 ext4_has_inline_data(inode))
3119 ret = ext4_da_write_inline_data_end(inode, pos, len, copied,
3120 page);
3121 else
3122 ret = generic_write_end(file, mapping, pos, len, copied,
3123 page, fsdata);
3124
3125 copied = ret;
3126 ret2 = ext4_journal_stop(handle);
3127 if (unlikely(ret2 && !ret))
3128 ret = ret2;
3129
3130 return ret ? ret : copied;
3131}
3132
3133/*
3134 * Force all delayed allocation blocks to be allocated for a given inode.
3135 */
3136int ext4_alloc_da_blocks(struct inode *inode)
3137{
3138 trace_ext4_alloc_da_blocks(inode);
3139
3140 if (!EXT4_I(inode)->i_reserved_data_blocks)
3141 return 0;
3142
3143 /*
3144 * We do something simple for now. The filemap_flush() will
3145 * also start triggering a write of the data blocks, which is
3146 * not strictly speaking necessary (and for users of
3147 * laptop_mode, not even desirable). However, to do otherwise
3148 * would require replicating code paths in:
3149 *
3150 * ext4_writepages() ->
3151 * write_cache_pages() ---> (via passed in callback function)
3152 * __mpage_da_writepage() -->
3153 * mpage_add_bh_to_extent()
3154 * mpage_da_map_blocks()
3155 *
3156 * The problem is that write_cache_pages(), located in
3157 * mm/page-writeback.c, marks pages clean in preparation for
3158 * doing I/O, which is not desirable if we're not planning on
3159 * doing I/O at all.
3160 *
3161 * We could call write_cache_pages(), and then redirty all of
3162 * the pages by calling redirty_page_for_writepage() but that
3163 * would be ugly in the extreme. So instead we would need to
3164 * replicate parts of the code in the above functions,
3165 * simplifying them because we wouldn't actually intend to
3166 * write out the pages, but rather only collect contiguous
3167 * logical block extents, call the multi-block allocator, and
3168 * then update the buffer heads with the block allocations.
3169 *
3170 * For now, though, we'll cheat by calling filemap_flush(),
3171 * which will map the blocks, and start the I/O, but not
3172 * actually wait for the I/O to complete.
3173 */
3174 return filemap_flush(inode->i_mapping);
3175}
3176
3177/*
3178 * bmap() is special. It gets used by applications such as lilo and by
3179 * the swapper to find the on-disk block of a specific piece of data.
3180 *
3181 * Naturally, this is dangerous if the block concerned is still in the
3182 * journal. If somebody makes a swapfile on an ext4 data-journaling
3183 * filesystem and enables swap, then they may get a nasty shock when the
3184 * data getting swapped to that swapfile suddenly gets overwritten by
3185 * the original zeros written out previously to the journal and
3186 * awaiting writeback in the kernel's buffer cache.
3187 *
3188 * So, if we see any bmap calls here on a modified, data-journaled file,
3189 * take extra steps to flush any blocks which might be in the cache.
3190 */
3191static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3192{
3193 struct inode *inode = mapping->host;
3194 journal_t *journal;
3195 int err;
3196
3197 /*
3198 * We can get here for an inline file via the FIBMAP ioctl
3199 */
3200 if (ext4_has_inline_data(inode))
3201 return 0;
3202
3203 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3204 test_opt(inode->i_sb, DELALLOC)) {
3205 /*
3206 * With delalloc we want to sync the file
3207		 * so that we can make sure we allocate
3208		 * blocks for the file.
3209 */
3210 filemap_write_and_wait(mapping);
3211 }
3212
3213 if (EXT4_JOURNAL(inode) &&
3214 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3215 /*
3216 * This is a REALLY heavyweight approach, but the use of
3217 * bmap on dirty files is expected to be extremely rare:
3218 * only if we run lilo or swapon on a freshly made file
3219 * do we expect this to happen.
3220 *
3221 * (bmap requires CAP_SYS_RAWIO so this does not
3222 * represent an unprivileged user DOS attack --- we'd be
3223 * in trouble if mortal users could trigger this path at
3224 * will.)
3225 *
3226 * NB. EXT4_STATE_JDATA is not set on files other than
3227 * regular files. If somebody wants to bmap a directory
3228 * or symlink and gets confused because the buffer
3229 * hasn't yet been flushed to disk, they deserve
3230 * everything they get.
3231 */
3232
3233 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3234 journal = EXT4_JOURNAL(inode);
3235 jbd2_journal_lock_updates(journal);
3236 err = jbd2_journal_flush(journal, 0);
3237 jbd2_journal_unlock_updates(journal);
3238
3239 if (err)
3240 return 0;
3241 }
3242
3243 return iomap_bmap(mapping, block, &ext4_iomap_ops);
3244}
3245
3246static int ext4_readpage(struct file *file, struct page *page)
3247{
3248 int ret = -EAGAIN;
3249 struct inode *inode = page->mapping->host;
3250
3251 trace_ext4_readpage(page);
3252
3253 if (ext4_has_inline_data(inode))
3254 ret = ext4_readpage_inline(inode, page);
3255
3256 if (ret == -EAGAIN)
3257 return ext4_mpage_readpages(inode, NULL, page);
3258
3259 return ret;
3260}
3261
3262static void ext4_readahead(struct readahead_control *rac)
3263{
3264 struct inode *inode = rac->mapping->host;
3265
3266 /* If the file has inline data, no need to do readahead. */
3267 if (ext4_has_inline_data(inode))
3268 return;
3269
3270 ext4_mpage_readpages(inode, rac, NULL);
3271}
3272
3273static void ext4_invalidatepage(struct page *page, unsigned int offset,
3274 unsigned int length)
3275{
3276 trace_ext4_invalidatepage(page, offset, length);
3277
3278 /* No journalling happens on data buffers when this function is used */
3279 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3280
3281 block_invalidatepage(page, offset, length);
3282}
3283
3284static int __ext4_journalled_invalidatepage(struct page *page,
3285 unsigned int offset,
3286 unsigned int length)
3287{
3288 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3289
3290 trace_ext4_journalled_invalidatepage(page, offset, length);
3291
3292 /*
3293 * If it's a full truncate we just forget about the pending dirtying
3294 */
3295 if (offset == 0 && length == PAGE_SIZE)
3296 ClearPageChecked(page);
3297
3298 return jbd2_journal_invalidatepage(journal, page, offset, length);
3299}
3300
3301/* Wrapper for aops... */
3302static void ext4_journalled_invalidatepage(struct page *page,
3303 unsigned int offset,
3304 unsigned int length)
3305{
3306 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3307}
3308
3309static int ext4_releasepage(struct page *page, gfp_t wait)
3310{
3311 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3312
3313 trace_ext4_releasepage(page);
3314
3315 /* Page has dirty journalled data -> cannot release */
3316 if (PageChecked(page))
3317 return 0;
3318 if (journal)
3319 return jbd2_journal_try_to_free_buffers(journal, page);
3320 else
3321 return try_to_free_buffers(page);
3322}
3323
3324static bool ext4_inode_datasync_dirty(struct inode *inode)
3325{
3326 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3327
3328 if (journal) {
3329 if (jbd2_transaction_committed(journal,
3330 EXT4_I(inode)->i_datasync_tid))
3331 return false;
3332 if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3333 return !list_empty(&EXT4_I(inode)->i_fc_list);
3334 return true;
3335 }
3336
3337 /* Any metadata buffers to write? */
3338 if (!list_empty(&inode->i_mapping->private_list))
3339 return true;
3340 return inode->i_state & I_DIRTY_DATASYNC;
3341}
3342
3343static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3344 struct ext4_map_blocks *map, loff_t offset,
3345 loff_t length)
3346{
3347 u8 blkbits = inode->i_blkbits;
3348
3349 /*
3350 * Writes that span EOF might trigger an I/O size update on completion,
3351 * so consider them to be dirty for the purpose of O_DSYNC, even if
3352	 * there are no other metadata changes being made or pending.
3353 */
3354 iomap->flags = 0;
3355 if (ext4_inode_datasync_dirty(inode) ||
3356 offset + length > i_size_read(inode))
3357 iomap->flags |= IOMAP_F_DIRTY;
3358
3359 if (map->m_flags & EXT4_MAP_NEW)
3360 iomap->flags |= IOMAP_F_NEW;
3361
3362 iomap->bdev = inode->i_sb->s_bdev;
3363 iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3364 iomap->offset = (u64) map->m_lblk << blkbits;
3365 iomap->length = (u64) map->m_len << blkbits;
3366
3367 if ((map->m_flags & EXT4_MAP_MAPPED) &&
3368 !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3369 iomap->flags |= IOMAP_F_MERGED;
3370
3371 /*
3372 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3373 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3374 * set. In order for any allocated unwritten extents to be converted
3375 * into written extents correctly within the ->end_io() handler, we
3376	 * need to ensure that the iomap->type is set appropriately. Hence
3377	 * we check whether the EXT4_MAP_UNWRITTEN bit has been
3378	 * set first.
3379 */
3380 if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3381 iomap->type = IOMAP_UNWRITTEN;
3382 iomap->addr = (u64) map->m_pblk << blkbits;
3383 } else if (map->m_flags & EXT4_MAP_MAPPED) {
3384 iomap->type = IOMAP_MAPPED;
3385 iomap->addr = (u64) map->m_pblk << blkbits;
3386 } else {
3387 iomap->type = IOMAP_HOLE;
3388 iomap->addr = IOMAP_NULL_ADDR;
3389 }
3390}
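
/*
 * Editorial note (an illustrative consequence of the ordering above): for a
 * direct I/O write into preallocated space, m_flags can carry both
 * EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN; because the unwritten bit is
 * tested first, iomap->type becomes IOMAP_UNWRITTEN, so the ->end_io()
 * handler knows to convert the extent once the write completes.
 */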
3391
3392static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3393 unsigned int flags)
3394{
3395 handle_t *handle;
3396 u8 blkbits = inode->i_blkbits;
3397 int ret, dio_credits, m_flags = 0, retries = 0;
3398
3399 /*
3400 * Trim the mapping request to the maximum value that we can map at
3401 * once for direct I/O.
3402 */
3403 if (map->m_len > DIO_MAX_BLOCKS)
3404 map->m_len = DIO_MAX_BLOCKS;
3405 dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3406
3407retry:
3408 /*
3409	 * Either we allocate blocks and then don't get an unwritten extent,
3410	 * in which case we have reserved enough credits, or the blocks are
3411	 * already allocated and unwritten, in which case the extent conversion
3412	 * fits into the credits as well.
3413 */
3414 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3415 if (IS_ERR(handle))
3416 return PTR_ERR(handle);
3417
3418 /*
3419 * DAX and direct I/O are the only two operations that are currently
3420 * supported with IOMAP_WRITE.
3421 */
3422 WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
3423 if (IS_DAX(inode))
3424 m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3425 /*
3426 * We use i_size instead of i_disksize here because delalloc writeback
3427 * can complete at any point during the I/O and subsequently push the
3428 * i_disksize out to i_size. This could be beyond where direct I/O is
3429 * happening and thus expose allocated blocks to direct I/O reads.
3430 */
3431 else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3432 m_flags = EXT4_GET_BLOCKS_CREATE;
3433 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3434 m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3435
3436 ret = ext4_map_blocks(handle, inode, map, m_flags);
3437
3438 /*
3439 * We cannot fill holes in indirect tree based inodes as that could
3440 * expose stale data in the case of a crash. Use the magic error code
3441	 * to fall back to buffered I/O.
3442 */
3443 if (!m_flags && !ret)
3444 ret = -ENOTBLK;
3445
3446 ext4_journal_stop(handle);
3447 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3448 goto retry;
3449
3450 return ret;
3451}
3452
3453
3454static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3455 unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3456{
3457 int ret;
3458 struct ext4_map_blocks map;
3459 u8 blkbits = inode->i_blkbits;
3460
3461 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3462 return -EINVAL;
3463
3464 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3465 return -ERANGE;
3466
3467 /*
3468 * Calculate the first and last logical blocks respectively.
3469 */
3470 map.m_lblk = offset >> blkbits;
3471 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3472 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3473
3474 if (flags & IOMAP_WRITE) {
3475 /*
3476		 * If the blocks are already allocated, we don't need to start
3477		 * a journal transaction and can directly return
3478		 * the mapping information. This can boost performance,
3479		 * especially for multi-threaded overwrite requests.
3480 */
3481 if (offset + length <= i_size_read(inode)) {
3482 ret = ext4_map_blocks(NULL, inode, &map, 0);
3483 if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3484 goto out;
3485 }
3486 ret = ext4_iomap_alloc(inode, &map, flags);
3487 } else {
3488 ret = ext4_map_blocks(NULL, inode, &map, 0);
3489 }
3490
3491 if (ret < 0)
3492 return ret;
3493out:
3494 ext4_set_iomap(inode, iomap, &map, offset, length);
3495
3496 return 0;
3497}
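
/*
 * Editorial note (assumed example values for the logical-block trimming
 * above): for offset == 5000, length == 10000 and a 4K block size
 * (blkbits == 12), map.m_lblk == 5000 >> 12 == 1 and
 * map.m_len == ((5000 + 10000 - 1) >> 12) - 1 + 1 == 3, i.e. the request
 * covers logical blocks 1..3.
 */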
3498
3499static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3500 loff_t length, unsigned flags, struct iomap *iomap,
3501 struct iomap *srcmap)
3502{
3503 int ret;
3504
3505 /*
3506 * Even for writes we don't need to allocate blocks, so just pretend
3507	 * we are reading to save the overhead of starting a transaction.
3508 */
3509 flags &= ~IOMAP_WRITE;
3510 ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3511 WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
3512 return ret;
3513}
3514
3515static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3516 ssize_t written, unsigned flags, struct iomap *iomap)
3517{
3518 /*
3519 * Check to see whether an error occurred while writing out the data to
3520 * the allocated blocks. If so, return the magic error code so that we
3521	 * fall back to buffered I/O and attempt to complete the remainder of
3522 * the I/O. Any blocks that may have been allocated in preparation for
3523 * the direct I/O will be reused during buffered I/O.
3524 */
3525 if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3526 return -ENOTBLK;
3527
3528 return 0;
3529}
3530
3531const struct iomap_ops ext4_iomap_ops = {
3532 .iomap_begin = ext4_iomap_begin,
3533 .iomap_end = ext4_iomap_end,
3534};
3535
3536const struct iomap_ops ext4_iomap_overwrite_ops = {
3537 .iomap_begin = ext4_iomap_overwrite_begin,
3538 .iomap_end = ext4_iomap_end,
3539};
3540
3541static bool ext4_iomap_is_delalloc(struct inode *inode,
3542 struct ext4_map_blocks *map)
3543{
3544 struct extent_status es;
3545 ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3546
3547 ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3548 map->m_lblk, end, &es);
3549
3550 if (!es.es_len || es.es_lblk > end)
3551 return false;
3552
3553 if (es.es_lblk > map->m_lblk) {
3554 map->m_len = es.es_lblk - map->m_lblk;
3555 return false;
3556 }
3557
3558 offset = map->m_lblk - es.es_lblk;
3559 map->m_len = es.es_len - offset;
3560
3561 return true;
3562}
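
/*
 * Editorial note (two illustrative cases for the trimming above): for a
 * requested map covering blocks 10..19, a delayed extent at 15..30 starts
 * past m_lblk, so m_len is trimmed to 5 and false is returned (the caller
 * first reports the non-delalloc head 10..14); a delayed extent starting
 * at block 5 with length 8 covers m_lblk, so offset == 5, m_len == 3 and
 * true is returned for the delalloc run 10..12.
 */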
3563
3564static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3565 loff_t length, unsigned int flags,
3566 struct iomap *iomap, struct iomap *srcmap)
3567{
3568 int ret;
3569 bool delalloc = false;
3570 struct ext4_map_blocks map;
3571 u8 blkbits = inode->i_blkbits;
3572
3573 if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3574 return -EINVAL;
3575
3576 if (ext4_has_inline_data(inode)) {
3577 ret = ext4_inline_data_iomap(inode, iomap);
3578 if (ret != -EAGAIN) {
3579 if (ret == 0 && offset >= iomap->length)
3580 ret = -ENOENT;
3581 return ret;
3582 }
3583 }
3584
3585 /*
3586 * Calculate the first and last logical block respectively.
3587 */
3588 map.m_lblk = offset >> blkbits;
3589 map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3590 EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3591
3592 /*
3593	 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes,
3594	 * so handle it here instead of querying ext4_map_blocks(),
3595	 * which would warn about such an offset
3596	 * and return an -EIO error.
3597 */
3598 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3599 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3600
3601 if (offset >= sbi->s_bitmap_maxbytes) {
3602 map.m_flags = 0;
3603 goto set_iomap;
3604 }
3605 }
3606
3607 ret = ext4_map_blocks(NULL, inode, &map, 0);
3608 if (ret < 0)
3609 return ret;
3610 if (ret == 0)
3611 delalloc = ext4_iomap_is_delalloc(inode, &map);
3612
3613set_iomap:
3614 ext4_set_iomap(inode, iomap, &map, offset, length);
3615 if (delalloc && iomap->type == IOMAP_HOLE)
3616 iomap->type = IOMAP_DELALLOC;
3617
3618 return 0;
3619}
3620
3621const struct iomap_ops ext4_iomap_report_ops = {
3622 .iomap_begin = ext4_iomap_begin_report,
3623};
3624
3625/*
3626 * Pages can be marked dirty completely asynchronously from ext4's journalling
3627 * activity: by filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3628 * much here because ->set_page_dirty is called under VFS locks. The page is
3629 * not necessarily locked.
3630 *
3631 * We cannot just dirty the page and leave attached buffers clean, because the
3632 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3633 * or jbddirty because all the journalling code will explode.
3634 *
3635 * So what we do is to mark the page "pending dirty" and next time writepage
3636 * is called, propagate that into the buffers appropriately.
3637 */
3638static int ext4_journalled_set_page_dirty(struct page *page)
3639{
3640 SetPageChecked(page);
3641 return __set_page_dirty_nobuffers(page);
3642}
3643
3644static int ext4_set_page_dirty(struct page *page)
3645{
3646 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3647 WARN_ON_ONCE(!page_has_buffers(page));
3648 return __set_page_dirty_buffers(page);
3649}
3650
3651static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3652 struct file *file, sector_t *span)
3653{
3654 return iomap_swapfile_activate(sis, file, span,
3655 &ext4_iomap_report_ops);
3656}
3657
3658static const struct address_space_operations ext4_aops = {
3659 .readpage = ext4_readpage,
3660 .readahead = ext4_readahead,
3661 .writepage = ext4_writepage,
3662 .writepages = ext4_writepages,
3663 .write_begin = ext4_write_begin,
3664 .write_end = ext4_write_end,
3665 .set_page_dirty = ext4_set_page_dirty,
3666 .bmap = ext4_bmap,
3667 .invalidatepage = ext4_invalidatepage,
3668 .releasepage = ext4_releasepage,
3669 .direct_IO = noop_direct_IO,
3670 .migratepage = buffer_migrate_page,
3671 .is_partially_uptodate = block_is_partially_uptodate,
3672 .error_remove_page = generic_error_remove_page,
3673 .swap_activate = ext4_iomap_swap_activate,
3674};
3675
3676static const struct address_space_operations ext4_journalled_aops = {
3677 .readpage = ext4_readpage,
3678 .readahead = ext4_readahead,
3679 .writepage = ext4_writepage,
3680 .writepages = ext4_writepages,
3681 .write_begin = ext4_write_begin,
3682 .write_end = ext4_journalled_write_end,
3683 .set_page_dirty = ext4_journalled_set_page_dirty,
3684 .bmap = ext4_bmap,
3685 .invalidatepage = ext4_journalled_invalidatepage,
3686 .releasepage = ext4_releasepage,
3687 .direct_IO = noop_direct_IO,
3688 .is_partially_uptodate = block_is_partially_uptodate,
3689 .error_remove_page = generic_error_remove_page,
3690 .swap_activate = ext4_iomap_swap_activate,
3691};
3692
3693static const struct address_space_operations ext4_da_aops = {
3694 .readpage = ext4_readpage,
3695 .readahead = ext4_readahead,
3696 .writepage = ext4_writepage,
3697 .writepages = ext4_writepages,
3698 .write_begin = ext4_da_write_begin,
3699 .write_end = ext4_da_write_end,
3700 .set_page_dirty = ext4_set_page_dirty,
3701 .bmap = ext4_bmap,
3702 .invalidatepage = ext4_invalidatepage,
3703 .releasepage = ext4_releasepage,
3704 .direct_IO = noop_direct_IO,
3705 .migratepage = buffer_migrate_page,
3706 .is_partially_uptodate = block_is_partially_uptodate,
3707 .error_remove_page = generic_error_remove_page,
3708 .swap_activate = ext4_iomap_swap_activate,
3709};
3710
3711static const struct address_space_operations ext4_dax_aops = {
3712 .writepages = ext4_dax_writepages,
3713 .direct_IO = noop_direct_IO,
3714 .set_page_dirty = __set_page_dirty_no_writeback,
3715 .bmap = ext4_bmap,
3716 .invalidatepage = noop_invalidatepage,
3717 .swap_activate = ext4_iomap_swap_activate,
3718};
3719
3720void ext4_set_aops(struct inode *inode)
3721{
3722 switch (ext4_inode_journal_mode(inode)) {
3723 case EXT4_INODE_ORDERED_DATA_MODE:
3724 case EXT4_INODE_WRITEBACK_DATA_MODE:
3725 break;
3726 case EXT4_INODE_JOURNAL_DATA_MODE:
3727 inode->i_mapping->a_ops = &ext4_journalled_aops;
3728 return;
3729 default:
3730 BUG();
3731 }
3732 if (IS_DAX(inode))
3733 inode->i_mapping->a_ops = &ext4_dax_aops;
3734 else if (test_opt(inode->i_sb, DELALLOC))
3735 inode->i_mapping->a_ops = &ext4_da_aops;
3736 else
3737 inode->i_mapping->a_ops = &ext4_aops;
3738}
3739
3740static int __ext4_block_zero_page_range(handle_t *handle,
3741 struct address_space *mapping, loff_t from, loff_t length)
3742{
3743 ext4_fsblk_t index = from >> PAGE_SHIFT;
3744 unsigned offset = from & (PAGE_SIZE-1);
3745 unsigned blocksize, pos;
3746 ext4_lblk_t iblock;
3747 struct inode *inode = mapping->host;
3748 struct buffer_head *bh;
3749 struct page *page;
3750 int err = 0;
3751
3752 page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3753 mapping_gfp_constraint(mapping, ~__GFP_FS));
3754 if (!page)
3755 return -ENOMEM;
3756
3757 blocksize = inode->i_sb->s_blocksize;
3758
3759 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3760
3761 if (!page_has_buffers(page))
3762 create_empty_buffers(page, blocksize, 0);
3763
3764 /* Find the buffer that contains "offset" */
3765 bh = page_buffers(page);
3766 pos = blocksize;
3767 while (offset >= pos) {
3768 bh = bh->b_this_page;
3769 iblock++;
3770 pos += blocksize;
3771 }
3772 if (buffer_freed(bh)) {
3773 BUFFER_TRACE(bh, "freed: skip");
3774 goto unlock;
3775 }
3776 if (!buffer_mapped(bh)) {
3777 BUFFER_TRACE(bh, "unmapped");
3778 ext4_get_block(inode, iblock, bh, 0);
3779 /* unmapped? It's a hole - nothing to do */
3780 if (!buffer_mapped(bh)) {
3781 BUFFER_TRACE(bh, "still unmapped");
3782 goto unlock;
3783 }
3784 }
3785
3786 /* Ok, it's mapped. Make sure it's up-to-date */
3787 if (PageUptodate(page))
3788 set_buffer_uptodate(bh);
3789
3790 if (!buffer_uptodate(bh)) {
3791 err = ext4_read_bh_lock(bh, 0, true);
3792 if (err)
3793 goto unlock;
3794 if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3795 /* We expect the key to be set. */
3796 BUG_ON(!fscrypt_has_encryption_key(inode));
3797 err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
3798 bh_offset(bh));
3799 if (err) {
3800 clear_buffer_uptodate(bh);
3801 goto unlock;
3802 }
3803 }
3804 }
3805 if (ext4_should_journal_data(inode)) {
3806 BUFFER_TRACE(bh, "get write access");
3807 err = ext4_journal_get_write_access(handle, bh);
3808 if (err)
3809 goto unlock;
3810 }
3811 zero_user(page, offset, length);
3812 BUFFER_TRACE(bh, "zeroed end of block");
3813
3814 if (ext4_should_journal_data(inode)) {
3815 err = ext4_handle_dirty_metadata(handle, inode, bh);
3816 } else {
3817 err = 0;
3818 mark_buffer_dirty(bh);
3819 if (ext4_should_order_data(inode))
3820 err = ext4_jbd2_inode_add_write(handle, inode, from,
3821 length);
3822 }
3823
3824unlock:
3825 unlock_page(page);
3826 put_page(page);
3827 return err;
3828}
3829
3830/*
3831 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3832 * starting from file offset 'from'. The range to be zeroed must
3833 * be contained within one block. If the specified range exceeds
3834 * the end of the block, it will be shortened to the end of the block
3835 * that corresponds to 'from'.
3836 */
3837static int ext4_block_zero_page_range(handle_t *handle,
3838 struct address_space *mapping, loff_t from, loff_t length)
3839{
3840 struct inode *inode = mapping->host;
3841 unsigned offset = from & (PAGE_SIZE-1);
3842 unsigned blocksize = inode->i_sb->s_blocksize;
3843 unsigned max = blocksize - (offset & (blocksize - 1));
3844
3845 /*
3846	 * Correct the length if it does not fall between
3847	 * 'from' and the end of the block.
3848 */
3849 if (length > max || length < 0)
3850 length = max;
3851
3852 if (IS_DAX(inode)) {
3853 return iomap_zero_range(inode, from, length, NULL,
3854 &ext4_iomap_ops);
3855 }
3856 return __ext4_block_zero_page_range(handle, mapping, from, length);
3857}
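
/*
 * Editorial note (assumed example values for the clamping above): for
 * from == 1000 on a 4K-block filesystem, max == 4096 - (1000 & 4095) == 3096,
 * so any requested length larger than 3096 (or a negative one) is shortened
 * to 3096 and the zeroing stays within the single block containing 'from'.
 */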
3858
3859/*
3860 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3861 * up to the end of the block which corresponds to `from'.
3862 * This is required during truncate. We need to physically zero the tail end
3863 * of that block so it doesn't yield old data if the file is later grown.
3864 */
3865static int ext4_block_truncate_page(handle_t *handle,
3866 struct address_space *mapping, loff_t from)
3867{
3868 unsigned offset = from & (PAGE_SIZE-1);
3869 unsigned length;
3870 unsigned blocksize;
3871 struct inode *inode = mapping->host;
3872
3873 /* If we are processing an encrypted inode during orphan list handling */
3874 if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3875 return 0;
3876
3877 blocksize = inode->i_sb->s_blocksize;
3878 length = blocksize - (offset & (blocksize - 1));
3879
3880 return ext4_block_zero_page_range(handle, mapping, from, length);
3881}
3882
3883int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3884 loff_t lstart, loff_t length)
3885{
3886 struct super_block *sb = inode->i_sb;
3887 struct address_space *mapping = inode->i_mapping;
3888 unsigned partial_start, partial_end;
3889 ext4_fsblk_t start, end;
3890 loff_t byte_end = (lstart + length - 1);
3891 int err = 0;
3892
3893 partial_start = lstart & (sb->s_blocksize - 1);
3894 partial_end = byte_end & (sb->s_blocksize - 1);
3895
3896 start = lstart >> sb->s_blocksize_bits;
3897 end = byte_end >> sb->s_blocksize_bits;
3898
3899 /* Handle partial zero within the single block */
3900 if (start == end &&
3901 (partial_start || (partial_end != sb->s_blocksize - 1))) {
3902 err = ext4_block_zero_page_range(handle, mapping,
3903 lstart, length);
3904 return err;
3905 }
3906 /* Handle partial zero out on the start of the range */
3907 if (partial_start) {
3908 err = ext4_block_zero_page_range(handle, mapping,
3909 lstart, sb->s_blocksize);
3910 if (err)
3911 return err;
3912 }
3913 /* Handle partial zero out on the end of the range */
3914 if (partial_end != sb->s_blocksize - 1)
3915 err = ext4_block_zero_page_range(handle, mapping,
3916 byte_end - partial_end,
3917 partial_end + 1);
3918 return err;
3919}
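
/*
 * Editorial note (a worked example of the partial-block handling above,
 * with an illustrative 4K block size): for lstart == 1000 and
 * length == 10000, byte_end == 10999, partial_start == 1000 and
 * partial_end == 10999 & 4095 == 2807, with start == 0 and end == 2. The
 * head range 1000..4095 and the tail range 8192..10999 are zeroed here,
 * while the fully covered middle block is left for the caller to free.
 */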
3920
3921int ext4_can_truncate(struct inode *inode)
3922{
3923 if (S_ISREG(inode->i_mode))
3924 return 1;
3925 if (S_ISDIR(inode->i_mode))
3926 return 1;
3927 if (S_ISLNK(inode->i_mode))
3928 return !ext4_inode_is_fast_symlink(inode);
3929 return 0;
3930}
3931
3932/*
3933 * We have to make sure i_disksize gets properly updated before we truncate
3934 * page cache due to hole punching or zero range. Otherwise the i_disksize
3935 * update can get lost, as it may have been postponed until writeback
3936 * submission, which will never happen once we truncate the page cache.
3937 */
3938int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3939 loff_t len)
3940{
3941 handle_t *handle;
3942 int ret;
3943
3944 loff_t size = i_size_read(inode);
3945
3946 WARN_ON(!inode_is_locked(inode));
3947 if (offset > size || offset + len < size)
3948 return 0;
3949
3950 if (EXT4_I(inode)->i_disksize >= size)
3951 return 0;
3952
3953 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
3954 if (IS_ERR(handle))
3955 return PTR_ERR(handle);
3956 ext4_update_i_disksize(inode, size);
3957 ret = ext4_mark_inode_dirty(handle, inode);
3958 ext4_journal_stop(handle);
3959
3960 return ret;
3961}
3962
3963static void ext4_wait_dax_page(struct ext4_inode_info *ei)
3964{
3965 up_write(&ei->i_mmap_sem);
3966 schedule();
3967 down_write(&ei->i_mmap_sem);
3968}
3969
int ext4_break_layouts(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct page *page;
	int error;

	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
		return -EINVAL;

	do {
		page = dax_layout_busy_page(inode->i_mapping);
		if (!page)
			return 0;

		error = ___wait_var_event(&page->_refcount,
				atomic_read(&page->_refcount) == 1,
				TASK_INTERRUPTIBLE, 0, 0,
				ext4_wait_dax_page(ei));
	} while (error == 0);

	return error;
}

/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @inode:  File inode
 * @offset: The offset where the hole will begin
 * @len:    The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */

int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t first_block, stop_block;
	struct address_space *mapping = inode->i_mapping;
	loff_t first_block_offset, last_block_offset;
	handle_t *handle;
	unsigned int credits;
	int ret = 0, ret2 = 0;

	trace_ext4_punch_hole(inode, offset, length, 0);

	ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
	if (ext4_has_inline_data(inode)) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		ret = ext4_convert_inline_data(inode);
		up_write(&EXT4_I(inode)->i_mmap_sem);
		if (ret)
			return ret;
	}

	/*
	 * Write out all dirty pages to avoid race conditions,
	 * then release them.
	 */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
		ret = filemap_write_and_wait_range(mapping, offset,
						   offset + length - 1);
		if (ret)
			return ret;
	}

	inode_lock(inode);

	/* No need to punch hole beyond i_size */
	if (offset >= inode->i_size)
		goto out_mutex;

	/*
	 * If the hole extends beyond i_size, set the hole
	 * to end after the page that contains i_size
	 */
	if (offset + length > inode->i_size) {
		length = inode->i_size +
		    PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
		    offset;
	}

	if (offset & (sb->s_blocksize - 1) ||
	    (offset + length) & (sb->s_blocksize - 1)) {
		/*
		 * Attach jinode to inode for jbd2 if we do any zeroing of
		 * partial block
		 */
		ret = ext4_inode_attach_jinode(inode);
		if (ret < 0)
			goto out_mutex;

	}

	/* Wait for all existing dio workers; newcomers will block on i_mutex */
	inode_dio_wait(inode);

	/*
	 * Prevent page faults from reinstantiating pages we have released from
	 * page cache.
	 */
	down_write(&EXT4_I(inode)->i_mmap_sem);

	ret = ext4_break_layouts(inode);
	if (ret)
		goto out_dio;

	first_block_offset = round_up(offset, sb->s_blocksize);
	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;

	/* Now release the pages and zero the block-aligned part of pages */
	if (last_block_offset > first_block_offset) {
		ret = ext4_update_disksize_before_punch(inode, offset, length);
		if (ret)
			goto out_dio;
		truncate_pagecache_range(inode, first_block_offset,
					 last_block_offset);
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		ext4_std_error(sb, ret);
		goto out_dio;
	}

	ret = ext4_zero_partial_blocks(handle, inode, offset,
				       length);
	if (ret)
		goto out_stop;

	first_block = (offset + sb->s_blocksize - 1) >>
		EXT4_BLOCK_SIZE_BITS(sb);
	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);

	/* If there are blocks to remove, do it */
	if (stop_block > first_block) {

		down_write(&EXT4_I(inode)->i_data_sem);
		ext4_discard_preallocations(inode, 0);

		ret = ext4_es_remove_extent(inode, first_block,
					    stop_block - first_block);
		if (ret) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto out_stop;
		}

		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
			ret = ext4_ext_remove_space(inode, first_block,
						    stop_block - 1);
		else
			ret = ext4_ind_remove_space(handle, inode, first_block,
						    stop_block);

		up_write(&EXT4_I(inode)->i_data_sem);
	}
	ext4_fc_track_range(handle, inode, first_block, stop_block);
	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	ret2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret2))
		ret = ret2;
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
	ext4_journal_stop(handle);
out_dio:
	up_write(&EXT4_I(inode)->i_mmap_sem);
out_mutex:
	inode_unlock(inode);
	return ret;
}

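/*
 * Lazily allocate a jbd2_inode and attach it to the inode so that jbd2
 * can track the inode's data in data=ordered mode.  Safe against
 * concurrent callers: the attachment is rechecked under i_lock and a
 * losing racer's allocation is freed.
 */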
int ext4_inode_attach_jinode(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct jbd2_inode *jinode;

	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
		return 0;

	jinode = jbd2_alloc_inode(GFP_KERNEL);
	spin_lock(&inode->i_lock);
	if (!ei->jinode) {
		if (!jinode) {
			spin_unlock(&inode->i_lock);
			return -ENOMEM;
		}
		ei->jinode = jinode;
		jbd2_journal_init_jbd_inode(ei->jinode, inode);
		jinode = NULL;
	}
	spin_unlock(&inode->i_lock);
	if (unlikely(jinode != NULL))
		jbd2_free_inode(jinode);
	return 0;
}

/*
 * ext4_truncate()
 *
 * We block out ext4_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext4_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext4 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext4_truncate() run will find them and release them.
 */
int ext4_truncate(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int credits;
	int err = 0, err2;
	handle_t *handle;
	struct address_space *mapping = inode->i_mapping;

	/*
	 * There is a possibility that we're either freeing the inode
	 * or it's a completely new inode. In those cases we might not
	 * have i_mutex locked because it's not necessary.
	 */
	if (!(inode->i_state & (I_NEW|I_FREEING)))
		WARN_ON(!inode_is_locked(inode));
	trace_ext4_truncate_enter(inode);

	if (!ext4_can_truncate(inode))
		goto out_trace;

	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		err = ext4_inline_data_truncate(inode, &has_inline);
		if (err || has_inline)
			goto out_trace;
	}

	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
		if (ext4_inode_attach_jinode(inode) < 0)
			goto out_trace;
	}

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		credits = ext4_writepage_trans_blocks(inode);
	else
		credits = ext4_blocks_for_truncate(inode);

	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
		goto out_trace;
	}

	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
		ext4_block_truncate_page(handle, mapping, inode->i_size);

	/*
	 * We add the inode to the orphan list, so that if this
	 * truncate spans multiple transactions, and we crash, we will
	 * resume the truncate when the filesystem recovers.  It also
	 * marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	err = ext4_orphan_add(handle, inode);
	if (err)
		goto out_stop;

	down_write(&EXT4_I(inode)->i_data_sem);

	ext4_discard_preallocations(inode, 0);

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		err = ext4_ext_truncate(handle, inode);
	else
		ext4_ind_truncate(handle, inode);

	up_write(&ei->i_data_sem);
	if (err)
		goto out_stop;

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_evict_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	inode->i_mtime = inode->i_ctime = current_time(inode);
	err2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(err2 && !err))
		err = err2;
	ext4_journal_stop(handle);

out_trace:
	trace_ext4_truncate_exit(inode);
	return err;
}

/*
 * ext4_get_inode_loc returns with an extra refcount against the inode's
 * underlying buffer_head on success. If 'in_mem' is true, we have all
 * data in memory that is needed to recreate the on-disk version of this
 * inode.
 */
static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
				struct ext4_iloc *iloc, int in_mem,
				ext4_fsblk_t *ret_block)
{
	struct ext4_group_desc *gdp;
	struct buffer_head *bh;
	ext4_fsblk_t block;
	struct blk_plug plug;
	int inodes_per_block, inode_offset;

	iloc->bh = NULL;
	if (ino < EXT4_ROOT_INO ||
	    ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
		return -EFSCORRUPTED;

	iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
	if (!gdp)
		return -EIO;

	/*
	 * Figure out the offset within the block group inode table
	 */
	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	inode_offset = ((ino - 1) %
			EXT4_INODES_PER_GROUP(sb));
	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);

	bh = sb_getblk(sb, block);
	if (unlikely(!bh))
		return -ENOMEM;
	if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO))
		goto simulate_eio;
	if (!buffer_uptodate(bh)) {
		lock_buffer(bh);

		if (ext4_buffer_uptodate(bh)) {
			/* someone brought it uptodate while we waited */
			unlock_buffer(bh);
			goto has_buffer;
		}

		/*
		 * If we have all information of the inode in memory and this
		 * is the only valid inode in the block, we need not read the
		 * block.
		 */
		if (in_mem) {
			struct buffer_head *bitmap_bh;
			int i, start;

			start = inode_offset & ~(inodes_per_block - 1);

			/* Is the inode bitmap in cache? */
			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
			if (unlikely(!bitmap_bh))
				goto make_io;

			/*
			 * If the inode bitmap isn't in cache then the
			 * optimisation may end up performing two reads instead
			 * of one, so skip it.
			 */
			if (!buffer_uptodate(bitmap_bh)) {
				brelse(bitmap_bh);
				goto make_io;
			}
			for (i = start; i < start + inodes_per_block; i++) {
				if (i == inode_offset)
					continue;
				if (ext4_test_bit(i, bitmap_bh->b_data))
					break;
			}
			brelse(bitmap_bh);
			if (i == start + inodes_per_block) {
				/* all other inodes are free, so skip I/O */
				memset(bh->b_data, 0, bh->b_size);
				set_buffer_uptodate(bh);
				unlock_buffer(bh);
				goto has_buffer;
			}
		}

make_io:
		/*
		 * If we need to do any I/O, try to pre-readahead extra
		 * blocks from the inode table.
		 */
		blk_start_plug(&plug);
		if (EXT4_SB(sb)->s_inode_readahead_blks) {
			ext4_fsblk_t b, end, table;
			unsigned num;
			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;

			table = ext4_inode_table(sb, gdp);
			/* s_inode_readahead_blks is always a power of 2 */
			b = block & ~((ext4_fsblk_t) ra_blks - 1);
			if (table > b)
				b = table;
			end = b + ra_blks;
			num = EXT4_INODES_PER_GROUP(sb);
			if (ext4_has_group_desc_csum(sb))
				num -= ext4_itable_unused_count(sb, gdp);
			table += num / inodes_per_block;
			if (end > table)
				end = table;
			while (b <= end)
				ext4_sb_breadahead_unmovable(sb, b++);
		}

		/*
		 * There are other valid inodes in the buffer, this inode
		 * has in-inode xattrs, or we don't have this inode in memory.
		 * Read the block from disk.
		 */
		trace_ext4_load_inode(sb, ino);
		ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
		blk_finish_plug(&plug);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
	simulate_eio:
			if (ret_block)
				*ret_block = block;
			brelse(bh);
			return -EIO;
		}
	}
has_buffer:
	iloc->bh = bh;
	return 0;
}

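/*
 * Like ext4_get_inode_loc(), but never assumes the inode contents are
 * in memory, so the inode table block is always read when not cached.
 * On I/O failure, the error is reported against the offending block.
 */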
static int __ext4_get_inode_loc_noinmem(struct inode *inode,
					struct ext4_iloc *iloc)
{
	ext4_fsblk_t err_blk;
	int ret;

	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0,
					&err_blk);

	if (ret == -EIO)
		ext4_error_inode_block(inode, err_blk, EIO,
					"unable to read itable block");

	return ret;
}

int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
{
	ext4_fsblk_t err_blk;
	int ret;

	/* We have all inode data except xattrs in memory here. */
	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc,
		!ext4_test_inode_state(inode, EXT4_STATE_XATTR), &err_blk);

	if (ret == -EIO)
		ext4_error_inode_block(inode, err_blk, EIO,
					"unable to read itable block");

	return ret;
}

int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
			  struct ext4_iloc *iloc)
{
	return __ext4_get_inode_loc(sb, ino, iloc, 0, NULL);
}

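/*
 * Decide whether DAX can be enabled for this inode: the dax mount
 * options (always/inode/never) and the per-inode DAX flag factor in,
 * the block device must support DAX, and incompatible inode states
 * (journalled data, inline data, encryption, verity) rule it out.
 */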
static bool ext4_should_enable_dax(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	if (test_opt2(inode->i_sb, DAX_NEVER))
		return false;
	if (!S_ISREG(inode->i_mode))
		return false;
	if (ext4_should_journal_data(inode))
		return false;
	if (ext4_has_inline_data(inode))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
		return false;
	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
		return false;
	if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
		return false;
	if (test_opt(inode->i_sb, DAX_ALWAYS))
		return true;

	return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
}

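/*
 * Propagate the on-disk ext4 inode flags into the VFS i_flags.  S_DAX
 * is only computed when @init is true, i.e. while the inode is being
 * set up; afterwards the existing S_DAX state is preserved as is.
 */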
void ext4_set_inode_flags(struct inode *inode, bool init)
{
	unsigned int flags = EXT4_I(inode)->i_flags;
	unsigned int new_fl = 0;

	WARN_ON_ONCE(IS_DAX(inode) && init);

	if (flags & EXT4_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & EXT4_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & EXT4_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & EXT4_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & EXT4_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;

	/* Because of the way inode_set_flags() works we must preserve S_DAX
	 * here if already set. */
	new_fl |= (inode->i_flags & S_DAX);
	if (init && ext4_should_enable_dax(inode))
		new_fl |= S_DAX;

	if (flags & EXT4_ENCRYPT_FL)
		new_fl |= S_ENCRYPTED;
	if (flags & EXT4_CASEFOLD_FL)
		new_fl |= S_CASEFOLD;
	if (flags & EXT4_VERITY_FL)
		new_fl |= S_VERITY;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
}

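/*
 * Decode the on-disk block count.  With the huge_file feature the count
 * is a 48-bit value split across i_blocks_lo/i_blocks_high, and with
 * EXT4_INODE_HUGE_FILE set it is in filesystem-block units rather than
 * 512-byte units, so convert before returning.
 */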
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
				  struct ext4_inode_info *ei)
{
	blkcnt_t i_blocks;
	struct inode *inode = &(ei->vfs_inode);
	struct super_block *sb = inode->i_sb;

	if (ext4_has_feature_huge_file(sb)) {
		/* we are using combined 48 bit field */
		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
					le32_to_cpu(raw_inode->i_blocks_lo);
		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
			/* i_blocks is in units of the file system block size */
			return i_blocks << (inode->i_blkbits - 9);
		} else {
			return i_blocks;
		}
	} else {
		return le32_to_cpu(raw_inode->i_blocks_lo);
	}
}

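/*
 * If the space after the fixed inode fields carries the xattr magic,
 * mark the inode as having in-body xattrs and look for inline data
 * stored there; otherwise note that there is no inline data offset.
 */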
static inline int ext4_iget_extra_inode(struct inode *inode,
					struct ext4_inode *raw_inode,
					struct ext4_inode_info *ei)
{
	__le32 *magic = (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;

	if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
	    EXT4_INODE_SIZE(inode->i_sb) &&
	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
		return ext4_find_inline_data_nolock(inode);
	} else
		EXT4_I(inode)->i_inline_off = 0;
	return 0;
}

int ext4_get_projid(struct inode *inode, kprojid_t *projid)
{
	if (!ext4_has_feature_project(inode->i_sb))
		return -EOPNOTSUPP;
	*projid = EXT4_I(inode)->i_projid;
	return 0;
}

/*
 * ext4 has self-managed i_version for ea inodes: it stores the lower 32 bits
 * of the refcount in i_version, so use raw values if the inode has the
 * EXT4_EA_INODE_FL flag set.
 */
static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
{
	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
		inode_set_iversion_raw(inode, val);
	else
		inode_set_iversion_queried(inode, val);
}

static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
{
	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
		return inode_peek_iversion_raw(inode);
	else
		return inode_peek_iversion(inode);
}

struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
			  ext4_iget_flags flags, const char *function,
			  unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei;
	struct inode *inode;
	journal_t *journal = EXT4_SB(sb)->s_journal;
	long ret;
	loff_t size;
	int block;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	if ((!(flags & EXT4_IGET_SPECIAL) &&
	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
	    (ino < EXT4_ROOT_INO) ||
	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
		if (flags & EXT4_IGET_HANDLE)
			return ERR_PTR(-ESTALE);
		__ext4_error(sb, function, line, false, EFSCORRUPTED, 0,
			     "inode #%lu: comm %s: iget: illegal inode #",
			     ino, current->comm);
		return ERR_PTR(-EFSCORRUPTED);
	}

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	ei = EXT4_I(inode);
	iloc.bh = NULL;

	ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
	if (ret < 0)
		goto bad_inode;
	raw_inode = ext4_raw_inode(&iloc);

	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: root inode unallocated");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}

	if ((flags & EXT4_IGET_HANDLE) &&
	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
		ret = -ESTALE;
		goto bad_inode;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
			EXT4_INODE_SIZE(inode->i_sb) ||
		    (ei->i_extra_isize & 3)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: bad extra_isize %u "
					 "(inode size %u)",
					 ei->i_extra_isize,
					 EXT4_INODE_SIZE(inode->i_sb));
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
	} else
		ei->i_extra_isize = 0;

	/* Precompute checksum seed for inode metadata */
	if (ext4_has_metadata_csum(sb)) {
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		__u32 csum;
		__le32 inum = cpu_to_le32(inode->i_ino);
		__le32 gen = raw_inode->i_generation;
		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
				   sizeof(inum));
		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
					      sizeof(gen));
	}

	if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
	    ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
	    (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
		ext4_error_inode_err(inode, function, line, 0,
				EFSBADCRC, "iget: checksum invalid");
		ret = -EFSBADCRC;
		goto bad_inode;
	}

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (ext4_has_feature_project(sb) &&
	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
	else
		i_projid = EXT4_DEF_PROJID;

	if (!(test_opt(inode->i_sb, NO_UID32))) {
		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	i_uid_write(inode, i_uid);
	i_gid_write(inode, i_gid);
	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));

	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
	ei->i_inline_off = 0;
	ei->i_dir_start_lookup = 0;
	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes.
	 * The test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if ((inode->i_mode == 0 ||
		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
		    ino != EXT4_BOOT_LOADER_INO) {
			/* this inode is deleted */
			ret = -ESTALE;
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those.
		 * OR it is the EXT4_BOOT_LOADER_INO which is
		 * not initialized on a new filesystem. */
	}
	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
	ext4_set_inode_flags(inode, true);
	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
	if (ext4_has_feature_64bit(sb))
		ei->i_file_acl |=
			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
	inode->i_size = ext4_isize(sb, raw_inode);
	if ((size = i_size_read(inode)) < 0) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad i_size value: %lld", size);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	/*
	 * If dir_index is not enabled but there's a dir with the INDEX flag
	 * set, we'd normally treat htree data as empty space. But with
	 * metadata checksumming that corrupts checksums, so forbid it.
	 */
	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
		ext4_error_inode(inode, function, line, 0,
			 "iget: Dir with htree data on filesystem without dir_index feature.");
		ret = -EFSCORRUPTED;
		goto bad_inode;
	}
	ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
	ei->i_block_group = iloc.block_group;
	ei->i_last_alloc_group = ~0;
	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT4_N_BLOCKS; block++)
		ei->i_data[block] = raw_inode->i_block[block];
	INIT_LIST_HEAD(&ei->i_orphan);
	ext4_fc_init_inode(&ei->vfs_inode);

	/*
	 * Set transaction IDs of transactions that have to be committed
	 * to finish f[data]sync. We set them to the currently running
	 * transaction as we cannot be sure that the inode or some of its
	 * metadata isn't part of the transaction - the inode could have
	 * been reclaimed and now it is reread from disk.
	 */
	if (journal) {
		transaction_t *transaction;
		tid_t tid;

		read_lock(&journal->j_state_lock);
		if (journal->j_running_transaction)
			transaction = journal->j_running_transaction;
		else
			transaction = journal->j_committing_transaction;
		if (transaction)
			tid = transaction->t_tid;
		else
			tid = journal->j_commit_sequence;
		read_unlock(&journal->j_state_lock);
		ei->i_sync_tid = tid;
		ei->i_datasync_tid = tid;
	}

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		if (ei->i_extra_isize == 0) {
			/* The extra space is currently unused. Use it. */
			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
			ei->i_extra_isize = sizeof(struct ext4_inode) -
					    EXT4_GOOD_OLD_INODE_SIZE;
		} else {
			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
			if (ret)
				goto bad_inode;
		}
	}

	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);

		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				ivers |=
		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
		}
		ext4_inode_set_iversion_queried(inode, ivers);
	}

	ret = 0;
	if (ei->i_file_acl &&
	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
		ext4_error_inode(inode, function, line, 0,
				 "iget: bad extended attribute block %llu",
				 ei->i_file_acl);
		ret = -EFSCORRUPTED;
		goto bad_inode;
	} else if (!ext4_has_inline_data(inode)) {
		/* validate the block references in the inode */
		if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
			(S_ISLNK(inode->i_mode) &&
			!ext4_inode_is_fast_symlink(inode)))) {
			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
				ret = ext4_ext_check_inode(inode);
			else
				ret = ext4_ind_check_inode(inode);
		}
	}
	if (ret)
		goto bad_inode;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext4_file_inode_operations;
		inode->i_fop = &ext4_file_operations;
		ext4_set_aops(inode);
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext4_dir_inode_operations;
		inode->i_fop = &ext4_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		/* VFS does not allow setting these so this must be corruption */
		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
			ext4_error_inode(inode, function, line, 0,
					 "iget: immutable or append flags "
					 "not allowed on symlinks");
			ret = -EFSCORRUPTED;
			goto bad_inode;
		}
		if (IS_ENCRYPTED(inode)) {
			inode->i_op = &ext4_encrypted_symlink_inode_operations;
			ext4_set_aops(inode);
		} else if (ext4_inode_is_fast_symlink(inode)) {
			inode->i_link = (char *)ei->i_data;
			inode->i_op = &ext4_fast_symlink_inode_operations;
			nd_terminate_link(ei->i_data, inode->i_size,
				sizeof(ei->i_data) - 1);
		} else {
			inode->i_op = &ext4_symlink_inode_operations;
			ext4_set_aops(inode);
		}
		inode_nohighmem(inode);
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &ext4_special_inode_operations;
		if (raw_inode->i_block[0])
			init_special_inode(inode, inode->i_mode,
			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
		else
			init_special_inode(inode, inode->i_mode,
			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
	} else if (ino == EXT4_BOOT_LOADER_INO) {
		make_bad_inode(inode);
	} else {
		ret = -EFSCORRUPTED;
		ext4_error_inode(inode, function, line, 0,
				 "iget: bogus i_mode (%o)", inode->i_mode);
		goto bad_inode;
	}
	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
		ext4_error_inode(inode, function, line, 0,
				 "casefold flag without casefold feature");
	brelse(iloc.bh);

	unlock_new_inode(inode);
	return inode;

bad_inode:
	brelse(iloc.bh);
	iget_failed(inode);
	return ERR_PTR(ret);
}

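/*
 * Encode inode->i_blocks into the on-disk fields: a 32-bit count of
 * 512-byte units when it fits, a 48-bit count with the huge_file
 * feature, or, for the largest files, a 48-bit count in filesystem
 * block units with EXT4_INODE_HUGE_FILE set.  Returns -EFBIG when the
 * count needs huge_file but the feature is not enabled.
 */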
static int ext4_inode_blocks_set(handle_t *handle,
				struct ext4_inode *raw_inode,
				struct ext4_inode_info *ei)
{
	struct inode *inode = &(ei->vfs_inode);
	u64 i_blocks = READ_ONCE(inode->i_blocks);
	struct super_block *sb = inode->i_sb;

	if (i_blocks <= ~0U) {
		/*
		 * i_blocks can be represented in a 32 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = 0;
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		return 0;
	}
	if (!ext4_has_feature_huge_file(sb))
		return -EFBIG;

	if (i_blocks <= 0xffffffffffffULL) {
		/*
		 * i_blocks can be represented in a 48 bit variable
		 * as a multiple of 512 bytes
		 */
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
	} else {
		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
		/* i_blocks is stored in units of the file system block size */
		i_blocks = i_blocks >> (inode->i_blkbits - 9);
		raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
	}
	return 0;
}

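/*
 * If @ino is cached and dirty only because of timestamp updates
 * (lazytime), copy its timestamps into the raw inode sharing the inode
 * table buffer and clear I_DIRTY_TIME, piggybacking on the write of
 * @orig_ino.
 */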
static void __ext4_update_other_inode_time(struct super_block *sb,
					   unsigned long orig_ino,
					   unsigned long ino,
					   struct ext4_inode *raw_inode)
{
	struct inode *inode;

	inode = find_inode_by_ino_rcu(sb, ino);
	if (!inode)
		return;

	if (!inode_is_dirtytime_only(inode))
		return;

	spin_lock(&inode->i_lock);
	if (inode_is_dirtytime_only(inode)) {
		struct ext4_inode_info *ei = EXT4_I(inode);

		inode->i_state &= ~I_DIRTY_TIME;
		spin_unlock(&inode->i_lock);

		spin_lock(&ei->i_raw_lock);
		EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
		EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
		ext4_inode_csum_set(inode, raw_inode, ei);
		spin_unlock(&ei->i_raw_lock);
		trace_ext4_other_inode_update_time(inode, orig_ino);
		return;
	}
	spin_unlock(&inode->i_lock);
}

/*
 * Opportunistically update the other time fields for other inodes in
 * the same inode table block.
 */
static void ext4_update_other_inodes_time(struct super_block *sb,
					  unsigned long orig_ino, char *buf)
{
	unsigned long ino;
	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int inode_size = EXT4_INODE_SIZE(sb);

	/*
	 * Calculate the first inode in the inode table block.  Inode
	 * numbers are one-based.  That is, the first inode in a block
	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
	 */
	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
	rcu_read_lock();
	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
		if (ino == orig_ino)
			continue;
		__ext4_update_other_inode_time(sb, orig_ino, ino,
					       (struct ext4_inode *)buf);
	}
	rcu_read_unlock();
}

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 *
 * The caller must have write access to iloc->bh.
 */
static int ext4_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext4_iloc *iloc)
{
	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct buffer_head *bh = iloc->bh;
	struct super_block *sb = inode->i_sb;
	int err = 0, block;
	int need_datasync = 0, set_large_file = 0;
	uid_t i_uid;
	gid_t i_gid;
	projid_t i_projid;

	spin_lock(&ei->i_raw_lock);

	/* For fields not tracked in the in-memory inode,
	 * initialise them to zero for new inodes. */
	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);

	err = ext4_inode_blocks_set(handle, raw_inode, ei);
	if (err) {
		spin_unlock(&ei->i_raw_lock);
		goto out_brelse;
	}

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	i_uid = i_uid_read(inode);
	i_gid = i_gid_read(inode);
	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
		/*
		 * Fix up interoperability with old kernels. Otherwise,
		 * old inodes get re-used with the upper 16 bits of the
		 * uid/gid intact.
		 */
		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		} else {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(i_gid));
		}
	} else {
		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);

	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);

	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
		raw_inode->i_file_acl_high =
			cpu_to_le16(ei->i_file_acl >> 32);
	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
		ext4_isize_set(raw_inode, ei->i_disksize);
		need_datasync = 1;
	}
	if (ei->i_disksize > 0x7fffffffULL) {
		if (!ext4_has_feature_large_file(sb) ||
				EXT4_SB(sb)->s_es->s_rev_level ==
		    cpu_to_le32(EXT4_GOOD_OLD_REV))
			set_large_file = 1;
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			raw_inode->i_block[0] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			raw_inode->i_block[1] = 0;
		} else {
			raw_inode->i_block[0] = 0;
			raw_inode->i_block[1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			raw_inode->i_block[2] = 0;
		}
	} else if (!ext4_has_inline_data(inode)) {
		for (block = 0; block < EXT4_N_BLOCKS; block++)
			raw_inode->i_block[block] = ei->i_data[block];
	}

	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
		u64 ivers = ext4_inode_peek_iversion(inode);

		raw_inode->i_disk_version = cpu_to_le32(ivers);
		if (ei->i_extra_isize) {
			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
				raw_inode->i_version_hi =
					cpu_to_le32(ivers >> 32);
			raw_inode->i_extra_isize =
				cpu_to_le16(ei->i_extra_isize);
		}
	}

	BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
	       i_projid != EXT4_DEF_PROJID);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
		raw_inode->i_projid = cpu_to_le32(i_projid);

	ext4_inode_csum_set(inode, raw_inode, ei);
	spin_unlock(&ei->i_raw_lock);
	if (inode->i_sb->s_flags & SB_LAZYTIME)
		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
					      bh->b_data);

	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, bh);
	if (err)
		goto out_brelse;
	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	if (set_large_file) {
		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
		if (err)
			goto out_brelse;
		lock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_set_feature_large_file(sb);
		ext4_superblock_csum_set(sb);
		unlock_buffer(EXT4_SB(sb)->s_sbh);
		ext4_handle_sync(handle);
		err = ext4_handle_dirty_metadata(handle, NULL,
						 EXT4_SB(sb)->s_sbh);
	}
	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
	brelse(bh);
	ext4_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext4_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
 *   Here, there will be no transaction running. We wait for any running
 *   transaction to commit.
 *
 * - Within flush work (sys_sync(), kupdate and such).
 *   We wait on commit, if told to.
 *
 * - Within iput_final() -> write_inode_now()
 *   We wait on commit, if told to.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
 * writeback.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because write_inode() could occur while `stuff()' is running,
 * and the new i_size will be lost.  Plus the inode will no longer be on the
 * superblock's dirty inode list.
 */
int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int err;

	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
	    sb_rdonly(inode->i_sb))
		return 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (EXT4_SB(inode->i_sb)->s_journal) {
		if (ext4_journal_current_handle()) {
			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
			dump_stack();
			return -EIO;
		}

		/*
		 * No need to force transaction in WB_SYNC_NONE mode. Also
		 * ext4_sync_fs() will force the commit after everything is
		 * written.
		 */
		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
			return 0;

		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
						EXT4_I(inode)->i_sync_tid);
	} else {
		struct ext4_iloc iloc;

		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
		if (err)
			return err;
		/*
		 * sync(2) will flush the whole buffer cache. No need to do
		 * it here separately for each inode.
		 */
		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
			sync_dirty_buffer(iloc.bh);
		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
					       "IO error syncing inode");
			err = -EIO;
		}
		brelse(iloc.bh);
	}
	return err;
}

/*
 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
 * buffers that are attached to a page straddling i_size and are undergoing
 * commit. In that case we have to wait for commit to finish and try again.
 */
static void ext4_wait_for_tail_page_commit(struct inode *inode)
{
	struct page *page;
	unsigned offset;
	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
	tid_t commit_tid = 0;
	int ret;

	offset = inode->i_size & (PAGE_SIZE - 1);
	/*
	 * If the page is fully truncated, we don't need to wait for any commit
	 * (and we even should not as __ext4_journalled_invalidatepage() may
	 * strip all buffers from the page but keep the page dirty which can then
	 * confuse e.g. concurrent ext4_writepage() seeing dirty page without
	 * buffers). Also we don't need to wait for any commit if all buffers in
	 * the page remain valid. This is most beneficial for the common case of
	 * blocksize == PAGESIZE.
	 */
	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
		return;
	while (1) {
		page = find_lock_page(inode->i_mapping,
				      inode->i_size >> PAGE_SHIFT);
		if (!page)
			return;
		ret = __ext4_journalled_invalidatepage(page, offset,
						PAGE_SIZE - offset);
		unlock_page(page);
		put_page(page);
		if (ret != -EBUSY)
			return;
		commit_tid = 0;
		read_lock(&journal->j_state_lock);
		if (journal->j_committing_transaction)
			commit_tid = journal->j_committing_transaction->t_tid;
		read_unlock(&journal->j_state_lock);
		if (commit_tid)
			jbd2_log_wait_commit(journal, commit_tid);
	}
}

/*
 * ext4_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * Another thing we have to assure is that if we are in ordered mode
 * and the inode is still attached to the committing transaction, we
 * must start writeout of all the dirty pages which are being truncated.
 * This way we are sure that all the data written in the previous
 * transaction are already on disk (truncate waits for pages under
 * writeback).
 *
 * Called with inode->i_mutex down.
 */
int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int error, rc = 0;
	int orphan = 0;
	const unsigned int ia_valid = attr->ia_valid;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
		     (ia_valid & (ATTR_MODE | ATTR_UID |
				  ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	error = setattr_prepare(mnt_userns, dentry, attr);
	if (error)
		return error;

	error = fscrypt_prepare_setattr(dentry, attr);
	if (error)
		return error;

	error = fsverity_prepare_setattr(dentry, attr);
	if (error)
		return error;

	if (is_quota_modification(inode, attr)) {
		error = dquot_initialize(inode);
		if (error)
			return error;
	}
	ext4_fc_start_update(inode);
	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		handle_t *handle;

		/* (user+group)*(old+new) structure, inode write (sb,
		 * inode block, ? - but truncate inode update has it) */
		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		/* dquot_transfer() calls back ext4_get_inode_usage() which
		 * counts xattr inode references.
		 */
		down_read(&EXT4_I(inode)->xattr_sem);
		error = dquot_transfer(inode, attr);
		up_read(&EXT4_I(inode)->xattr_sem);

		if (error) {
			ext4_journal_stop(handle);
			ext4_fc_stop_update(inode);
			return error;
		}
		/* Update corresponding info in inode so that everything is in
		 * one transaction */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		error = ext4_mark_inode_dirty(handle, inode);
		ext4_journal_stop(handle);
		if (unlikely(error)) {
			ext4_fc_stop_update(inode);
			return error;
		}
	}

	if (attr->ia_valid & ATTR_SIZE) {
		handle_t *handle;
		loff_t oldsize = inode->i_size;
		int shrink = (attr->ia_size < inode->i_size);

		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
				ext4_fc_stop_update(inode);
				return -EFBIG;
			}
		}
		if (!S_ISREG(inode->i_mode)) {
			ext4_fc_stop_update(inode);
			return -EINVAL;
		}

		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
			inode_inc_iversion(inode);

		if (shrink) {
			if (ext4_should_order_data(inode)) {
				error = ext4_begin_ordered_truncate(inode,
							    attr->ia_size);
				if (error)
					goto err_out;
			}
			/*
			 * Blocks are going to be removed from the inode. Wait
			 * for dio in flight.
			 */
			inode_dio_wait(inode);
		}

		down_write(&EXT4_I(inode)->i_mmap_sem);

		rc = ext4_break_layouts(inode);
		if (rc) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			goto err_out;
		}

		if (attr->ia_size != inode->i_size) {
			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
			if (IS_ERR(handle)) {
				error = PTR_ERR(handle);
				goto out_mmap_sem;
			}
			if (ext4_handle_valid(handle) && shrink) {
				error = ext4_orphan_add(handle, inode);
				orphan = 1;
			}
			/*
			 * Update c/mtime on truncate up; ext4_truncate() will
			 * update c/mtime in the shrink case below.
			 */
			if (!shrink) {
				inode->i_mtime = current_time(inode);
				inode->i_ctime = inode->i_mtime;
			}

			if (shrink)
				ext4_fc_track_range(handle, inode,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits,
					(oldsize > 0 ? oldsize - 1 : 0) >>
					inode->i_sb->s_blocksize_bits);
			else
				ext4_fc_track_range(
					handle, inode,
					(oldsize > 0 ? oldsize - 1 : oldsize) >>
					inode->i_sb->s_blocksize_bits,
					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
					inode->i_sb->s_blocksize_bits);

			down_write(&EXT4_I(inode)->i_data_sem);
			EXT4_I(inode)->i_disksize = attr->ia_size;
			rc = ext4_mark_inode_dirty(handle, inode);
			if (!error)
				error = rc;
			/*
			 * We have to update i_size under i_data_sem together
			 * with i_disksize to avoid races with writeback code
			 * running ext4_wb_update_i_disksize().
			 */
			if (!error)
				i_size_write(inode, attr->ia_size);
			up_write(&EXT4_I(inode)->i_data_sem);
			ext4_journal_stop(handle);
			if (error)
				goto out_mmap_sem;
			if (!shrink) {
				pagecache_isize_extended(inode, oldsize,
							 inode->i_size);
			} else if (ext4_should_journal_data(inode)) {
				ext4_wait_for_tail_page_commit(inode);
			}
		}

		/*
		 * Truncate pagecache after we've waited for commit
		 * in data=journal mode to make pages freeable.
		 */
		truncate_pagecache(inode, inode->i_size);
		/*
		 * Call ext4_truncate() even if i_size didn't change to
		 * truncate possible preallocated blocks.
		 */
		if (attr->ia_size <= oldsize) {
			rc = ext4_truncate(inode);
			if (rc)
				error = rc;
		}
out_mmap_sem:
		up_write(&EXT4_I(inode)->i_mmap_sem);
	}

	if (!error) {
		setattr_copy(mnt_userns, inode, attr);
		mark_inode_dirty(inode);
	}

	/*
	 * If the call to ext4_truncate failed to get a transaction handle at
	 * all, we need to clean up the in-core orphan list manually.
	 */
	if (orphan && inode->i_nlink)
		ext4_orphan_del(NULL, inode);

	if (!error && (ia_valid & ATTR_MODE))
		rc = posix_acl_chmod(mnt_userns, inode, inode->i_mode);

err_out:
	if (error)
		ext4_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	ext4_fc_stop_update(inode);
	return error;
}

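/*
 * Fill in stat information for the VFS.  Beyond generic_fillattr(),
 * this reports the creation time when the on-disk inode is large
 * enough to carry i_crtime, and translates the ext4 inode flags into
 * statx attributes.
 */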
int ext4_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ext4_inode *raw_inode;
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int flags;

	if ((request_mask & STATX_BTIME) &&
	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = ei->i_crtime.tv_sec;
		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
	}

	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
	if (flags & EXT4_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (flags & EXT4_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & EXT4_ENCRYPT_FL)
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & EXT4_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & EXT4_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (flags & EXT4_VERITY_FL)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(mnt_userns, inode, stat);
	return 0;
}

5585
5586int ext4_file_getattr(struct user_namespace *mnt_userns,
5587 const struct path *path, struct kstat *stat,
5588 u32 request_mask, unsigned int query_flags)
5589{
5590 struct inode *inode = d_inode(path->dentry);
5591 u64 delalloc_blocks;
5592
5593 ext4_getattr(mnt_userns, path, stat, request_mask, query_flags);
5594
5595 /*
5596 * If there is inline data in the inode, the inode will normally not
5597 * have data blocks allocated (it may have an external xattr block).
5598 * Report at least one sector for such files, so tools like tar, rsync,
5599 * others don't incorrectly think the file is completely sparse.
5600 */
5601 if (unlikely(ext4_has_inline_data(inode)))
5602 stat->blocks += (stat->size + 511) >> 9;
5603
5604 /*
5605 * We can't update i_blocks if the block allocation is delayed
5606 * otherwise in the case of system crash before the real block
5607 * allocation is done, we will have i_blocks inconsistent with
5608 * on-disk file blocks.
5609 * We always keep i_blocks updated together with real
5610 * allocation. But to not confuse with user, stat
5611 * will return the blocks that include the delayed allocation
5612 * blocks for this file.
5613 */
5614 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5615 EXT4_I(inode)->i_reserved_data_blocks);
5616 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5617 return 0;
5618}
5619
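/*
 * Worst-case number of index/indirect blocks touched when mapping
 * @lblocks logical blocks to @pextents physical extents, depending on
 * whether the inode is extent-mapped or uses indirect blocks.
 */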
static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
				   int pextents)
{
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return ext4_ind_trans_blocks(inode, lblocks);
	return ext4_ext_index_trans_blocks(inode, pextents);
}

/*
 * Account for index blocks, block group bitmaps and block group descriptor
 * blocks if we modify data blocks and index blocks. In the worst case, the
 * index blocks spread over different block groups.
 *
 * If data blocks are discontiguous, they can spread over different block
 * groups too. Even if they are contiguous, with flexbg they could still
 * cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks.
 */
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents)
{
	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
	int gdpblocks;
	int idxblocks;
	int ret = 0;

	/*
	 * How many index blocks do we need to touch to map @lblocks logical
	 * blocks to @pextents physical extents?
	 */
	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);

	ret = idxblocks;

	/*
	 * Now let's see how many group bitmaps and group descriptors
	 * need to be accounted.
	 */
	groups = idxblocks + pextents;
	gdpblocks = groups;
	if (groups > ngroups)
		groups = ngroups;
	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

	/* bitmaps and block group descriptor blocks */
	ret += groups + gdpblocks;

	/* Blocks for super block, inode, quota and xattr blocks */
	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

	return ret;
}

/*
 * Calculate the total number of credits to reserve to fit
 * the modification of a single page into a single transaction,
 * which may include multiple chunks of block allocations.
 *
 * This could be called via ext4_write_begin()
 *
 * We need to consider the worst case, with
 * one new block per extent.
 */
int ext4_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext4_journal_blocks_per_page(inode);
	int ret;

	ret = ext4_meta_trans_blocks(inode, bpp, bpp);

	/* Account for data blocks for journalled mode */
	if (ext4_should_journal_data(inode))
		ret += bpp;
	return ret;
}

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever calling
 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
	return ext4_meta_trans_blocks(inode, nrblocks, 1);
}

/*
 * The caller must have previously called ext4_reserve_inode_write().
 * Given this, we know that the caller already has write access to iloc->bh.
 */
int ext4_mark_iloc_dirty(handle_t *handle,
			 struct inode *inode, struct ext4_iloc *iloc)
{
	int err = 0;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
		put_bh(iloc->bh);
		return -EIO;
	}
	ext4_fc_track_inode(handle, inode);

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);

	/* the do_update_inode consumes one bh->b_count */
	get_bh(iloc->bh);

	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
	err = ext4_do_update_inode(handle, inode, iloc);
	put_bh(iloc->bh);
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */
int
ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext4_iloc *iloc)
{
	int err;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	err = ext4_get_inode_loc(inode, iloc);
	if (!err) {
		BUFFER_TRACE(iloc->bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, iloc->bh);
		if (err) {
			brelse(iloc->bh);
			iloc->bh = NULL;
		}
	}
	ext4_std_error(inode->i_sb, err);
	return err;
}

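/*
 * The canonical pairing of the two helpers above (a sketch; the middle
 * step is whatever in-core inode update the caller needs to make):
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (err)
 *		return err;
 *	... modify the in-core inode / EXT4_I(inode) fields ...
 *	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *
 * ext4_mark_iloc_dirty() consumes the bh reference taken by
 * ext4_reserve_inode_write(), so on this path no explicit brelse()
 * is needed.
 */
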
static int __ext4_expand_extra_isize(struct inode *inode,
				     unsigned int new_extra_isize,
				     struct ext4_iloc *iloc,
				     handle_t *handle, int *no_expand)
{
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int error;

	/* this was checked at iget time, but double check for good measure */
	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
	    (ei->i_extra_isize & 3)) {
		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
				 ei->i_extra_isize,
				 EXT4_INODE_SIZE(inode->i_sb));
		return -EFSCORRUPTED;
	}
	if ((new_extra_isize < ei->i_extra_isize) ||
	    (new_extra_isize < 4) ||
	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
		return -EINVAL;	/* Should never happen */

	raw_inode = ext4_raw_inode(iloc);

	header = IHDR(inode, raw_inode);

	/* No extended attributes present */
	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
		       EXT4_I(inode)->i_extra_isize, 0,
		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		return 0;
	}

	/* try to expand with EAs present */
	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
					   raw_inode, handle);
	if (error) {
		/* Inode size expansion failed; don't try again */
		*no_expand = 1;
	}

	return error;
}

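/*
 * On-disk inode layout being grown above (a sketch):
 *
 *	+-------------------------------+ 0
 *	| struct ext4_inode (128 bytes) | EXT4_GOOD_OLD_INODE_SIZE
 *	+-------------------------------+
 *	| extra fields: i_extra_isize   | grown from i_extra_isize to
 *	| bytes                         | new_extra_isize
 *	+-------------------------------+
 *	| ibody xattr header + entries  | shifted up by
 *	| (if EXT4_STATE_XATTR is set)  | ext4_expand_extra_isize_ea()
 *	+-------------------------------+ EXT4_INODE_SIZE(sb)
 */
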
/*
 * Expand an inode by new_extra_isize bytes.
 * Returns 0 on success or a negative error number on failure.
 */
static int ext4_try_to_expand_extra_isize(struct inode *inode,
					  unsigned int new_extra_isize,
					  struct ext4_iloc iloc,
					  handle_t *handle)
{
	int no_expand;
	int error;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
		return -EOVERFLOW;

	/*
	 * In nojournal mode, we can immediately attempt to expand
	 * the inode.  When journaled, we first need to obtain extra
	 * buffer credits since we may write into the EA block
	 * with this same handle.  If journal_extend fails, then it will
	 * only result in a minor loss of functionality for that inode.
	 * If this is felt to be critical, then e2fsck should be run to
	 * force a large enough s_min_extra_isize.
	 */
	if (ext4_journal_extend(handle,
				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
		return -ENOSPC;

	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
		return -EBUSY;

	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
					  handle, &no_expand);
	ext4_write_unlock_xattr(inode, &no_expand);

	return error;
}

int ext4_expand_extra_isize(struct inode *inode,
			    unsigned int new_extra_isize,
			    struct ext4_iloc *iloc)
{
	handle_t *handle;
	int no_expand;
	int error, rc;

	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
		brelse(iloc->bh);
		return -EOVERFLOW;
	}

	handle = ext4_journal_start(inode, EXT4_HT_INODE,
				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
		brelse(iloc->bh);
		return error;
	}

	ext4_write_lock_xattr(inode, &no_expand);

	BUFFER_TRACE(iloc->bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, iloc->bh);
	if (error) {
		brelse(iloc->bh);
		goto out_unlock;
	}

	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
					  handle, &no_expand);

	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
	if (!error)
		error = rc;

out_unlock:
	ext4_write_unlock_xattr(inode, &no_expand);
	ext4_journal_stop(handle);
	return error;
}

/*
 * What we do here is to mark the in-core inode as clean with respect to
 * inode dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 */
int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
			    const char *func, unsigned int line)
{
	struct ext4_iloc iloc;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err;

	might_sleep();
	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		goto out;

	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
					       iloc, handle);

	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
out:
	if (unlikely(err))
		ext4_error_inode_err(inode, func, line, 0, err,
				     "mark_inode_dirty error");
	return err;
}

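/*
 * Callers below use ext4_mark_inode_dirty(), which is expected to be a
 * thin wrapper in ext4.h that feeds __func__ and __LINE__ into
 * __ext4_mark_inode_dirty() for error reporting, roughly:
 *
 *	#define ext4_mark_inode_dirty(__h, __i)	\
 *		__ext4_mark_inode_dirty((__h), (__i), __func__, __LINE__)
 */
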
/*
 * ext4_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, dquot_alloc_block() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext4_dirty_inode(struct inode *inode, int flags)
{
	handle_t *handle;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
	if (IS_ERR(handle))
		return;
	ext4_mark_inode_dirty(handle, inode);
	ext4_journal_stop(handle);
}

int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching the inode's aops, we
	 * have to flush all dirty data the inode has.  There can be
	 * outstanding delayed allocations, and there can be unwritten
	 * extents created by fallocate or buffered writes in
	 * dioread_nolock mode covered by dirty data which can be
	 * converted only after flushing the dirty data (and journalled
	 * aops don't know how to handle these cases).
	 */
	if (val) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			return err;
		}
	}

	percpu_down_write(&sbi->s_writepages_rwsem);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal, 0);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			percpu_up_write(&sbi->s_writepages_rwsem);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);

	jbd2_journal_unlock_updates(journal);
	percpu_up_write(&sbi->s_writepages_rwsem);

	if (val)
		up_write(&EXT4_I(inode)->i_mmap_sem);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_fc_mark_ineligible(inode->i_sb,
				EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

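/*
 * Summary of the switch sequence above (descriptive only): flush dirty
 * pages when turning data journalling on, take s_writepages_rwsem and
 * jbd2_journal_lock_updates() so no writeback or transaction is in
 * flight (flushing the whole journal first when turning it off, to
 * avoid stale log records), flip EXT4_INODE_JOURNAL_DATA and swap the
 * aops, drop the locks, and finally record the flag change under a
 * sync handle so it reaches the journal promptly.
 */
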
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int err;
	vm_fault_t ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);

	err = ext4_convert_inline_data(inode);
	if (err)
		goto out_ret;

	/*
	 * On data journalling we skip straight to the transaction handle:
	 * there's no delalloc; page truncation will be checked later; the
	 * early return w/ all buffers mapped (calculates size/len) can't
	 * be used; and there's no dioread_nolock, so only ext4_get_block.
	 */
	if (ext4_should_journal_data(inode))
		goto retry_alloc;

	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			err = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (err == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped.  This avoids the need
	 * to do journal_start/journal_stop, which can block and take a
	 * long time.
	 *
	 * This cannot be done for data journalling, as we have to add the
	 * inode to the transaction's list to writeprotect pages on commit.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	/*
	 * Data journalling can't use block_page_mkwrite() because it
	 * will set_buffer_dirty() before do_journal_get_write_access()
	 * and thus might hit warning messages for dirty metadata buffers.
	 */
	if (!ext4_should_journal_data(inode)) {
		err = block_page_mkwrite(vma, vmf, get_block);
	} else {
		lock_page(page);
		size = i_size_read(inode);
		/* Page got truncated from under us? */
		if (page->mapping != mapping || page_offset(page) > size) {
			ret = VM_FAULT_NOPAGE;
			goto out_error;
		}

		if (page->index == size >> PAGE_SHIFT)
			len = size & ~PAGE_MASK;
		else
			len = PAGE_SIZE;

		err = __block_write_begin(page, 0, len, ext4_get_block);
		if (!err) {
			ret = VM_FAULT_SIGBUS;
			if (ext4_walk_page_buffers(handle, page_buffers(page),
					0, len, NULL,
					do_journal_get_write_access))
				goto out_error;
			if (ext4_walk_page_buffers(handle, page_buffers(page),
					0, len, NULL, write_end_fn))
				goto out_error;
			if (ext4_jbd2_inode_add_write(handle, inode,
						      page_offset(page), len))
				goto out_error;
			ext4_set_inode_state(inode, EXT4_STATE_JDATA);
		} else {
			unlock_page(page);
		}
	}
	ext4_journal_stop(handle);
	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(err);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
out_error:
	unlock_page(page);
	ext4_journal_stop(handle);
	goto out;
}

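/*
 * Path summary for ext4_page_mkwrite() (descriptive only): delalloc
 * writes go through block_page_mkwrite() with ext4_da_get_block_prep,
 * reserving space rather than allocating it; a fully-mapped page
 * returns VM_FAULT_LOCKED without starting a handle; everything else
 * starts a handle sized by ext4_writepage_trans_blocks() and, in
 * data=journal mode, journals the data buffers by hand.
 */
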
vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return ret;
}