/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			     struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u16 csum_lo;
	__u16 csum_hi = 0;
	__u32 csum;

	csum_lo = raw->i_checksum_lo;
	raw->i_checksum_lo = 0;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
		csum_hi = raw->i_checksum_hi;
		raw->i_checksum_hi = 0;
	}

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
			   EXT4_INODE_SIZE(inode->i_sb));

	raw->i_checksum_lo = csum_lo;
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = csum_hi;

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}
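
/*
 * Illustrative sketch only (the helper name is hypothetical, not part of
 * ext4): how a full 32-bit checksum is reassembled from the two 16-bit
 * on-disk fields, mirroring the verify path above.  Large inodes carry
 * the high half in i_checksum_hi; old 128-byte inodes only store the low
 * half, which is why the verify path masks "calculated" with 0xFFFF.
 */
static inline __u32 ext4_inode_csum_combine(__le16 csum_lo, __le16 csum_hi)
{
	return ((__u32)le16_to_cpu(csum_hi) << 16) | le16_to_cpu(csum_lo);
}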

static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
				struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
		struct inode *inode, struct page *page, loff_t from,
		loff_t length, int flags);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
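	/*
	 * i_blocks counts 512-byte sectors, so an external xattr block
	 * (i_file_acl) contributes s_blocksize >> 9 sectors that must
	 * be ignored when testing whether the symlink body is inline.
	 */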
	int ea_blocks = EXT4_I(inode)->i_file_acl ?
		(inode->i_sb->s_blocksize >> 9) : 0;

	return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
				int nblocks)
{
	int ret;

	/*
	 * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
	 * moment, get_block can be called only for blocks inside i_size since
	 * page cache has been already dropped and writes are blocked by
	 * i_mutex.  So we can safely drop the i_data_sem here.
	 */
	BUG_ON(EXT4_JOURNAL(inode) == NULL);
	jbd_debug(2, "restarting handle %p\n", handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_journal_restart(handle, nblocks);
	down_write(&EXT4_I(inode)->i_data_sem);
	ext4_discard_preallocations(inode);

	return ret;
}
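
/*
 * Usage sketch (not a real caller): a truncate loop is expected to dirty
 * everything against the handle first and only then restart, e.g.
 *
 *	if (!ext4_handle_has_enough_credits(handle, needed))
 *		err = ext4_truncate_restart_trans(handle, inode, needed);
 *
 * because the restart commits the running transaction.
 */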

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;

	trace_ext4_evict_inode(inode);

	ext4_ioend_wait(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal.  So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss.  Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped, and thus a user could see stale data if they are
		 * read before the transaction is checkpointed.  So be
		 * careful and force everything to disk here...  We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_log_start_commit(journal, commit_tid);
			jbd2_log_wait_commit(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages(&inode->i_data, 0);
		goto no_delete;
	}

	if (!is_bad_inode(inode))
		dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks)
		ext4_truncate(inode);

	/*
	 * ext4_ext_truncate() doesn't reserve any slop when it
	 * restarts journal transactions; therefore there may not be
	 * enough credits left in the handle to remove the inode from
	 * the orphan list and set the dtime field.
	 */
	if (!ext4_handle_has_enough_credits(handle, 3)) {
		err = ext4_journal_extend(handle, 3);
		if (err > 0)
			err = ext4_journal_restart(handle, 3);
		if (err != 0) {
			ext4_warning(inode->i_sb,
				     "couldn't extend journal (err %d)", err);
		stop_handle:
			ext4_journal_stop(handle);
			ext4_orphan_del(NULL, inode);
			goto no_delete;
		}
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime = get_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	return;
no_delete:
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks needed to reserve
 * in order to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return ext4_ext_calc_metadata_amount(inode, lblock);

	return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
		ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
			 "with only %d reserved metadata blocks\n", __func__,
			 inode->i_ino, ei->i_allocated_meta_blocks,
			 ei->i_reserved_meta_blocks);
		WARN_ON(1);
		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
			   used + ei->i_allocated_meta_blocks);
	ei->i_allocated_meta_blocks = 0;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated.  So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    (atomic_read(&inode->i_writecount) == 0))
		ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
				  unsigned int line,
				  struct ext4_map_blocks *map)
{
	if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
				   map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_len);
		return -EIO;
	}
	return 0;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))
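
/*
 * Usage sketch: callers validate a mapping they just obtained, e.g.
 *
 *	ret = check_block_validity(inode, &map);
 *
 * so that an "illegal pblock" report carries the caller's __func__ and
 * __LINE__ rather than this helper's.
 */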

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
				    unsigned int max_pages)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	struct pagevec pvec;
	pgoff_t num = 0;
	int i, nr_pages, done = 0;

	if (max_pages == 0)
		return 0;
	pagevec_init(&pvec, 0);
	while (!done) {
		index = idx;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
					      PAGECACHE_TAG_DIRTY,
					      (pgoff_t)PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			lock_page(page);
			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page) ||
			    PageWriteback(page) ||
			    page->index != idx) {
				done = 1;
				unlock_page(page);
				break;
			}
			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					if (!buffer_delay(bh) &&
					    !buffer_unwritten(bh))
						done = 1;
					bh = bh->b_this_page;
				} while (!done && (bh != head));
			}
			unlock_page(page);
			if (done)
				break;
			idx++;
			num++;
			if (num >= max_pages) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
	}
	return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
				  struct ext4_map_blocks *map)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	int i, nr_pages;
	pgoff_t index, end;

	index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
	end = (map->m_lblk + map->m_len - 1) >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index,
					  min(end - index + 1,
					      (pgoff_t)PAGEVEC_SIZE));
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			struct buffer_head *bh, *head;

			if (unlikely(page->mapping != mapping) ||
			    !PageDirty(page))
				break;

			if (page_has_buffers(page)) {
				bh = head = page_buffers(page);
				do {
					set_buffer_da_mapped(bh);
					bh = bh->b_this_page;
				} while (bh != head);
			}
			index++;
		}
		pagevec_release(&pvec);
	}
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns the result if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head, and marks it
 * mapped.
 *
 * If the file is extent based, it will call ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the result buffer head is unmapped.  If create == 1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if a plain lookup failed (blocks have not been allocated);
 * in that case the buffer head is unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	int retval;

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks are already allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		return retval;

	/*
	 * When we call get_blocks without the create flag, the
	 * BH_Unwritten flag could have gotten set if the blocks
	 * requested were part of an uninitialized extent.  We need to
	 * clear this flag now that we are committed to convert all or
	 * part of the uninitialized extent to be an initialized
	 * extent.  This is because we need to avoid the combination
	 * of BH_Unwritten and BH_Mapped flags being simultaneously
	 * set on the buffer_head.
	 */
	map->m_flags &= ~EXT4_MAP_UNWRITTEN;

	/*
	 * Allocating new blocks and/or writing to an uninitialized extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_blocks()
	 * with create == 1 flag.
	 */
	down_write((&EXT4_I(inode)->i_data_sem));

	/*
	 * If the caller is from the delayed allocation writeout path,
	 * we have already reserved fs blocks for allocation; let the
	 * underlying get_block() function know, to avoid double
	 * accounting.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
		ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
	/*
	 * We need to check for EXT4_INODE_EXTENTS here because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

		/* If we have successfully mapped the delayed allocated blocks,
		 * set the BH_Da_Mapped bit on them.  It's important to do this
		 * under the protection of i_data_sem.
		 */
		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
			set_buffers_da_mapped(inode, map);
	}

	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		int ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}
	return retval;
}
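
/*
 * Sketch of the lookup-only calling convention described above (the
 * wrapper name is hypothetical, not part of ext4): fill in m_lblk/m_len
 * and pass neither a handle nor EXT4_GET_BLOCKS_CREATE, so nothing is
 * allocated.  The real callers below (e.g. ext4_getblk()) follow the
 * same pattern.
 */
static inline int ext4_map_one_block(struct inode *inode, ext4_lblk_t lblk,
				     struct ext4_map_blocks *map)
{
	map->m_lblk = lblk;
	map->m_len = 1;
	return ext4_map_blocks(NULL, inode, map, 0);
}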

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	handle_t *handle = ext4_journal_current_handle();
	struct ext4_map_blocks map;
	int ret = 0, started = 0;
	int dio_credits;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	if (flags && !handle) {
		/* Direct IO write... */
		if (map.m_len > DIO_MAX_BLOCKS)
			map.m_len = DIO_MAX_BLOCKS;
		dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
		handle = ext4_journal_start(inode, dio_credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			return ret;
		}
		started = 1;
	}

	ret = ext4_map_blocks(handle, inode, &map, flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	}
	if (started)
		ext4_journal_stop(handle);
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int create, int *errp)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map,
			      create ? EXT4_GET_BLOCKS_CREATE : 0);

	if (err < 0)
		*errp = err;
	if (err <= 0)
		return NULL;
	*errp = 0;

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (!bh) {
		*errp = -EIO;
		return NULL;
	}
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		fatal = ext4_journal_get_create_access(handle, bh);
		if (!fatal && !buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (!fatal)
			fatal = err;
	} else {
		BUFFER_TRACE(bh, "not a new buffer");
	}
	if (fatal) {
		*errp = fatal;
		brelse(bh);
		bh = NULL;
	}
	return bh;
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int create, int *err)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, create, err);
	if (!bh)
		return bh;
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	*err = -EIO;
	return NULL;
}
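
/*
 * Read-side usage sketch (hypothetical helper, not part of ext4): with
 * create == 0 the handle may be NULL, as noted above ext4_getblk(), so
 * a lookup-only read of an existing block reduces to this.
 */
static inline struct buffer_head *ext4_bread_existing(struct inode *inode,
						      ext4_lblk_t block,
						      int *err)
{
	return ext4_bread(NULL, inode, block, 0, err);
}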

static int walk_page_buffers(handle_t *handle,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
				       struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_CACHE_SHIFT;
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

retry:
	handle = ext4_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	/* We cannot recurse into the filesystem as the transaction is already
	 * started */
	flags |= AOP_FLAG_NOFS;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		ext4_journal_stop(handle);
		ret = -ENOMEM;
		goto out;
	}
	*pagep = page;

	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len, ext4_get_block_write);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);

	if (!ret && ext4_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, page_buffers(page),
				from, to, NULL, do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		page_cache_release(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}
	}

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
out:
	return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	int i_size_changed = 0;
	struct inode *inode = mapping->host;
	handle_t *handle = ext4_journal_current_handle();

	copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

	/*
	 * No need to use i_size_read() here, the i_size
	 * cannot change under us because we hold i_mutex.
	 *
	 * But it's important to update i_size while still holding page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	if (pos + copied > inode->i_size) {
		i_size_write(inode, pos + copied);
		i_size_changed = 1;
	}

	if (pos + copied > EXT4_I(inode)->i_disksize) {
		/* We need to mark the inode dirty even if new_i_size is
		 * less than inode->i_size but greater than i_disksize
		 * (hint: delalloc).
		 */
		ext4_update_i_disksize(inode, (pos + copied));
		i_size_changed = 1;
	}
	unlock_page(page);
	page_cache_release(page);

	/*
	 * Don't mark the inode dirty under page lock.  First, it unnecessarily
	 * makes the holding time of page lock longer.  Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
				  struct address_space *mapping,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_ordered_write_end(inode, pos, len, copied);
	ret = ext4_jbd2_file_inode(handle, inode);

	if (ret == 0) {
		ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
					      page, fsdata);
		copied = ret2;
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			/* If we have allocated more blocks than we copied,
			 * we will have blocks allocated outside
			 * inode->i_size, so truncate them.
			 */
			ext4_orphan_add(handle, inode);
		if (ret2 < 0)
			ret = ret2;
	} else {
		unlock_page(page);
		page_cache_release(page);
	}

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
				    struct address_space *mapping,
				    loff_t pos, unsigned len, unsigned copied,
				    struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;

	trace_ext4_writeback_write_end(inode, pos, len, copied);
	ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
				      page, fsdata);
	copied = ret2;
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	if (ret2 < 0)
		ret = ret2;

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	loff_t new_i_size;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_CACHE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (copied < len) {
		if (!PageUptodate(page))
			copied = 0;
		page_zero_new_buffers(page, from+copied, to);
	}

	ret = walk_page_buffers(handle, page_buffers(page), from,
				to, &partial, write_end_fn);
	if (!partial)
		SetPageUptodate(page);
	new_i_size = pos + copied;
	if (new_i_size > inode->i_size)
		i_size_write(inode, pos+copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	if (new_i_size > EXT4_I(inode)->i_disksize) {
		ext4_update_i_disksize(inode, new_i_size);
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	unlock_page(page);
	page_cache_release(page);
	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/* If we have allocated more blocks than we copied,
		 * we will have blocks allocated outside
		 * inode->i_size, so truncate them.
		 */
		ext4_orphan_add(handle, inode);

	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve a single cluster located at lblock
 */
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
	int retries = 0;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	unsigned int md_needed;
	int ret;
	ext4_lblk_t save_last_lblock;
	int save_len;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	/*
	 * Recalculate the amount of metadata blocks to reserve
	 * in order to allocate nrblocks; the worst case is one
	 * extent per block.
	 */
repeat:
	spin_lock(&ei->i_block_reservation_lock);
	/*
	 * ext4_calc_metadata_amount() has side effects, which we have
	 * to be prepared to undo if we fail to claim space.
	 */
	save_len = ei->i_da_metadata_calc_len;
	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
	md_needed = EXT4_NUM_B2C(sbi,
				 ext4_calc_metadata_amount(inode, lblock));
	trace_ext4_da_reserve_space(inode, md_needed);

	/*
	 * We do still charge estimated metadata to the sb though;
	 * we cannot afford to run out of free blocks.
	 */
	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
		ei->i_da_metadata_calc_len = save_len;
		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
		spin_unlock(&ei->i_block_reservation_lock);
		if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
			yield();
			goto repeat;
		}
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	ei->i_reserved_meta_blocks += md_needed;
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * if there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	if (ei->i_reserved_data_blocks == 0) {
		/*
		 * We can release all of the reserved metadata blocks
		 * only when we have written all of the delayed
		 * allocation blocks.
		 * Note that in case of bigalloc, i_reserved_meta_blocks,
		 * i_reserved_data_blocks, etc. refer to number of clusters.
		 */
		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
				   ei->i_reserved_meta_blocks);
		ei->i_reserved_meta_blocks = 0;
		ei->i_da_metadata_calc_len = 0;
	}

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned long offset)
{
	int to_release = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int num_clusters;

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			clear_buffer_delay(bh);
			clear_buffer_da_mapped(bh);
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		ext4_fsblk_t lblk;
		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk, 1))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}

/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them out with the writepage() callback.
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated.  This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
			      struct ext4_map_blocks *map)
{
	struct pagevec pvec;
	unsigned long index, end;
	int ret = 0, err, nr_pages, i;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;
	loff_t size = i_size_read(inode);
	unsigned int len, block_start;
	struct buffer_head *bh, *page_bufs = NULL;
	int journal_data = ext4_should_journal_data(inode);
	sector_t pblock = 0, cur_logical = 0;
	struct ext4_io_submit io_submit;

	BUG_ON(mpd->next_page <= mpd->first_page);
	memset(&io_submit, 0, sizeof(io_submit));
	/*
	 * We need to start from the first_page to the next_page - 1
	 * to make sure we also write the mapped dirty buffer_heads.
	 * If we look at mpd->b_blocknr we would only be looking
	 * at the currently mapped buffer_heads.
	 */
	index = mpd->first_page;
	end = mpd->next_page - 1;

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			int commit_write = 0, skip_page = 0;
			struct page *page = pvec.pages[i];

			index = page->index;
			if (index > end)
				break;

			if (index == size >> PAGE_CACHE_SHIFT)
				len = size & ~PAGE_CACHE_MASK;
			else
				len = PAGE_CACHE_SIZE;
			if (map) {
				cur_logical = index << (PAGE_CACHE_SHIFT -
							inode->i_blkbits);
				pblock = map->m_pblk + (cur_logical -
							map->m_lblk);
			}
			index++;

			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));

			/*
			 * If the page does not have buffers (for
			 * whatever reason), try to create them using
			 * __block_write_begin.  If this fails,
			 * skip the page and move on.
			 */
			if (!page_has_buffers(page)) {
				if (__block_write_begin(page, 0, len,
						noalloc_get_block_write)) {
				skip_page:
					unlock_page(page);
					continue;
				}
				commit_write = 1;
			}

			bh = page_bufs = page_buffers(page);
			block_start = 0;
			do {
				if (!bh)
					goto skip_page;
				if (map && (cur_logical >= map->m_lblk) &&
				    (cur_logical <= (map->m_lblk +
						     (map->m_len - 1)))) {
					if (buffer_delay(bh)) {
						clear_buffer_delay(bh);
						bh->b_blocknr = pblock;
					}
					if (buffer_da_mapped(bh))
						clear_buffer_da_mapped(bh);
					if (buffer_unwritten(bh) ||
					    buffer_mapped(bh))
						BUG_ON(bh->b_blocknr != pblock);
					if (map->m_flags & EXT4_MAP_UNINIT)
						set_buffer_uninit(bh);
					clear_buffer_unwritten(bh);
				}

				/*
				 * skip page if block allocation undone and
				 * block is dirty
				 */
				if (ext4_bh_delay_or_unwritten(NULL, bh))
					skip_page = 1;
				bh = bh->b_this_page;
				block_start += bh->b_size;
				cur_logical++;
				pblock++;
			} while (bh != page_bufs);

			if (skip_page)
				goto skip_page;

			if (commit_write)
				/* mark the buffer_heads as dirty & uptodate */
				block_commit_write(page, 0, len);

			clear_page_dirty_for_io(page);
			/*
			 * Delalloc doesn't support data journalling,
			 * but eventually maybe we'll lift this
			 * restriction.
			 */
			if (unlikely(journal_data && PageChecked(page)))
				err = __ext4_journalled_writepage(page, len);
			else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
				err = ext4_bio_write_page(&io_submit, page,
							  len, mpd->wbc);
			else if (buffer_uninit(page_bufs)) {
				ext4_set_bh_endio(page_bufs, inode);
				err = block_write_full_page_endio(page,
					noalloc_get_block_write,
					mpd->wbc, ext4_end_io_buffer_write);
			} else
				err = block_write_full_page(page,
					noalloc_get_block_write, mpd->wbc);

			if (!err)
				mpd->pages_written++;
			/*
			 * In error case, we have to continue because
			 * remaining pages are still locked
			 */
			if (ret == 0)
				ret = err;
		}
		pagevec_release(&pvec);
	}
	ext4_io_submit(&io_submit);
	return ret;
}

static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	index = mpd->first_page;
	end = mpd->next_page - 1;
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			block_invalidatepage(page, 0);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
	return;
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(inode->i_sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
			percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
			percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 EXT4_I(inode)->i_reserved_data_blocks);
	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
		 EXT4_I(inode)->i_reserved_meta_blocks);
	return;
}

/*
 * mpage_da_map_and_submit - go through the given space, map it
 * if necessary, and then submit it for I/O.
 *
 * @mpd - bh describing space
 *
 * The function skips space we know is already mapped to disk blocks.
 */
static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
{
	int err, blks, get_blocks_flags;
	struct ext4_map_blocks map, *mapp = NULL;
	sector_t next = mpd->b_blocknr;
	unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
	loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
	handle_t *handle = NULL;

	/*
	 * If the blocks are mapped already, or we couldn't accumulate
	 * any blocks, then proceed immediately to the submission stage.
	 */
	if ((mpd->b_size == 0) ||
	    ((mpd->b_state & (1 << BH_Mapped)) &&
	     !(mpd->b_state & (1 << BH_Delay)) &&
	     !(mpd->b_state & (1 << BH_Unwritten))))
		goto submit_io;

	handle = ext4_journal_current_handle();
	BUG_ON(!handle);

	/*
	 * Call ext4_map_blocks() to allocate any delayed allocation
	 * blocks, or to convert an uninitialized extent to be
	 * initialized (in the case where we have written into
	 * one or more preallocated blocks).
	 *
	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
	 * indicate that we are on the delayed allocation path.  This
	 * affects functions in many different parts of the allocation
	 * call path.  This flag exists primarily because we don't
	 * want to change *many* call functions, so ext4_map_blocks()
	 * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
	 * inode's allocation semaphore is taken.
	 *
	 * If the blocks in question were delalloc blocks, set
	 * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
	 * variables are updated after the blocks have been allocated.
	 */
	map.m_lblk = next;
	map.m_len = max_blocks;
	get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
	if (ext4_should_dioread_nolock(mpd->inode))
		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
	if (mpd->b_state & (1 << BH_Delay))
		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;

	blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
	if (blks < 0) {
		struct super_block *sb = mpd->inode->i_sb;

		err = blks;
		/*
		 * If get block returns EAGAIN or ENOSPC and there
		 * appears to be free blocks we will just let
		 * mpage_da_submit_io() unlock all of the pages.
		 */
		if (err == -EAGAIN)
			goto submit_io;

		if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
			mpd->retval = err;
			goto submit_io;
		}

		/*
		 * get block failure will cause us to loop in
		 * writepages, because a_ops->writepage won't be able
		 * to make progress.  The page will be redirtied by
		 * writepage and writepages will again try to write
		 * the same.
		 */
		if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
			ext4_msg(sb, KERN_CRIT,
				 "delayed block allocation failed for inode %lu "
				 "at logical offset %llu with max blocks %zd "
				 "with error %d", mpd->inode->i_ino,
				 (unsigned long long) next,
				 mpd->b_size >> mpd->inode->i_blkbits, err);
			ext4_msg(sb, KERN_CRIT,
				"This should not happen!! Data will be lost\n");
			if (err == -ENOSPC)
				ext4_print_free_blocks(mpd->inode);
		}
		/* invalidate all the pages */
		ext4_da_block_invalidatepages(mpd);

		/* Mark this page range as having been completed */
		mpd->io_done = 1;
		return;
	}
	BUG_ON(blks == 0);

	mapp = &map;
	if (map.m_flags & EXT4_MAP_NEW) {
		struct block_device *bdev = mpd->inode->i_sb->s_bdev;
		int i;

		for (i = 0; i < map.m_len; i++)
			unmap_underlying_metadata(bdev, map.m_pblk + i);

		if (ext4_should_order_data(mpd->inode)) {
			err = ext4_jbd2_file_inode(handle, mpd->inode);
			if (err) {
				/* Only if the journal is aborted */
				mpd->retval = err;
				goto submit_io;
			}
		}
	}

	/*
	 * Update on-disk size along with block allocation.
	 */
	disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
	if (disksize > i_size_read(mpd->inode))
		disksize = i_size_read(mpd->inode);
	if (disksize > EXT4_I(mpd->inode)->i_disksize) {
		ext4_update_i_disksize(mpd->inode, disksize);
		err = ext4_mark_inode_dirty(handle, mpd->inode);
		if (err)
			ext4_error(mpd->inode->i_sb,
				   "Failed to mark inode %lu dirty",
				   mpd->inode->i_ino);
	}

submit_io:
	mpage_da_submit_io(mpd, mapp);
	mpd->io_done = 1;
}

#define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
		(1 << BH_Delay) | (1 << BH_Unwritten))

/*
 * mpage_add_bh_to_extent - try to add one more block to the extent of blocks
 *
 * @mpd->lbh - extent of blocks
 * @logical - logical number of the block in the file
 * @bh - bh of the block (used to access block's state)
 *
 * The function is used to collect contiguous blocks in the same state.
 */
static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
				   sector_t logical, size_t b_size,
				   unsigned long b_state)
{
	sector_t next;
	int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;

	/*
	 * XXX Don't go larger than mballoc is willing to allocate
	 * This is a stopgap solution.  We eventually need to fold
	 * mpage_da_submit_io() into this function and then call
	 * ext4_map_blocks() multiple times in a loop
	 */
	if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
		goto flush_it;

	/* check if the reserved journal credits might overflow */
	if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
		if (nrblocks >= EXT4_MAX_TRANS_DATA) {
			/*
			 * With non-extent format we are limited by the journal
			 * credit available.  Total credit needed to insert
			 * nrblocks contiguous blocks is dependent on the
			 * nrblocks.  So limit nrblocks.
			 */
			goto flush_it;
		} else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
				EXT4_MAX_TRANS_DATA) {
			/*
			 * Adding the new buffer_head would make it cross the
			 * allowed limit for which we have journal credit
			 * reserved.  So limit the new bh->b_size
			 */
			b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
						mpd->inode->i_blkbits;
			/* we will do mpage_da_submit_io in the next loop */
		}
	}
	/*
	 * First block in the extent
	 */
	if (mpd->b_size == 0) {
		mpd->b_blocknr = logical;
		mpd->b_size = b_size;
		mpd->b_state = b_state & BH_FLAGS;
		return;
	}

	next = mpd->b_blocknr + nrblocks;
	/*
	 * Can we merge the block to our big extent?
	 */
	if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
		mpd->b_size += b_size;
		return;
	}

flush_it:
	/*
	 * We couldn't merge the block to our extent, so we
	 * need to flush the current extent and start a new one.
	 */
	mpage_da_map_and_submit(mpd);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function borrows code from the very beginning of
 * ext4_map_blocks(), but assumes that the caller is in the delayed
 * write path.  It looks up the requested blocks and sets the buffer's
 * delay bit under the protection of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);
	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

	if (retval == 0) {
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/* If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again. */
		if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
			retval = ext4_da_reserve_space(inode, iblock);
			if (retval)
				/* not enough space to reserve */
				goto out_unlock;
		}

		/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
		 * and it should not appear on the bh->b_state.
		 */
		map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}
1820
/*
 * This is a special get_blocks_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New and BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New and
 * BH_Unwritten set.  We also have b_blocknr set to the physical block
 * backing the unwritten extent, and b_bdev initialized properly.
 */
1833static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1834 struct buffer_head *bh, int create)
1835{
1836 struct ext4_map_blocks map;
1837 int ret = 0;
1838
1839 BUG_ON(create == 0);
1840 BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1841
1842 map.m_lblk = iblock;
1843 map.m_len = 1;
1844
	/*
	 * First, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated the same
	 * as allocated blocks.
	 */
1850 ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1851 if (ret <= 0)
1852 return ret;
1853
1854 map_bh(bh, inode->i_sb, map.m_pblk);
1855 bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1856
1857 if (buffer_unwritten(bh)) {
		/*
		 * A delayed write to an unwritten bh should be marked new
		 * and mapped.  Mapped ensures that we don't do get_block
		 * multiple times when we write to the same offset, and new
		 * ensures that we properly zero out partial writes.
		 */
1864 set_buffer_new(bh);
1865 set_buffer_mapped(bh);
1866 }
1867 return 0;
1868}
1869
/*
 * This function is used as a standard get_block_t callback function
 * when there is no desire to allocate any blocks.  It is used as a
 * callback function for block_write_begin() and block_write_full_page().
 * These functions should only try to map a single block at a time.
 *
 * Since this function doesn't do block allocations even if the caller
 * requests it by passing in create=1, it is critically important that
 * any caller checks to make sure that any buffer heads returned by this
 * function are either all already mapped or marked for delayed
 * allocation before calling block_write_full_page().  Otherwise,
 * b_blocknr could be left uninitialized, and the page write functions
 * will be taken by surprise.
 */
1884static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1885 struct buffer_head *bh_result, int create)
1886{
1887 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1888 return _ext4_get_block(inode, iblock, bh_result, 0);
1889}
1890
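/*
 * walk_page_buffers() callbacks used by __ext4_journalled_writepage() to
 * pin and unpin every buffer on a page, so that the buffers cannot go
 * away while the page itself is unlocked.
 */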
1891static int bget_one(handle_t *handle, struct buffer_head *bh)
1892{
1893 get_bh(bh);
1894 return 0;
1895}
1896
1897static int bput_one(handle_t *handle, struct buffer_head *bh)
1898{
1899 put_bh(bh);
1900 return 0;
1901}
1902
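/*
 * Write out a page of journalled data: pin the page's buffers, start a
 * handle, get write access to each buffer, and commit the data through
 * write_end_fn().  Called from ext4_writepage() for mmap-dirtied pages
 * (PageChecked set) in data=journal mode.
 */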
1903static int __ext4_journalled_writepage(struct page *page,
1904 unsigned int len)
1905{
1906 struct address_space *mapping = page->mapping;
1907 struct inode *inode = mapping->host;
1908 struct buffer_head *page_bufs;
1909 handle_t *handle = NULL;
1910 int ret = 0;
1911 int err;
1912
1913 ClearPageChecked(page);
1914 page_bufs = page_buffers(page);
1915 BUG_ON(!page_bufs);
1916 walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1917 /* As soon as we unlock the page, it can go away, but we have
1918 * references to buffers so we are safe */
1919 unlock_page(page);
1920
1921 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1922 if (IS_ERR(handle)) {
1923 ret = PTR_ERR(handle);
1924 goto out;
1925 }
1926
1927 BUG_ON(!ext4_handle_valid(handle));
1928
1929 ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1930 do_journal_get_write_access);
1931
1932 err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1933 write_end_fn);
1934 if (ret == 0)
1935 ret = err;
1936 EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1937 err = ext4_journal_stop(handle);
1938 if (!ret)
1939 ret = err;
1940
1941 walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1942 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1943out:
1944 return ret;
1945}
1946
1947static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1948static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1949
/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite(). We don't even
 * need to file the inode to the transaction's list in ordered mode because if
 * we are writing back data added by write(), the inode is already there and if
 * we are writing back data modified via mmap(), no one guarantees in which
 * transaction the data will hit the disk. In case we are journaling data, we
 * cannot start a transaction directly because transaction start ranks above
 * page lock so we have to do some magic.
 *
 * This function can get called via...
 * - ext4_da_writepages after taking page lock (have journal handle)
 * - journal_submit_inode_data_buffers (no journal handle)
 * - shrink_page_list via pdflush (no journal handle)
 * - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function. If we have a page with
 * multiple blocks, we need to write those buffer_heads that are mapped. This
 * is important for mmap-based writes. So if, with a 1K block size, we do
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * then the first buffer_head in the page is mapped via the page_mkwrite
 * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
 * do_wp_page). So writepage should write the first block. If we modify the
 * mmap area beyond 1024 we will again get a page fault and the page_mkwrite
 * callback will do the block allocation and mark the buffer_heads mapped.
 *
 * We redirty the page if it has any buffer_heads that are either delayed or
 * unwritten.
 *
 * We can get recursively called as shown below:
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *		ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't get a recursive
 * page_lock.
 */
1991static int ext4_writepage(struct page *page,
1992 struct writeback_control *wbc)
1993{
1994 int ret = 0, commit_write = 0;
1995 loff_t size;
1996 unsigned int len;
1997 struct buffer_head *page_bufs = NULL;
1998 struct inode *inode = page->mapping->host;
1999
2000 trace_ext4_writepage(page);
2001 size = i_size_read(inode);
2002 if (page->index == size >> PAGE_CACHE_SHIFT)
2003 len = size & ~PAGE_CACHE_MASK;
2004 else
2005 len = PAGE_CACHE_SIZE;
2006
2007 /*
2008 * If the page does not have buffers (for whatever reason),
2009 * try to create them using __block_write_begin. If this
2010 * fails, redirty the page and move on.
2011 */
2012 if (!page_has_buffers(page)) {
2013 if (__block_write_begin(page, 0, len,
2014 noalloc_get_block_write)) {
2015 redirty_page:
2016 redirty_page_for_writepage(wbc, page);
2017 unlock_page(page);
2018 return 0;
2019 }
2020 commit_write = 1;
2021 }
2022 page_bufs = page_buffers(page);
2023 if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2024 ext4_bh_delay_or_unwritten)) {
2025 /*
2026 * We don't want to do block allocation, so redirty
2027 * the page and return. We may reach here when we do
2028 * a journal commit via journal_submit_inode_data_buffers.
2029 * We can also reach here via shrink_page_list but it
2030 * should never be for direct reclaim so warn if that
2031 * happens
2032 */
2033 WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD)) ==
2034 PF_MEMALLOC);
2035 goto redirty_page;
2036 }
2037 if (commit_write)
2038 /* now mark the buffer_heads as dirty and uptodate */
2039 block_commit_write(page, 0, len);
2040
2041 if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem to be much point in redirtying the page here.
		 */
2046 return __ext4_journalled_writepage(page, len);
2047
2048 if (buffer_uninit(page_bufs)) {
2049 ext4_set_bh_endio(page_bufs, inode);
2050 ret = block_write_full_page_endio(page, noalloc_get_block_write,
2051 wbc, ext4_end_io_buffer_write);
2052 } else
2053 ret = block_write_full_page(page, noalloc_get_block_write,
2054 wbc);
2055
2056 return ret;
2057}
2058
/*
 * This is called via ext4_da_writepages() to calculate the total number
 * of credits needed to fit a single extent allocation into a single
 * transaction.  ext4_da_writepages() will loop, calling this before
 * each block allocation.
 */
2067static int ext4_da_writepages_trans_blocks(struct inode *inode)
2068{
2069 int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
2070
	/*
	 * With the non-extent format, the journal credits needed to insert
	 * nrblocks contiguous blocks depend on the number of contiguous
	 * blocks, so limit the number of contiguous blocks to a sane value.
	 */
2077 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
2078 (max_blocks > EXT4_MAX_TRANS_DATA))
2079 max_blocks = EXT4_MAX_TRANS_DATA;
2080
2081 return ext4_chunk_trans_blocks(inode, max_blocks);
2082}
2083
/*
 * write_cache_pages_da - walk the list of dirty pages in the given
 * address space, accumulate pages that need writing, and call
 * mpage_da_map_and_submit() to map a single contiguous region and
 * write it out.
 */
2090static int write_cache_pages_da(struct address_space *mapping,
2091 struct writeback_control *wbc,
2092 struct mpage_da_data *mpd,
2093 pgoff_t *done_index)
2094{
2095 struct buffer_head *bh, *head;
2096 struct inode *inode = mapping->host;
2097 struct pagevec pvec;
2098 unsigned int nr_pages;
2099 sector_t logical;
2100 pgoff_t index, end;
2101 long nr_to_write = wbc->nr_to_write;
2102 int i, tag, ret = 0;
2103
2104 memset(mpd, 0, sizeof(struct mpage_da_data));
2105 mpd->wbc = wbc;
2106 mpd->inode = inode;
2107 pagevec_init(&pvec, 0);
2108 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2109 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2110
2111 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2112 tag = PAGECACHE_TAG_TOWRITE;
2113 else
2114 tag = PAGECACHE_TAG_DIRTY;
2115
2116 *done_index = index;
2117 while (index <= end) {
2118 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2119 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2120 if (nr_pages == 0)
2121 return 0;
2122
2123 for (i = 0; i < nr_pages; i++) {
2124 struct page *page = pvec.pages[i];
2125
2126 /*
2127 * At this point, the page may be truncated or
2128 * invalidated (changing page->mapping to NULL), or
2129 * even swizzled back from swapper_space to tmpfs file
2130 * mapping. However, page->index will not change
2131 * because we have a reference on the page.
2132 */
2133 if (page->index > end)
2134 goto out;
2135
2136 *done_index = page->index + 1;
2137
			/*
			 * If we can't merge this page, and we have
			 * accumulated a contiguous region, write it out.
			 */
2142 if ((mpd->next_page != page->index) &&
2143 (mpd->next_page != mpd->first_page)) {
2144 mpage_da_map_and_submit(mpd);
2145 goto ret_extent_tail;
2146 }
2147
2148 lock_page(page);
2149
			/*
			 * If the page is no longer dirty, or its mapping no
			 * longer corresponds to the inode we are writing
			 * (which means it has been truncated or invalidated),
			 * or the page is already under writeback and we are
			 * not doing a data integrity writeback, skip the page.
			 */
2158 if (!PageDirty(page) ||
2159 (PageWriteback(page) &&
2160 (wbc->sync_mode == WB_SYNC_NONE)) ||
2161 unlikely(page->mapping != mapping)) {
2162 unlock_page(page);
2163 continue;
2164 }
2165
2166 wait_on_page_writeback(page);
2167 BUG_ON(PageWriteback(page));
2168
2169 if (mpd->next_page != page->index)
2170 mpd->first_page = page->index;
2171 mpd->next_page = page->index + 1;
2172 logical = (sector_t) page->index <<
2173 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2174
2175 if (!page_has_buffers(page)) {
2176 mpage_add_bh_to_extent(mpd, logical,
2177 PAGE_CACHE_SIZE,
2178 (1 << BH_Dirty) | (1 << BH_Uptodate));
2179 if (mpd->io_done)
2180 goto ret_extent_tail;
2181 } else {
2182 /*
2183 * Page with regular buffer heads,
2184 * just add all dirty ones
2185 */
2186 head = page_buffers(page);
2187 bh = head;
2188 do {
2189 BUG_ON(buffer_locked(bh));
2190 /*
2191 * We need to try to allocate
2192 * unmapped blocks in the same page.
2193 * Otherwise we won't make progress
2194 * with the page in ext4_writepage
2195 */
2196 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2197 mpage_add_bh_to_extent(mpd, logical,
2198 bh->b_size,
2199 bh->b_state);
2200 if (mpd->io_done)
2201 goto ret_extent_tail;
2202 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2203 /*
2204 * mapped dirty buffer. We need
2205 * to update the b_state
2206 * because we look at b_state
2207 * in mpage_da_map_blocks. We
2208 * don't update b_size because
2209 * if we find an unmapped
2210 * buffer_head later we need to
2211 * use the b_state flag of that
2212 * buffer_head.
2213 */
2214 if (mpd->b_size == 0)
2215 mpd->b_state = bh->b_state & BH_FLAGS;
2216 }
2217 logical++;
2218 } while ((bh = bh->b_this_page) != head);
2219 }
2220
2221 if (nr_to_write > 0) {
2222 nr_to_write--;
2223 if (nr_to_write == 0 &&
2224 wbc->sync_mode == WB_SYNC_NONE)
2225 /*
2226 * We stop writing back only if we are
2227 * not doing integrity sync. In case of
2228 * integrity sync we have to keep going
2229 * because someone may be concurrently
2230 * dirtying pages, and we might have
2231 * synced a lot of newly appeared dirty
2232 * pages, but have not synced all of the
2233 * old dirty pages.
2234 */
2235 goto out;
2236 }
2237 }
2238 pagevec_release(&pvec);
2239 cond_resched();
2240 }
2241 return 0;
2242ret_extent_tail:
2243 ret = MPAGE_DA_EXTENT_TAIL;
2244out:
2245 pagevec_release(&pvec);
2246 cond_resched();
2247 return ret;
2248}
2249
2250
2251static int ext4_da_writepages(struct address_space *mapping,
2252 struct writeback_control *wbc)
2253{
2254 pgoff_t index;
2255 int range_whole = 0;
2256 handle_t *handle = NULL;
2257 struct mpage_da_data mpd;
2258 struct inode *inode = mapping->host;
2259 int pages_written = 0;
2260 unsigned int max_pages;
2261 int range_cyclic, cycled = 1, io_done = 0;
2262 int needed_blocks, ret = 0;
2263 long desired_nr_to_write, nr_to_writebump = 0;
2264 loff_t range_start = wbc->range_start;
2265 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2266 pgoff_t done_index = 0;
2267 pgoff_t end;
2268 struct blk_plug plug;
2269
2270 trace_ext4_da_writepages(inode, wbc);
2271
	/*
	 * No pages to write?  This is mainly a kludge to avoid starting
	 * a transaction for special inodes like the journal inode on the
	 * last iput(), because that could violate lock ordering on umount.
	 */
2277 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2278 return 0;
2279
2280 /*
2281 * If the filesystem has aborted, it is read-only, so return
2282 * right away instead of dumping stack traces later on that
2283 * will obscure the real source of the problem. We test
2284 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2285 * the latter could be true if the filesystem is mounted
2286 * read-only, and in that case, ext4_da_writepages should
2287 * *never* be called, so if that ever happens, we would want
2288 * the stack trace.
2289 */
2290 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2291 return -EROFS;
2292
2293 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2294 range_whole = 1;
2295
2296 range_cyclic = wbc->range_cyclic;
2297 if (wbc->range_cyclic) {
2298 index = mapping->writeback_index;
2299 if (index)
2300 cycled = 0;
2301 wbc->range_start = index << PAGE_CACHE_SHIFT;
2302 wbc->range_end = LLONG_MAX;
2303 wbc->range_cyclic = 0;
2304 end = -1;
2305 } else {
2306 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2307 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2308 }
2309
	/*
	 * This works around two forms of stupidity.  The first is in
	 * the writeback code, which caps the maximum number of pages
	 * written to be 1024 pages.  This is wrong on multiple
	 * levels; different architectures have a different page size,
	 * which changes the maximum amount of data which gets
	 * written.  Secondly, 4 megabytes is way too small.  XFS
	 * forces this value to be 16 megabytes by multiplying the
	 * nr_to_write parameter by four, and then relies on its
	 * allocator to allocate larger extents to make them
	 * contiguous.  Unfortunately this brings us to the second
	 * stupidity, which is that ext4's mballoc code only allocates
	 * at most 2048 blocks.  So we force contiguous writes up to
	 * the number of dirty blocks in the inode, or
	 * sbi->s_max_writeback_mb_bump, whichever is smaller.
	 */
2326 max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2327 if (!range_cyclic && range_whole) {
2328 if (wbc->nr_to_write == LONG_MAX)
2329 desired_nr_to_write = wbc->nr_to_write;
2330 else
2331 desired_nr_to_write = wbc->nr_to_write * 8;
2332 } else
2333 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2334 max_pages);
2335 if (desired_nr_to_write > max_pages)
2336 desired_nr_to_write = max_pages;
2337
2338 if (wbc->nr_to_write < desired_nr_to_write) {
2339 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2340 wbc->nr_to_write = desired_nr_to_write;
2341 }
2342
2343retry:
2344 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2345 tag_pages_for_writeback(mapping, index, end);
2346
2347 blk_start_plug(&plug);
2348 while (!ret && wbc->nr_to_write > 0) {
2349
		/*
		 * We insert one extent at a time, so we need the credits
		 * for a single extent allocation.  Journalled mode is
		 * currently not supported by delalloc.
		 */
2356 BUG_ON(ext4_should_journal_data(inode));
2357 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2358
2359 /* start a new transaction*/
2360 handle = ext4_journal_start(inode, needed_blocks);
2361 if (IS_ERR(handle)) {
2362 ret = PTR_ERR(handle);
2363 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2364 "%ld pages, ino %lu; err %d", __func__,
2365 wbc->nr_to_write, inode->i_ino, ret);
2366 blk_finish_plug(&plug);
2367 goto out_writepages;
2368 }
2369
2370 /*
2371 * Now call write_cache_pages_da() to find the next
2372 * contiguous region of logical blocks that need
2373 * blocks to be allocated by ext4 and submit them.
2374 */
2375 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2376 /*
2377 * If we have a contiguous extent of pages and we
2378 * haven't done the I/O yet, map the blocks and submit
2379 * them for I/O.
2380 */
2381 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2382 mpage_da_map_and_submit(&mpd);
2383 ret = MPAGE_DA_EXTENT_TAIL;
2384 }
2385 trace_ext4_da_write_pages(inode, &mpd);
2386 wbc->nr_to_write -= mpd.pages_written;
2387
2388 ext4_journal_stop(handle);
2389
2390 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
			/*
			 * Commit the transaction, which would free the
			 * blocks released in the transaction, and try again.
			 */
2395 jbd2_journal_force_commit_nested(sbi->s_journal);
2396 ret = 0;
2397 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
			/*
			 * Got one extent; now try with the rest of the
			 * pages.  If mpd.retval is set to -EIO, the journal
			 * has been aborted, so we don't need to write any
			 * more.
			 */
2403 pages_written += mpd.pages_written;
2404 ret = mpd.retval;
2405 io_done = 1;
2406 } else if (wbc->nr_to_write)
			/*
			 * There is no more writeout needed, or we requested
			 * a non-blocking writeout and found the device
			 * congested.
			 */
2412 break;
2413 }
2414 blk_finish_plug(&plug);
2415 if (!io_done && !cycled) {
2416 cycled = 1;
2417 index = 0;
2418 wbc->range_start = index << PAGE_CACHE_SHIFT;
2419 wbc->range_end = mapping->writeback_index - 1;
2420 goto retry;
2421 }
2422
2423 /* Update index */
2424 wbc->range_cyclic = range_cyclic;
2425 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2426 /*
2427 * set the writeback_index so that range_cyclic
2428 * mode will write it back later
2429 */
2430 mapping->writeback_index = done_index;
2431
2432out_writepages:
2433 wbc->nr_to_write -= nr_to_writebump;
2434 wbc->range_start = range_start;
2435 trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2436 return ret;
2437}
2438
2439#define FALL_BACK_TO_NONDELALLOC 1
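/*
 * Decide whether a delalloc write should fall back to the non-delalloc
 * path because free space is running low.  Returns 1 to fall back.
 */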
2440static int ext4_nonda_switch(struct super_block *sb)
2441{
2442 s64 free_blocks, dirty_blocks;
2443 struct ext4_sb_info *sbi = EXT4_SB(sb);
2444
	/*
	 * Switch to non-delalloc mode if we are running low on free
	 * blocks.  The free-block accounting via percpu counters can be
	 * slightly off, since up to percpu_counter_batch may accumulate
	 * on each CPU without updating the global counter.  Delalloc
	 * needs accurate free-block accounting, so switch to non-delalloc
	 * when we are near the error range.
	 */
2453 free_blocks = EXT4_C2B(sbi,
2454 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2455 dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2456 if (2 * free_blocks < 3 * dirty_blocks ||
2457 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
		/*
		 * The free block count is less than 150% of the dirty
		 * block count, or the free blocks are below the watermark.
		 */
2462 return 1;
2463 }
2464 /*
2465 * Even if we don't switch but are nearing capacity,
2466 * start pushing delalloc when 1/2 of free blocks are dirty.
2467 */
2468 if (free_blocks < 2 * dirty_blocks)
2469 writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
2470
2471 return 0;
2472}
2473
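/*
 * write_begin for the delalloc path: start a one-credit handle for a
 * possible i_disksize update and reserve space for the block via
 * ext4_da_get_block_prep() instead of allocating it now.  Falls back to
 * ext4_write_begin() when ext4_nonda_switch() says free space is tight.
 */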
2474static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2475 loff_t pos, unsigned len, unsigned flags,
2476 struct page **pagep, void **fsdata)
2477{
2478 int ret, retries = 0;
2479 struct page *page;
2480 pgoff_t index;
2481 struct inode *inode = mapping->host;
2482 handle_t *handle;
2483
2484 index = pos >> PAGE_CACHE_SHIFT;
2485
2486 if (ext4_nonda_switch(inode->i_sb)) {
2487 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2488 return ext4_write_begin(file, mapping, pos,
2489 len, flags, pagep, fsdata);
2490 }
2491 *fsdata = (void *)0;
2492 trace_ext4_da_write_begin(inode, pos, len, flags);
2493retry:
	/*
	 * With delayed allocation, we don't log the i_disksize update
	 * if there is delayed block allocation.  But we still need to
	 * journal the i_disksize update if the write extends the end of
	 * a file whose tail buffer is already mapped.
	 */
2500 handle = ext4_journal_start(inode, 1);
2501 if (IS_ERR(handle)) {
2502 ret = PTR_ERR(handle);
2503 goto out;
2504 }
2505 /* We cannot recurse into the filesystem as the transaction is already
2506 * started */
2507 flags |= AOP_FLAG_NOFS;
2508
2509 page = grab_cache_page_write_begin(mapping, index, flags);
2510 if (!page) {
2511 ext4_journal_stop(handle);
2512 ret = -ENOMEM;
2513 goto out;
2514 }
2515 *pagep = page;
2516
2517 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2518 if (ret < 0) {
2519 unlock_page(page);
2520 ext4_journal_stop(handle);
2521 page_cache_release(page);
		/*
		 * block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again.  We don't need
		 * i_size_read because we hold i_mutex.
		 */
2527 if (pos + len > inode->i_size)
2528 ext4_truncate_failed_write(inode);
2529 }
2530
2531 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2532 goto retry;
2533out:
2534 return ret;
2535}
2536
/*
 * Check whether we should update i_disksize when a write extends the
 * end of file but does not require block allocation.
 */
2541static int ext4_da_should_update_i_disksize(struct page *page,
2542 unsigned long offset)
2543{
2544 struct buffer_head *bh;
2545 struct inode *inode = page->mapping->host;
2546 unsigned int idx;
2547 int i;
2548
2549 bh = page_buffers(page);
2550 idx = offset >> inode->i_blkbits;
2551
2552 for (i = 0; i < idx; i++)
2553 bh = bh->b_this_page;
2554
2555 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2556 return 0;
2557 return 1;
2558}
2559
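/*
 * write_end for the delalloc path: if the write extended the file
 * without needing block allocation, update i_disksize under i_data_sem
 * before completing the write through generic_write_end().
 */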
2560static int ext4_da_write_end(struct file *file,
2561 struct address_space *mapping,
2562 loff_t pos, unsigned len, unsigned copied,
2563 struct page *page, void *fsdata)
2564{
2565 struct inode *inode = mapping->host;
2566 int ret = 0, ret2;
2567 handle_t *handle = ext4_journal_current_handle();
2568 loff_t new_i_size;
2569 unsigned long start, end;
2570 int write_mode = (int)(unsigned long)fsdata;
2571
2572 if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2573 switch (ext4_inode_journal_mode(inode)) {
2574 case EXT4_INODE_ORDERED_DATA_MODE:
2575 return ext4_ordered_write_end(file, mapping, pos,
2576 len, copied, page, fsdata);
2577 case EXT4_INODE_WRITEBACK_DATA_MODE:
2578 return ext4_writeback_write_end(file, mapping, pos,
2579 len, copied, page, fsdata);
2580 default:
2581 BUG();
2582 }
2583 }
2584
2585 trace_ext4_da_write_end(inode, pos, len, copied);
2586 start = pos & (PAGE_CACHE_SIZE - 1);
2587 end = start + copied - 1;
2588
2589 /*
2590 * generic_write_end() will run mark_inode_dirty() if i_size
2591 * changes. So let's piggyback the i_disksize mark_inode_dirty
2592 * into that.
2593 */
2594
2595 new_i_size = pos + copied;
2596 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2597 if (ext4_da_should_update_i_disksize(page, end)) {
2598 down_write(&EXT4_I(inode)->i_data_sem);
2599 if (new_i_size > EXT4_I(inode)->i_disksize) {
				/*
				 * Update i_disksize when extending the file
				 * without needing block allocation.
				 */
2604 if (ext4_should_order_data(inode))
2605 ret = ext4_jbd2_file_inode(handle,
2606 inode);
2607
2608 EXT4_I(inode)->i_disksize = new_i_size;
2609 }
2610 up_write(&EXT4_I(inode)->i_data_sem);
			/*
			 * We need to mark the inode dirty even if new_i_size
			 * is less than inode->i_size but greater than
			 * i_disksize.  (This can happen with delalloc.)
			 */
2615 ext4_mark_inode_dirty(handle, inode);
2616 }
2617 }
2618 ret2 = generic_write_end(file, mapping, pos, len, copied,
2619 page, fsdata);
2620 copied = ret2;
2621 if (ret2 < 0)
2622 ret = ret2;
2623 ret2 = ext4_journal_stop(handle);
2624 if (!ret)
2625 ret = ret2;
2626
2627 return ret ? ret : copied;
2628}
2629
2630static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2631{
2632 /*
2633 * Drop reserved blocks
2634 */
2635 BUG_ON(!PageLocked(page));
2636 if (!page_has_buffers(page))
2637 goto out;
2638
2639 ext4_da_page_release_reservation(page, offset);
2640
2641out:
2642 ext4_invalidatepage(page, offset);
2643
2644 return;
2645}
2646
2647/*
2648 * Force all delayed allocation blocks to be allocated for a given inode.
2649 */
2650int ext4_alloc_da_blocks(struct inode *inode)
2651{
2652 trace_ext4_alloc_da_blocks(inode);
2653
2654 if (!EXT4_I(inode)->i_reserved_data_blocks &&
2655 !EXT4_I(inode)->i_reserved_meta_blocks)
2656 return 0;
2657
2658 /*
2659 * We do something simple for now. The filemap_flush() will
2660 * also start triggering a write of the data blocks, which is
2661 * not strictly speaking necessary (and for users of
2662 * laptop_mode, not even desirable). However, to do otherwise
2663 * would require replicating code paths in:
2664 *
2665 * ext4_da_writepages() ->
2666 * write_cache_pages() ---> (via passed in callback function)
2667 * __mpage_da_writepage() -->
2668 * mpage_add_bh_to_extent()
2669 * mpage_da_map_blocks()
2670 *
2671 * The problem is that write_cache_pages(), located in
2672 * mm/page-writeback.c, marks pages clean in preparation for
2673 * doing I/O, which is not desirable if we're not planning on
2674 * doing I/O at all.
2675 *
2676 * We could call write_cache_pages(), and then redirty all of
2677 * the pages by calling redirty_page_for_writepage() but that
2678 * would be ugly in the extreme. So instead we would need to
2679 * replicate parts of the code in the above functions,
2680 * simplifying them because we wouldn't actually intend to
2681 * write out the pages, but rather only collect contiguous
2682 * logical block extents, call the multi-block allocator, and
2683 * then update the buffer heads with the block allocations.
2684 *
2685 * For now, though, we'll cheat by calling filemap_flush(),
2686 * which will map the blocks, and start the I/O, but not
2687 * actually wait for the I/O to complete.
2688 */
2689 return filemap_flush(inode->i_mapping);
2690}
2691
2692/*
2693 * bmap() is special. It gets used by applications such as lilo and by
2694 * the swapper to find the on-disk block of a specific piece of data.
2695 *
2696 * Naturally, this is dangerous if the block concerned is still in the
2697 * journal. If somebody makes a swapfile on an ext4 data-journaling
2698 * filesystem and enables swap, then they may get a nasty shock when the
2699 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
2701 * awaiting writeback in the kernel's buffer cache.
2702 *
2703 * So, if we see any bmap calls here on a modified, data-journaled file,
2704 * take extra steps to flush any blocks which might be in the cache.
2705 */
2706static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2707{
2708 struct inode *inode = mapping->host;
2709 journal_t *journal;
2710 int err;
2711
2712 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2713 test_opt(inode->i_sb, DELALLOC)) {
		/*
		 * With delalloc we want to sync the file so that we can
		 * make sure blocks are allocated for the file.
		 */
2719 filemap_write_and_wait(mapping);
2720 }
2721
2722 if (EXT4_JOURNAL(inode) &&
2723 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2724 /*
2725 * This is a REALLY heavyweight approach, but the use of
2726 * bmap on dirty files is expected to be extremely rare:
2727 * only if we run lilo or swapon on a freshly made file
2728 * do we expect this to happen.
2729 *
2730 * (bmap requires CAP_SYS_RAWIO so this does not
2731 * represent an unprivileged user DOS attack --- we'd be
2732 * in trouble if mortal users could trigger this path at
2733 * will.)
2734 *
2735 * NB. EXT4_STATE_JDATA is not set on files other than
2736 * regular files. If somebody wants to bmap a directory
2737 * or symlink and gets confused because the buffer
2738 * hasn't yet been flushed to disk, they deserve
2739 * everything they get.
2740 */
2741
2742 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2743 journal = EXT4_JOURNAL(inode);
2744 jbd2_journal_lock_updates(journal);
2745 err = jbd2_journal_flush(journal);
2746 jbd2_journal_unlock_updates(journal);
2747
2748 if (err)
2749 return 0;
2750 }
2751
2752 return generic_block_bmap(mapping, block, ext4_get_block);
2753}
2754
2755static int ext4_readpage(struct file *file, struct page *page)
2756{
2757 trace_ext4_readpage(page);
2758 return mpage_readpage(page, ext4_get_block);
2759}
2760
2761static int
2762ext4_readpages(struct file *file, struct address_space *mapping,
2763 struct list_head *pages, unsigned nr_pages)
2764{
2765 return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2766}
2767
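/*
 * Free any io_end structure still attached to the page's uninit buffers
 * before (part of) the page is invalidated.
 */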
2768static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2769{
2770 struct buffer_head *head, *bh;
2771 unsigned int curr_off = 0;
2772
2773 if (!page_has_buffers(page))
2774 return;
2775 head = bh = page_buffers(page);
2776 do {
2777 if (offset <= curr_off && test_clear_buffer_uninit(bh)
2778 && bh->b_private) {
2779 ext4_free_io_end(bh->b_private);
2780 bh->b_private = NULL;
2781 bh->b_end_io = NULL;
2782 }
2783 curr_off = curr_off + bh->b_size;
2784 bh = bh->b_this_page;
2785 } while (bh != head);
2786}
2787
2788static void ext4_invalidatepage(struct page *page, unsigned long offset)
2789{
2790 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2791
2792 trace_ext4_invalidatepage(page, offset);
2793
	/*
	 * Free any io_end structures allocated for buffers to be discarded.
	 */
2797 if (ext4_should_dioread_nolock(page->mapping->host))
2798 ext4_invalidatepage_free_endio(page, offset);
2799 /*
2800 * If it's a full truncate we just forget about the pending dirtying
2801 */
2802 if (offset == 0)
2803 ClearPageChecked(page);
2804
2805 if (journal)
2806 jbd2_journal_invalidatepage(journal, page, offset);
2807 else
2808 block_invalidatepage(page, offset);
2809}
2810
2811static int ext4_releasepage(struct page *page, gfp_t wait)
2812{
2813 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2814
2815 trace_ext4_releasepage(page);
2816
2817 WARN_ON(PageChecked(page));
2818 if (!page_has_buffers(page))
2819 return 0;
2820 if (journal)
2821 return jbd2_journal_try_to_free_buffers(journal, page, wait);
2822 else
2823 return try_to_free_buffers(page);
2824}
2825
/*
 * ext4_get_block variant used when preparing for a DIO write or buffer
 * write.  We allocate an uninitialized extent if blocks haven't been
 * allocated.  The extent will be converted to initialized after the IO
 * is complete.
 */
2831static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2832 struct buffer_head *bh_result, int create)
2833{
2834 ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2835 inode->i_ino, create);
2836 return _ext4_get_block(inode, iblock, bh_result,
2837 EXT4_GET_BLOCKS_IO_CREATE_EXT);
2838}
2839
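/*
 * Completion callback for async direct IO: if the write left unwritten
 * extents behind, queue the io_end on a workqueue so the extents can be
 * converted to written; otherwise complete the iocb right away.
 */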
2840static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2841 ssize_t size, void *private, int ret,
2842 bool is_async)
2843{
2844 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2845 ext4_io_end_t *io_end = iocb->private;
2846 struct workqueue_struct *wq;
2847 unsigned long flags;
2848 struct ext4_inode_info *ei;
2849
	/* if this is not async direct IO or a zero-byte DIO write, just return */
2851 if (!io_end || !size)
2852 goto out;
2853
2854 ext_debug("ext4_end_io_dio(): io_end 0x%p "
2855 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
2856 iocb->private, io_end->inode->i_ino, iocb, offset,
2857 size);
2858
2859 iocb->private = NULL;
2860
	/* if not an AIO DIO with unwritten extents, just free the io_end and return */
2862 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2863 ext4_free_io_end(io_end);
2864out:
2865 if (is_async)
2866 aio_complete(iocb, ret, 0);
2867 inode_dio_done(inode);
2868 return;
2869 }
2870
2871 io_end->offset = offset;
2872 io_end->size = size;
2873 if (is_async) {
2874 io_end->iocb = iocb;
2875 io_end->result = ret;
2876 }
2877 wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2878
	/* Add the io_end to the per-inode completed aio dio list */
2880 ei = EXT4_I(io_end->inode);
2881 spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2882 list_add_tail(&io_end->list, &ei->i_completed_io_list);
2883 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2884
2885 /* queue the work to convert unwritten extents to written */
2886 queue_work(wq, &io_end->work);
2887}
2888
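/*
 * End-io handler for buffers written on top of an uninitialized extent
 * (attached by ext4_set_bh_endio()): queue the io_end so the
 * unwritten-to-written conversion happens in workqueue context, then
 * complete the buffer as a normal async write.
 */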
2889static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2890{
2891 ext4_io_end_t *io_end = bh->b_private;
2892 struct workqueue_struct *wq;
2893 struct inode *inode;
2894 unsigned long flags;
2895
2896 if (!test_clear_buffer_uninit(bh) || !io_end)
2897 goto out;
2898
2899 if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2900 ext4_msg(io_end->inode->i_sb, KERN_INFO,
2901 "sb umounted, discard end_io request for inode %lu",
2902 io_end->inode->i_ino);
2903 ext4_free_io_end(io_end);
2904 goto out;
2905 }
2906
	/*
	 * It may be over-defensive to check EXT4_IO_END_UNWRITTEN here,
	 * but being more careful is always safe against future changes.
	 */
2911 inode = io_end->inode;
2912 ext4_set_io_unwritten_flag(inode, io_end);
2913
	/* Add the io_end to the per-inode completed io list */
2915 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2916 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2917 spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2918
2919 wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2920 /* queue the work to convert unwritten extents to written */
2921 queue_work(wq, &io_end->work);
2922out:
2923 bh->b_private = NULL;
2924 bh->b_end_io = NULL;
2925 clear_buffer_uninit(bh);
2926 end_buffer_async_write(bh, uptodate);
2927}
2928
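/*
 * Attach a freshly allocated io_end to a buffer so that
 * ext4_end_io_buffer_write() runs when the IO completes.  Called from
 * ext4_writepage() for pages that contain uninit buffers.
 */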
2929static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2930{
2931 ext4_io_end_t *io_end;
2932 struct page *page = bh->b_page;
2933 loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2934 size_t size = bh->b_size;
2935
2936retry:
2937 io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2938 if (!io_end) {
2939 pr_warn_ratelimited("%s: allocation fail\n", __func__);
2940 schedule();
2941 goto retry;
2942 }
2943 io_end->offset = offset;
2944 io_end->size = size;
	/*
	 * We need to hold a reference to the page to make sure it
	 * doesn't get evicted before ext4_end_io_work() has a chance
	 * to convert the extent from unwritten to written.
	 */
2950 io_end->page = page;
2951 get_page(io_end->page);
2952
2953 bh->b_private = io_end;
2954 bh->b_end_io = ext4_end_io_buffer_write;
2955 return 0;
2956}
2957
/*
 * For ext4 extent files, ext4 will do direct-IO writes to holes, to
 * preallocated extents, and to writes that extend the file, with no
 * need to fall back to buffered IO.
 *
 * For holes, we fallocate those blocks and mark them as uninitialized.
 * If those blocks were preallocated, we make sure they are split, but
 * still keep the range to write as uninitialized.
 *
 * The unwritten extents will be converted to written when the DIO is
 * completed.  For async direct IO, since the IO may still be pending on
 * return, we set up an end_io callback function which will do the
 * conversion when the async direct IO completes.
 *
 * If the O_DIRECT write will extend the file, then add this inode to
 * the orphan list, so recovery will truncate it back to the original
 * size if the machine crashes during the write.
 */
2977static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
2978 const struct iovec *iov, loff_t offset,
2979 unsigned long nr_segs)
2980{
2981 struct file *file = iocb->ki_filp;
2982 struct inode *inode = file->f_mapping->host;
2983 ssize_t ret;
2984 size_t count = iov_length(iov, nr_segs);
2985
2986 loff_t final_size = offset + count;
2987 if (rw == WRITE && final_size <= inode->i_size) {
		/*
		 * We could do a direct write to holes and fallocated
		 * extents.
		 *
		 * Allocated blocks that fill the hole are marked as
		 * uninitialized to prevent a parallel buffered read from
		 * exposing stale data before the DIO completes the data IO.
		 *
		 * As for previously fallocated extents, ext4 get_block will
		 * simply mark the buffer mapped but still keep the extents
		 * uninitialized.
		 *
		 * For the non-AIO case, we will convert those unwritten
		 * extents to written after returning from
		 * blockdev_direct_IO.
		 *
		 * For async DIO, the conversion must be deferred until the
		 * IO is completed.  The ext4 end_io callback function will
		 * be called to take care of the conversion work.  Here, for
		 * the async case, we allocate an io_end structure to hook
		 * to the iocb.
		 */
3008 iocb->private = NULL;
3009 EXT4_I(inode)->cur_aio_dio = NULL;
3010 if (!is_sync_kiocb(iocb)) {
3011 ext4_io_end_t *io_end =
3012 ext4_init_io_end(inode, GFP_NOFS);
3013 if (!io_end)
3014 return -ENOMEM;
3015 io_end->flag |= EXT4_IO_END_DIRECT;
3016 iocb->private = io_end;
			/*
			 * We save the io structure for the current async
			 * direct IO so that later ext4_map_blocks() can flag
			 * in the io structure whether there are unwritten
			 * extents that need to be converted when the IO
			 * completes.
			 */
3024 EXT4_I(inode)->cur_aio_dio = iocb->private;
3025 }
3026
3027 ret = __blockdev_direct_IO(rw, iocb, inode,
3028 inode->i_sb->s_bdev, iov,
3029 offset, nr_segs,
3030 ext4_get_block_write,
3031 ext4_end_io_dio,
3032 NULL,
3033 DIO_LOCKING);
3034 if (iocb->private)
3035 EXT4_I(inode)->cur_aio_dio = NULL;
		/*
		 * The io_end structure takes a reference to the inode; that
		 * structure needs to be destroyed and the reference to the
		 * inode dropped when the IO is complete, even for a
		 * zero-byte write or a failure.
		 *
		 * In the successful AIO DIO case, the io_end structure will
		 * be destroyed and the reference to the inode will be
		 * dropped after the end_io callback function is called.
		 *
		 * In the zero-byte write or error case, since VFS direct IO
		 * won't invoke the end_io callback function, we need to
		 * free the io_end structure here.
		 */
3050 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3051 ext4_free_io_end(iocb->private);
3052 iocb->private = NULL;
3053 } else if (ret > 0 && ext4_test_inode_state(inode,
3054 EXT4_STATE_DIO_UNWRITTEN)) {
3055 int err;
			/*
			 * For the non-AIO case, since the IO has already
			 * completed, we can do the conversion right here.
			 */
3060 err = ext4_convert_unwritten_extents(inode,
3061 offset, ret);
3062 if (err < 0)
3063 ret = err;
3064 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3065 }
3066 return ret;
3067 }
3068
	/* for writes that extend the end of file, we fall back to the old way */
3070 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3071}
3072
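/*
 * ->direct_IO entry point: dispatch to the extent-based or the
 * indirect-based implementation.  O_DIRECT is not supported when data
 * journalling is enabled.
 */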
3073static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3074 const struct iovec *iov, loff_t offset,
3075 unsigned long nr_segs)
3076{
3077 struct file *file = iocb->ki_filp;
3078 struct inode *inode = file->f_mapping->host;
3079 ssize_t ret;
3080
3081 /*
3082 * If we are doing data journalling we don't support O_DIRECT
3083 */
3084 if (ext4_should_journal_data(inode))
3085 return 0;
3086
3087 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3088 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3089 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3090 else
3091 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3092 trace_ext4_direct_IO_exit(inode, offset,
3093 iov_length(iov, nr_segs), rw, ret);
3094 return ret;
3095}
3096
3097/*
3098 * Pages can be marked dirty completely asynchronously from ext4's journalling
3099 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3100 * much here because ->set_page_dirty is called under VFS locks. The page is
3101 * not necessarily locked.
3102 *
3103 * We cannot just dirty the page and leave attached buffers clean, because the
3104 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3105 * or jbddirty because all the journalling code will explode.
3106 *
3107 * So what we do is to mark the page "pending dirty" and next time writepage
3108 * is called, propagate that into the buffers appropriately.
3109 */
3110static int ext4_journalled_set_page_dirty(struct page *page)
3111{
3112 SetPageChecked(page);
3113 return __set_page_dirty_nobuffers(page);
3114}
3115
3116static const struct address_space_operations ext4_ordered_aops = {
3117 .readpage = ext4_readpage,
3118 .readpages = ext4_readpages,
3119 .writepage = ext4_writepage,
3120 .write_begin = ext4_write_begin,
3121 .write_end = ext4_ordered_write_end,
3122 .bmap = ext4_bmap,
3123 .invalidatepage = ext4_invalidatepage,
3124 .releasepage = ext4_releasepage,
3125 .direct_IO = ext4_direct_IO,
3126 .migratepage = buffer_migrate_page,
3127 .is_partially_uptodate = block_is_partially_uptodate,
3128 .error_remove_page = generic_error_remove_page,
3129};
3130
3131static const struct address_space_operations ext4_writeback_aops = {
3132 .readpage = ext4_readpage,
3133 .readpages = ext4_readpages,
3134 .writepage = ext4_writepage,
3135 .write_begin = ext4_write_begin,
3136 .write_end = ext4_writeback_write_end,
3137 .bmap = ext4_bmap,
3138 .invalidatepage = ext4_invalidatepage,
3139 .releasepage = ext4_releasepage,
3140 .direct_IO = ext4_direct_IO,
3141 .migratepage = buffer_migrate_page,
3142 .is_partially_uptodate = block_is_partially_uptodate,
3143 .error_remove_page = generic_error_remove_page,
3144};
3145
3146static const struct address_space_operations ext4_journalled_aops = {
3147 .readpage = ext4_readpage,
3148 .readpages = ext4_readpages,
3149 .writepage = ext4_writepage,
3150 .write_begin = ext4_write_begin,
3151 .write_end = ext4_journalled_write_end,
3152 .set_page_dirty = ext4_journalled_set_page_dirty,
3153 .bmap = ext4_bmap,
3154 .invalidatepage = ext4_invalidatepage,
3155 .releasepage = ext4_releasepage,
3156 .direct_IO = ext4_direct_IO,
3157 .is_partially_uptodate = block_is_partially_uptodate,
3158 .error_remove_page = generic_error_remove_page,
3159};
3160
3161static const struct address_space_operations ext4_da_aops = {
3162 .readpage = ext4_readpage,
3163 .readpages = ext4_readpages,
3164 .writepage = ext4_writepage,
3165 .writepages = ext4_da_writepages,
3166 .write_begin = ext4_da_write_begin,
3167 .write_end = ext4_da_write_end,
3168 .bmap = ext4_bmap,
3169 .invalidatepage = ext4_da_invalidatepage,
3170 .releasepage = ext4_releasepage,
3171 .direct_IO = ext4_direct_IO,
3172 .migratepage = buffer_migrate_page,
3173 .is_partially_uptodate = block_is_partially_uptodate,
3174 .error_remove_page = generic_error_remove_page,
3175};
3176
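/*
 * Pick the address_space operations for an inode based on its
 * journalling mode and the delalloc mount option.
 */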
3177void ext4_set_aops(struct inode *inode)
3178{
3179 switch (ext4_inode_journal_mode(inode)) {
3180 case EXT4_INODE_ORDERED_DATA_MODE:
3181 if (test_opt(inode->i_sb, DELALLOC))
3182 inode->i_mapping->a_ops = &ext4_da_aops;
3183 else
3184 inode->i_mapping->a_ops = &ext4_ordered_aops;
3185 break;
3186 case EXT4_INODE_WRITEBACK_DATA_MODE:
3187 if (test_opt(inode->i_sb, DELALLOC))
3188 inode->i_mapping->a_ops = &ext4_da_aops;
3189 else
3190 inode->i_mapping->a_ops = &ext4_writeback_aops;
3191 break;
3192 case EXT4_INODE_JOURNAL_DATA_MODE:
3193 inode->i_mapping->a_ops = &ext4_journalled_aops;
3194 break;
3195 default:
3196 BUG();
3197 }
3198}
3199
3200
3201/*
3202 * ext4_discard_partial_page_buffers()
3203 * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3204 * This function finds and locks the page containing the offset
3205 * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3206 * Calling functions that already have the page locked should call
3207 * ext4_discard_partial_page_buffers_no_lock directly.
3208 */
3209int ext4_discard_partial_page_buffers(handle_t *handle,
3210 struct address_space *mapping, loff_t from,
3211 loff_t length, int flags)
3212{
3213 struct inode *inode = mapping->host;
3214 struct page *page;
3215 int err = 0;
3216
3217 page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3218 mapping_gfp_mask(mapping) & ~__GFP_FS);
3219 if (!page)
3220 return -ENOMEM;
3221
3222 err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3223 from, length, flags);
3224
3225 unlock_page(page);
3226 page_cache_release(page);
3227 return err;
3228}
3229
/*
 * ext4_discard_partial_page_buffers_no_lock()
 * Zeros a page range of length 'length' starting from offset 'from'.
 * Buffer heads that correspond to the block-aligned regions of the
 * zeroed range will be unmapped.  Non-block-aligned regions will have
 * the corresponding buffer head mapped if needed so that that region
 * of the page can be updated with the partial zero out.
 *
 * This function assumes that the page has already been locked.  The
 * range to be discarded must be contained within the given page.  If
 * the specified range exceeds the end of the page it will be shortened
 * to the end of the page that corresponds to 'from'.  This function is
 * appropriate for updating a page and its buffer heads to be unmapped
 * and zeroed for blocks that have been either released, or are going
 * to be released.
 *
 * handle:  The journal handle
 * inode:   The file's inode
 * page:    A locked page that contains the offset "from"
 * from:    The starting byte offset (from the beginning of the file)
 *          to begin discarding
 * length:  The length of bytes to discard
 * flags:   Optional flags that may be used:
 *
 *          EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
 *          Only zero the regions of the page whose buffer heads have
 *          already been unmapped.  This flag is appropriate for
 *          updating the contents of a page whose blocks may have
 *          already been released, and we only want to zero out the
 *          regions that correspond to those released blocks.
 *
 * Returns zero on success or negative on failure.
 */
3263static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3264 struct inode *inode, struct page *page, loff_t from,
3265 loff_t length, int flags)
3266{
3267 ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3268 unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3269 unsigned int blocksize, max, pos;
3270 ext4_lblk_t iblock;
3271 struct buffer_head *bh;
3272 int err = 0;
3273
3274 blocksize = inode->i_sb->s_blocksize;
3275 max = PAGE_CACHE_SIZE - offset;
3276
3277 if (index != page->index)
3278 return -EINVAL;
3279
	/*
	 * Correct the length if it does not fall between 'from' and
	 * the end of the page.
	 */
3284 if (length > max || length < 0)
3285 length = max;
3286
3287 iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3288
3289 if (!page_has_buffers(page))
3290 create_empty_buffers(page, blocksize, 0);
3291
3292 /* Find the buffer that contains "offset" */
3293 bh = page_buffers(page);
3294 pos = blocksize;
3295 while (offset >= pos) {
3296 bh = bh->b_this_page;
3297 iblock++;
3298 pos += blocksize;
3299 }
3300
3301 pos = offset;
3302 while (pos < offset + length) {
3303 unsigned int end_of_block, range_to_discard;
3304
3305 err = 0;
3306
3307 /* The length of space left to zero and unmap */
3308 range_to_discard = offset + length - pos;
3309
3310 /* The length of space until the end of the block */
3311 end_of_block = blocksize - (pos & (blocksize-1));
3312
3313 /*
3314 * Do not unmap or zero past end of block
3315 * for this buffer head
3316 */
3317 if (range_to_discard > end_of_block)
3318 range_to_discard = end_of_block;
3319
3320
		/*
		 * Skip this buffer head if we are only zeroing unmapped
		 * regions of the page.
		 */
3325 if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3326 buffer_mapped(bh))
3327 goto next;
3328
3329 /* If the range is block aligned, unmap */
3330 if (range_to_discard == blocksize) {
3331 clear_buffer_dirty(bh);
3332 bh->b_bdev = NULL;
3333 clear_buffer_mapped(bh);
3334 clear_buffer_req(bh);
3335 clear_buffer_new(bh);
3336 clear_buffer_delay(bh);
3337 clear_buffer_unwritten(bh);
3338 clear_buffer_uptodate(bh);
3339 zero_user(page, pos, range_to_discard);
3340 BUFFER_TRACE(bh, "Buffer discarded");
3341 goto next;
3342 }
3343
		/*
		 * If this block is not completely contained in the range
		 * to be discarded, then it is not going to be released.
		 * Because we need to keep this block, we need to make sure
		 * this part of the page is uptodate before we modify it by
		 * writing partial zeros on it.
		 */
3351 if (!buffer_mapped(bh)) {
3352 /*
3353 * Buffer head must be mapped before we can read
3354 * from the block
3355 */
3356 BUFFER_TRACE(bh, "unmapped");
3357 ext4_get_block(inode, iblock, bh, 0);
3358 /* unmapped? It's a hole - nothing to do */
3359 if (!buffer_mapped(bh)) {
3360 BUFFER_TRACE(bh, "still unmapped");
3361 goto next;
3362 }
3363 }
3364
3365 /* Ok, it's mapped. Make sure it's up-to-date */
3366 if (PageUptodate(page))
3367 set_buffer_uptodate(bh);
3368
3369 if (!buffer_uptodate(bh)) {
3370 err = -EIO;
3371 ll_rw_block(READ, 1, &bh);
3372 wait_on_buffer(bh);
3373 /* Uhhuh. Read error. Complain and punt.*/
3374 if (!buffer_uptodate(bh))
3375 goto next;
3376 }
3377
3378 if (ext4_should_journal_data(inode)) {
3379 BUFFER_TRACE(bh, "get write access");
3380 err = ext4_journal_get_write_access(handle, bh);
3381 if (err)
3382 goto next;
3383 }
3384
3385 zero_user(page, pos, range_to_discard);
3386
3387 err = 0;
3388 if (ext4_should_journal_data(inode)) {
3389 err = ext4_handle_dirty_metadata(handle, inode, bh);
3390 } else
3391 mark_buffer_dirty(bh);
3392
3393 BUFFER_TRACE(bh, "Partial buffer zeroed");
3394next:
3395 bh = bh->b_this_page;
3396 iblock++;
3397 pos += range_to_discard;
3398 }
3399
3400 return err;
3401}
3402
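/*
 * Only regular files, directories and slow symlinks can be truncated;
 * a fast symlink keeps its target inside the inode itself, so there is
 * nothing to truncate.
 */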
3403int ext4_can_truncate(struct inode *inode)
3404{
3405 if (S_ISREG(inode->i_mode))
3406 return 1;
3407 if (S_ISDIR(inode->i_mode))
3408 return 1;
3409 if (S_ISLNK(inode->i_mode))
3410 return !ext4_inode_is_fast_symlink(inode);
3411 return 0;
3412}
3413
/*
 * ext4_punch_hole: punches a hole in a file by releasing the blocks
 * associated with the given offset and length
 *
 * @file:   The file in which to punch the hole
 * @offset: The offset where the hole will begin
 * @length: The length of the hole
 *
 * Returns: 0 on success or negative on failure
 */
3424
3425int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3426{
3427 struct inode *inode = file->f_path.dentry->d_inode;
3428 if (!S_ISREG(inode->i_mode))
3429 return -EOPNOTSUPP;
3430
3431 if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3432 /* TODO: Add support for non extent hole punching */
3433 return -EOPNOTSUPP;
3434 }
3435
3436 if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3437 /* TODO: Add support for bigalloc file systems */
3438 return -EOPNOTSUPP;
3439 }
3440
3441 return ext4_ext_punch_hole(file, offset, length);
3442}
3443
3444/*
3445 * ext4_truncate()
3446 *
3447 * We block out ext4_get_block() block instantiations across the entire
3448 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3449 * simultaneously on behalf of the same inode.
3450 *
3451 * As we work through the truncate and commit bits of it to the journal there
3452 * is one core, guiding principle: the file's tree must always be consistent on
3453 * disk. We must be able to restart the truncate after a crash.
3454 *
3455 * The file's tree may be transiently inconsistent in memory (although it
3456 * probably isn't), but whenever we close off and commit a journal transaction,
3457 * the contents of (the filesystem + the journal) must be consistent and
3458 * restartable. It's pretty simple, really: bottom up, right to left (although
3459 * left-to-right works OK too).
3460 *
3461 * Note that at recovery time, journal replay occurs *before* the restart of
3462 * truncate against the orphan inode list.
3463 *
3464 * The committed inode has the new, desired i_size (which is the same as
3465 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
3466 * that this inode's truncate did not complete and it will again call
3467 * ext4_truncate() to have another go. So there will be instantiated blocks
3468 * to the right of the truncation point in a crashed ext4 filesystem. But
3469 * that's fine - as long as they are linked from the inode, the post-crash
3470 * ext4_truncate() run will find them and release them.
3471 */
3472void ext4_truncate(struct inode *inode)
3473{
3474 trace_ext4_truncate_enter(inode);
3475
3476 if (!ext4_can_truncate(inode))
3477 return;
3478
3479 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3480
3481 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3482 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3483
3484 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3485 ext4_ext_truncate(inode);
3486 else
3487 ext4_ind_truncate(inode);
3488
3489 trace_ext4_truncate_exit(inode);
3490}
3491
3492/*
3493 * ext4_get_inode_loc returns with an extra refcount against the inode's
3494 * underlying buffer_head on success. If 'in_mem' is true, we have all
3495 * data in memory that is needed to recreate the on-disk version of this
3496 * inode.
3497 */
3498static int __ext4_get_inode_loc(struct inode *inode,
3499 struct ext4_iloc *iloc, int in_mem)
3500{
3501 struct ext4_group_desc *gdp;
3502 struct buffer_head *bh;
3503 struct super_block *sb = inode->i_sb;
3504 ext4_fsblk_t block;
3505 int inodes_per_block, inode_offset;
3506
3507 iloc->bh = NULL;
3508 if (!ext4_valid_inum(sb, inode->i_ino))
3509 return -EIO;
3510
3511 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3512 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3513 if (!gdp)
3514 return -EIO;
3515
3516 /*
3517 * Figure out the offset within the block group inode table
3518 */
3519 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3520 inode_offset = ((inode->i_ino - 1) %
3521 EXT4_INODES_PER_GROUP(sb));
3522 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3523 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3524
3525 bh = sb_getblk(sb, block);
3526 if (!bh) {
3527 EXT4_ERROR_INODE_BLOCK(inode, block,
3528 "unable to read itable block");
3529 return -EIO;
3530 }
3531 if (!buffer_uptodate(bh)) {
3532 lock_buffer(bh);
3533
3534 /*
3535 * If the buffer has the write error flag, we have failed
3536 * to write out another inode in the same block. In this
3537 * case, we don't have to read the block because we may
3538 * read the old inode data successfully.
3539 */
3540 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3541 set_buffer_uptodate(bh);
3542
3543 if (buffer_uptodate(bh)) {
3544 /* someone brought it uptodate while we waited */
3545 unlock_buffer(bh);
3546 goto has_buffer;
3547 }
3548
3549 /*
3550 * If we have all information of the inode in memory and this
3551 * is the only valid inode in the block, we need not read the
3552 * block.
3553 */
3554 if (in_mem) {
3555 struct buffer_head *bitmap_bh;
3556 int i, start;
3557
3558 start = inode_offset & ~(inodes_per_block - 1);
3559
3560 /* Is the inode bitmap in cache? */
3561 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3562 if (!bitmap_bh)
3563 goto make_io;
3564
3565 /*
3566 * If the inode bitmap isn't in cache then the
3567 * optimisation may end up performing two reads instead
3568 * of one, so skip it.
3569 */
3570 if (!buffer_uptodate(bitmap_bh)) {
3571 brelse(bitmap_bh);
3572 goto make_io;
3573 }
3574 for (i = start; i < start + inodes_per_block; i++) {
3575 if (i == inode_offset)
3576 continue;
3577 if (ext4_test_bit(i, bitmap_bh->b_data))
3578 break;
3579 }
3580 brelse(bitmap_bh);
3581 if (i == start + inodes_per_block) {
3582 /* all other inodes are free, so skip I/O */
3583 memset(bh->b_data, 0, bh->b_size);
3584 set_buffer_uptodate(bh);
3585 unlock_buffer(bh);
3586 goto has_buffer;
3587 }
3588 }
3589
3590make_io:
3591 /*
3592 * If we need to do any I/O, try to pre-readahead extra
3593 * blocks from the inode table.
3594 */
3595 if (EXT4_SB(sb)->s_inode_readahead_blks) {
3596 ext4_fsblk_t b, end, table;
3597 unsigned num;
3598
3599 table = ext4_inode_table(sb, gdp);
3600 /* s_inode_readahead_blks is always a power of 2 */
3601 b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3602 if (table > b)
3603 b = table;
3604 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3605 num = EXT4_INODES_PER_GROUP(sb);
3606 if (ext4_has_group_desc_csum(sb))
3607 num -= ext4_itable_unused_count(sb, gdp);
3608 table += num / inodes_per_block;
3609 if (end > table)
3610 end = table;
3611 while (b <= end)
3612 sb_breadahead(sb, b++);
3613 }
3614
3615 /*
3616 * There are other valid inodes in the buffer, this inode
3617 * has in-inode xattrs, or we don't have this inode in memory.
3618 * Read the block from disk.
3619 */
3620 trace_ext4_load_inode(inode);
3621 get_bh(bh);
3622 bh->b_end_io = end_buffer_read_sync;
3623 submit_bh(READ | REQ_META | REQ_PRIO, bh);
3624 wait_on_buffer(bh);
3625 if (!buffer_uptodate(bh)) {
3626 EXT4_ERROR_INODE_BLOCK(inode, block,
3627 "unable to read itable block");
3628 brelse(bh);
3629 return -EIO;
3630 }
3631 }
3632has_buffer:
3633 iloc->bh = bh;
3634 return 0;
3635}
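
/*
 * Worked example of the lookup arithmetic above (values illustrative):
 * with 256-byte inodes in 4KiB blocks (inodes_per_block = 16) and
 * 8192 inodes per group, inode number 20000 yields
 *	block_group  = (20000 - 1) / 8192	= 2
 *	inode_offset = (20000 - 1) % 8192	= 3615
 *	block	     = inode_table + 3615 / 16	= inode_table + 225
 *	iloc->offset = (3615 % 16) * 256	= 3840
 */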
3636
3637int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3638{
3639 /* We have all inode data except xattrs in memory here. */
3640 return __ext4_get_inode_loc(inode, iloc,
3641 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3642}
3643
3644void ext4_set_inode_flags(struct inode *inode)
3645{
3646 unsigned int flags = EXT4_I(inode)->i_flags;
3647
3648 inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3649 if (flags & EXT4_SYNC_FL)
3650 inode->i_flags |= S_SYNC;
3651 if (flags & EXT4_APPEND_FL)
3652 inode->i_flags |= S_APPEND;
3653 if (flags & EXT4_IMMUTABLE_FL)
3654 inode->i_flags |= S_IMMUTABLE;
3655 if (flags & EXT4_NOATIME_FL)
3656 inode->i_flags |= S_NOATIME;
3657 if (flags & EXT4_DIRSYNC_FL)
3658 inode->i_flags |= S_DIRSYNC;
3659}
3660
3661/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3662void ext4_get_inode_flags(struct ext4_inode_info *ei)
3663{
3664 unsigned int vfs_fl;
3665 unsigned long old_fl, new_fl;
3666
3667 do {
3668 vfs_fl = ei->vfs_inode.i_flags;
3669 old_fl = ei->i_flags;
3670 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3671 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3672 EXT4_DIRSYNC_FL);
3673 if (vfs_fl & S_SYNC)
3674 new_fl |= EXT4_SYNC_FL;
3675 if (vfs_fl & S_APPEND)
3676 new_fl |= EXT4_APPEND_FL;
3677 if (vfs_fl & S_IMMUTABLE)
3678 new_fl |= EXT4_IMMUTABLE_FL;
3679 if (vfs_fl & S_NOATIME)
3680 new_fl |= EXT4_NOATIME_FL;
3681 if (vfs_fl & S_DIRSYNC)
3682 new_fl |= EXT4_DIRSYNC_FL;
3683 } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3684}
3685
3686static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3687 struct ext4_inode_info *ei)
3688{
3689	blkcnt_t i_blocks;
3690 struct inode *inode = &(ei->vfs_inode);
3691 struct super_block *sb = inode->i_sb;
3692
3693 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3694 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3695 /* we are using combined 48 bit field */
3696 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3697 le32_to_cpu(raw_inode->i_blocks_lo);
3698 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3699 /* i_blocks is stored in units of the file system block size */
3700 return i_blocks << (inode->i_blkbits - 9);
3701 } else {
3702 return i_blocks;
3703 }
3704 } else {
3705 return le32_to_cpu(raw_inode->i_blocks_lo);
3706 }
3707}
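
/*
 * Worked example of the decoding above (values illustrative): with the
 * huge_file feature enabled, i_blocks_high = 0x0001 and i_blocks_lo =
 * 0x00000008 combine to (1ULL << 32) + 8 = 4294967304 units. Without
 * EXT4_INODE_HUGE_FILE those units are 512-byte sectors; with it they
 * are filesystem blocks, so on a 4KiB-block filesystem (i_blkbits = 12)
 * the value is shifted left by 12 - 9 = 3 to convert to sectors.
 */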
3708
3709struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3710{
3711 struct ext4_iloc iloc;
3712 struct ext4_inode *raw_inode;
3713 struct ext4_inode_info *ei;
3714 struct inode *inode;
3715 journal_t *journal = EXT4_SB(sb)->s_journal;
3716 long ret;
3717 int block;
3718 uid_t i_uid;
3719 gid_t i_gid;
3720
3721 inode = iget_locked(sb, ino);
3722 if (!inode)
3723 return ERR_PTR(-ENOMEM);
3724 if (!(inode->i_state & I_NEW))
3725 return inode;
3726
3727 ei = EXT4_I(inode);
3728 iloc.bh = NULL;
3729
3730 ret = __ext4_get_inode_loc(inode, &iloc, 0);
3731 if (ret < 0)
3732 goto bad_inode;
3733 raw_inode = ext4_raw_inode(&iloc);
3734
3735 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3736 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3737 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3738 EXT4_INODE_SIZE(inode->i_sb)) {
3739 EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3740 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3741 EXT4_INODE_SIZE(inode->i_sb));
3742 ret = -EIO;
3743 goto bad_inode;
3744 }
3745 } else
3746 ei->i_extra_isize = 0;
3747
3748 /* Precompute checksum seed for inode metadata */
3749 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3750 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3751 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3752 __u32 csum;
3753 __le32 inum = cpu_to_le32(inode->i_ino);
3754 __le32 gen = raw_inode->i_generation;
3755 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3756 sizeof(inum));
3757 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3758 sizeof(gen));
3759 }
3760
3761 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3762 EXT4_ERROR_INODE(inode, "checksum invalid");
3763 ret = -EIO;
3764 goto bad_inode;
3765 }
3766
3767 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3768 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3769 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3770 if (!(test_opt(inode->i_sb, NO_UID32))) {
3771 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3772 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3773 }
3774 i_uid_write(inode, i_uid);
3775 i_gid_write(inode, i_gid);
3776 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
3777
3778 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
3779 ei->i_dir_start_lookup = 0;
3780 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3781 /* We now have enough fields to check if the inode was active or not.
3782	 * This is needed because nfsd might try to access dead inodes;
3783	 * the test is the same one that e2fsck uses.
3784	 * NeilBrown 1999oct15
3785 */
3786 if (inode->i_nlink == 0) {
3787 if (inode->i_mode == 0 ||
3788 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3789 /* this inode is deleted */
3790 ret = -ESTALE;
3791 goto bad_inode;
3792 }
3793 /* The only unlinked inodes we let through here have
3794 * valid i_mode and are being read by the orphan
3795 * recovery code: that's fine, we're about to complete
3796 * the process of deleting those. */
3797 }
3798 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
3799 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
3800 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3801 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3802 ei->i_file_acl |=
3803 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3804 inode->i_size = ext4_isize(raw_inode);
3805 ei->i_disksize = inode->i_size;
3806#ifdef CONFIG_QUOTA
3807 ei->i_reserved_quota = 0;
3808#endif
3809 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3810 ei->i_block_group = iloc.block_group;
3811 ei->i_last_alloc_group = ~0;
3812 /*
3813 * NOTE! The in-memory inode i_data array is in little-endian order
3814 * even on big-endian machines: we do NOT byteswap the block numbers!
3815 */
3816 for (block = 0; block < EXT4_N_BLOCKS; block++)
3817 ei->i_data[block] = raw_inode->i_block[block];
3818 INIT_LIST_HEAD(&ei->i_orphan);
3819
3820 /*
3821 * Set transaction id's of transactions that have to be committed
3822 * to finish f[data]sync. We set them to currently running transaction
3823 * as we cannot be sure that the inode or some of its metadata isn't
3824 * part of the transaction - the inode could have been reclaimed and
3825 * now it is reread from disk.
3826 */
3827 if (journal) {
3828 transaction_t *transaction;
3829 tid_t tid;
3830
3831 read_lock(&journal->j_state_lock);
3832 if (journal->j_running_transaction)
3833 transaction = journal->j_running_transaction;
3834 else
3835 transaction = journal->j_committing_transaction;
3836 if (transaction)
3837 tid = transaction->t_tid;
3838 else
3839 tid = journal->j_commit_sequence;
3840 read_unlock(&journal->j_state_lock);
3841 ei->i_sync_tid = tid;
3842 ei->i_datasync_tid = tid;
3843 }
3844
3845 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3846 if (ei->i_extra_isize == 0) {
3847 /* The extra space is currently unused. Use it. */
3848 ei->i_extra_isize = sizeof(struct ext4_inode) -
3849 EXT4_GOOD_OLD_INODE_SIZE;
3850 } else {
3851 __le32 *magic = (void *)raw_inode +
3852 EXT4_GOOD_OLD_INODE_SIZE +
3853 ei->i_extra_isize;
3854 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3855 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3856 }
3857 }
3858
3859 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3860 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3861 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3862 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3863
3864 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
3865 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3866 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3867 inode->i_version |=
3868 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
3869 }
3870
3871 ret = 0;
3872 if (ei->i_file_acl &&
3873 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3874 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
3875 ei->i_file_acl);
3876 ret = -EIO;
3877 goto bad_inode;
3878 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3879 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3880 (S_ISLNK(inode->i_mode) &&
3881 !ext4_inode_is_fast_symlink(inode)))
3882 /* Validate extent which is part of inode */
3883 ret = ext4_ext_check_inode(inode);
3884 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3885 (S_ISLNK(inode->i_mode) &&
3886 !ext4_inode_is_fast_symlink(inode))) {
3887 /* Validate block references which are part of inode */
3888 ret = ext4_ind_check_inode(inode);
3889 }
3890 if (ret)
3891 goto bad_inode;
3892
3893 if (S_ISREG(inode->i_mode)) {
3894 inode->i_op = &ext4_file_inode_operations;
3895 inode->i_fop = &ext4_file_operations;
3896 ext4_set_aops(inode);
3897 } else if (S_ISDIR(inode->i_mode)) {
3898 inode->i_op = &ext4_dir_inode_operations;
3899 inode->i_fop = &ext4_dir_operations;
3900 } else if (S_ISLNK(inode->i_mode)) {
3901 if (ext4_inode_is_fast_symlink(inode)) {
3902 inode->i_op = &ext4_fast_symlink_inode_operations;
3903 nd_terminate_link(ei->i_data, inode->i_size,
3904 sizeof(ei->i_data) - 1);
3905 } else {
3906 inode->i_op = &ext4_symlink_inode_operations;
3907 ext4_set_aops(inode);
3908 }
3909 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3910 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3911 inode->i_op = &ext4_special_inode_operations;
3912 if (raw_inode->i_block[0])
3913 init_special_inode(inode, inode->i_mode,
3914 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3915 else
3916 init_special_inode(inode, inode->i_mode,
3917 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3918 } else {
3919 ret = -EIO;
3920 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3921 goto bad_inode;
3922 }
3923 brelse(iloc.bh);
3924 ext4_set_inode_flags(inode);
3925 unlock_new_inode(inode);
3926 return inode;
3927
3928bad_inode:
3929 brelse(iloc.bh);
3930 iget_failed(inode);
3931 return ERR_PTR(ret);
3932}
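
#if 0
/*
 * Example caller pattern (illustrative sketch): ext4_iget() returns
 * either a live inode or an ERR_PTR() value, never NULL, so callers
 * check IS_ERR() and propagate the error with PTR_ERR().
 */
static int iget_example(struct super_block *sb, unsigned long ino)
{
	struct inode *inode = ext4_iget(sb, ino);

	if (IS_ERR(inode))
		return PTR_ERR(inode);	/* -EIO, -ESTALE, -ENOMEM, ... */
	/* ... use the inode ... */
	iput(inode);
	return 0;
}
#endif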
3933
3934static int ext4_inode_blocks_set(handle_t *handle,
3935 struct ext4_inode *raw_inode,
3936 struct ext4_inode_info *ei)
3937{
3938 struct inode *inode = &(ei->vfs_inode);
3939 u64 i_blocks = inode->i_blocks;
3940 struct super_block *sb = inode->i_sb;
3941
3942 if (i_blocks <= ~0U) {
3943 /*
3944	 * i_blocks can be represented in a 32 bit variable
3945	 * as a multiple of 512 bytes
3946 */
3947 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
3948 raw_inode->i_blocks_high = 0;
3949 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3950 return 0;
3951 }
3952 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3953 return -EFBIG;
3954
3955 if (i_blocks <= 0xffffffffffffULL) {
3956 /*
3957 * i_blocks can be represented in a 48 bit variable
3958	 * as a multiple of 512 bytes
3959 */
3960 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
3961 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3962 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3963 } else {
3964 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3965		/* i_blocks is stored in units of the file system block size */
3966 i_blocks = i_blocks >> (inode->i_blkbits - 9);
3967 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
3968 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3969 }
3970 return 0;
3971}
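
/*
 * Worked example of the thresholds above (values illustrative): an
 * i_blocks of 0x100000000 sectors no longer fits in i_blocks_lo alone
 * and is split as i_blocks_high = 0x0001, i_blocks_lo = 0x00000000,
 * which requires the huge_file feature. Only beyond 2^48 - 1 sectors
 * is EXT4_INODE_HUGE_FILE set and the count stored in units of the
 * filesystem block size instead.
 */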
3972
3973/*
3974 * Post the struct inode info into an on-disk inode location in the
3975 * buffer-cache. This gobbles the caller's reference to the
3976 * buffer_head in the inode location struct.
3977 *
3978 * The caller must have write access to iloc->bh.
3979 */
3980static int ext4_do_update_inode(handle_t *handle,
3981 struct inode *inode,
3982 struct ext4_iloc *iloc)
3983{
3984 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
3985 struct ext4_inode_info *ei = EXT4_I(inode);
3986 struct buffer_head *bh = iloc->bh;
3987 int err = 0, rc, block;
3988 uid_t i_uid;
3989 gid_t i_gid;
3990
3991	/* For fields not tracked in the in-memory inode,
3992	 * initialise them to zero for new inodes. */
3993 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
3994 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
3995
3996 ext4_get_inode_flags(ei);
3997 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
3998 i_uid = i_uid_read(inode);
3999 i_gid = i_gid_read(inode);
4000 if (!(test_opt(inode->i_sb, NO_UID32))) {
4001 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4002 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4003/*
4004 * Fix up interoperability with old kernels. Otherwise, old inodes get
4005 * re-used with the upper 16 bits of the uid/gid intact
4006 */
4007 if (!ei->i_dtime) {
4008 raw_inode->i_uid_high =
4009 cpu_to_le16(high_16_bits(i_uid));
4010 raw_inode->i_gid_high =
4011 cpu_to_le16(high_16_bits(i_gid));
4012 } else {
4013 raw_inode->i_uid_high = 0;
4014 raw_inode->i_gid_high = 0;
4015 }
4016 } else {
4017 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4018 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4019 raw_inode->i_uid_high = 0;
4020 raw_inode->i_gid_high = 0;
4021 }
4022 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4023
4024 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4025 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4026 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4027 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4028
4029 if (ext4_inode_blocks_set(handle, raw_inode, ei))
4030 goto out_brelse;
4031 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4032 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4033 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4034 cpu_to_le32(EXT4_OS_HURD))
4035 raw_inode->i_file_acl_high =
4036 cpu_to_le16(ei->i_file_acl >> 32);
4037 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4038 ext4_isize_set(raw_inode, ei->i_disksize);
4039 if (ei->i_disksize > 0x7fffffffULL) {
4040 struct super_block *sb = inode->i_sb;
4041 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4042 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4043 EXT4_SB(sb)->s_es->s_rev_level ==
4044 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4045 /* If this is the first large file
4046 * created, add a flag to the superblock.
4047 */
4048 err = ext4_journal_get_write_access(handle,
4049 EXT4_SB(sb)->s_sbh);
4050 if (err)
4051 goto out_brelse;
4052 ext4_update_dynamic_rev(sb);
4053 EXT4_SET_RO_COMPAT_FEATURE(sb,
4054 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4055 ext4_handle_sync(handle);
4056 err = ext4_handle_dirty_super_now(handle, sb);
4057 }
4058 }
4059 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4060 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4061 if (old_valid_dev(inode->i_rdev)) {
4062 raw_inode->i_block[0] =
4063 cpu_to_le32(old_encode_dev(inode->i_rdev));
4064 raw_inode->i_block[1] = 0;
4065 } else {
4066 raw_inode->i_block[0] = 0;
4067 raw_inode->i_block[1] =
4068 cpu_to_le32(new_encode_dev(inode->i_rdev));
4069 raw_inode->i_block[2] = 0;
4070 }
4071 } else
4072 for (block = 0; block < EXT4_N_BLOCKS; block++)
4073 raw_inode->i_block[block] = ei->i_data[block];
4074
4075 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4076 if (ei->i_extra_isize) {
4077 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4078 raw_inode->i_version_hi =
4079 cpu_to_le32(inode->i_version >> 32);
4080 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4081 }
4082
4083 ext4_inode_csum_set(inode, raw_inode, ei);
4084
4085 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4086 rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4087 if (!err)
4088 err = rc;
4089 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4090
4091 ext4_update_inode_fsync_trans(handle, inode, 0);
4092out_brelse:
4093 brelse(bh);
4094 ext4_std_error(inode->i_sb, err);
4095 return err;
4096}
4097
4098/*
4099 * ext4_write_inode()
4100 *
4101 * We are called from a few places:
4102 *
4103 * - Within generic_file_write() for O_SYNC files.
4104 * Here, there will be no transaction running. We wait for any running
4105 * transaction to commit.
4106 *
4107 * - Within sys_sync(), kupdate and such.
4108 * We wait on commit, if told to.
4109 *
4110 * - Within prune_icache() (PF_MEMALLOC == true)
4111 * Here we simply return. We can't afford to block kswapd on the
4112 * journal commit.
4113 *
4114 * In all cases it is actually safe for us to return without doing anything,
4115 * because the inode has been copied into a raw inode buffer in
4116 * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4117 * knfsd.
4118 *
4119 * Note that we are absolutely dependent upon all inode dirtiers doing the
4120 * right thing: they *must* call mark_inode_dirty() after dirtying info in
4121 * which we are interested.
4122 *
4123 * It would be a bug for them to not do this. The code:
4124 *
4125 * mark_inode_dirty(inode)
4126 * stuff();
4127 * inode->i_size = expr;
4128 *
4129 * is in error because a kswapd-driven write_inode() could occur while
4130 * `stuff()' is running, and the new i_size will be lost. Plus the inode
4131 * will no longer be on the superblock's dirty inode list.
4132 */
4133int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4134{
4135 int err;
4136
4137 if (current->flags & PF_MEMALLOC)
4138 return 0;
4139
4140 if (EXT4_SB(inode->i_sb)->s_journal) {
4141 if (ext4_journal_current_handle()) {
4142 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4143 dump_stack();
4144 return -EIO;
4145 }
4146
4147 if (wbc->sync_mode != WB_SYNC_ALL)
4148 return 0;
4149
4150 err = ext4_force_commit(inode->i_sb);
4151 } else {
4152 struct ext4_iloc iloc;
4153
4154 err = __ext4_get_inode_loc(inode, &iloc, 0);
4155 if (err)
4156 return err;
4157 if (wbc->sync_mode == WB_SYNC_ALL)
4158 sync_dirty_buffer(iloc.bh);
4159 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4160 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4161 "IO error syncing inode");
4162 err = -EIO;
4163 }
4164 brelse(iloc.bh);
4165 }
4166 return err;
4167}
4168
4169/*
4170 * ext4_setattr()
4171 *
4172 * Called from notify_change.
4173 *
4174 * We want to trap VFS attempts to truncate the file as soon as
4175 * possible. In particular, we want to make sure that when the VFS
4176 * shrinks i_size, we put the inode on the orphan list and modify
4177 * i_disksize immediately, so that during the subsequent flushing of
4178 * dirty pages and freeing of disk blocks, we can guarantee that any
4179 * commit will leave the blocks being flushed in an unused state on
4180 * disk. (On recovery, the inode will get truncated and the blocks will
4181 * be freed, so we have a strong guarantee that no future commit will
4182 * leave these blocks visible to the user.)
4183 *
4184 * Another thing we have to ensure is that if we are in ordered mode
4185 * and the inode is still attached to the committing transaction, we
4186 * must start writeout of all the dirty pages which are being truncated.
4187 * This way we are sure that all the data written in the previous
4188 * transaction are already on disk (truncate waits for pages under
4189 * writeback).
4190 *
4191 * Called with inode->i_mutex down.
4192 */
4193int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4194{
4195 struct inode *inode = dentry->d_inode;
4196 int error, rc = 0;
4197 int orphan = 0;
4198 const unsigned int ia_valid = attr->ia_valid;
4199
4200 error = inode_change_ok(inode, attr);
4201 if (error)
4202 return error;
4203
4204 if (is_quota_modification(inode, attr))
4205 dquot_initialize(inode);
4206 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
4207 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4208 handle_t *handle;
4209
4210 /* (user+group)*(old+new) structure, inode write (sb,
4211 * inode block, ? - but truncate inode update has it) */
4212 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4213 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4214 if (IS_ERR(handle)) {
4215 error = PTR_ERR(handle);
4216 goto err_out;
4217 }
4218 error = dquot_transfer(inode, attr);
4219 if (error) {
4220 ext4_journal_stop(handle);
4221 return error;
4222 }
4223 /* Update corresponding info in inode so that everything is in
4224 * one transaction */
4225 if (attr->ia_valid & ATTR_UID)
4226 inode->i_uid = attr->ia_uid;
4227 if (attr->ia_valid & ATTR_GID)
4228 inode->i_gid = attr->ia_gid;
4229 error = ext4_mark_inode_dirty(handle, inode);
4230 ext4_journal_stop(handle);
4231 }
4232
4233 if (attr->ia_valid & ATTR_SIZE) {
4234 inode_dio_wait(inode);
4235
4236 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4237 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4238
4239 if (attr->ia_size > sbi->s_bitmap_maxbytes)
4240 return -EFBIG;
4241 }
4242 }
4243
4244 if (S_ISREG(inode->i_mode) &&
4245 attr->ia_valid & ATTR_SIZE &&
4246 (attr->ia_size < inode->i_size)) {
4247 handle_t *handle;
4248
4249 handle = ext4_journal_start(inode, 3);
4250 if (IS_ERR(handle)) {
4251 error = PTR_ERR(handle);
4252 goto err_out;
4253 }
4254 if (ext4_handle_valid(handle)) {
4255 error = ext4_orphan_add(handle, inode);
4256 orphan = 1;
4257 }
4258 EXT4_I(inode)->i_disksize = attr->ia_size;
4259 rc = ext4_mark_inode_dirty(handle, inode);
4260 if (!error)
4261 error = rc;
4262 ext4_journal_stop(handle);
4263
4264 if (ext4_should_order_data(inode)) {
4265 error = ext4_begin_ordered_truncate(inode,
4266 attr->ia_size);
4267 if (error) {
4268 /* Do as much error cleanup as possible */
4269 handle = ext4_journal_start(inode, 3);
4270 if (IS_ERR(handle)) {
4271 ext4_orphan_del(NULL, inode);
4272 goto err_out;
4273 }
4274 ext4_orphan_del(handle, inode);
4275 orphan = 0;
4276 ext4_journal_stop(handle);
4277 goto err_out;
4278 }
4279 }
4280 }
4281
4282 if (attr->ia_valid & ATTR_SIZE) {
4283 if (attr->ia_size != i_size_read(inode))
4284 truncate_setsize(inode, attr->ia_size);
4285 ext4_truncate(inode);
4286 }
4287
4288 if (!rc) {
4289 setattr_copy(inode, attr);
4290 mark_inode_dirty(inode);
4291 }
4292
4293 /*
4294 * If the call to ext4_truncate failed to get a transaction handle at
4295 * all, we need to clean up the in-core orphan list manually.
4296 */
4297 if (orphan && inode->i_nlink)
4298 ext4_orphan_del(NULL, inode);
4299
4300 if (!rc && (ia_valid & ATTR_MODE))
4301 rc = ext4_acl_chmod(inode);
4302
4303err_out:
4304 ext4_std_error(inode->i_sb, error);
4305 if (!error)
4306 error = rc;
4307 return error;
4308}
4309
4310int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4311 struct kstat *stat)
4312{
4313 struct inode *inode;
4314 unsigned long delalloc_blocks;
4315
4316 inode = dentry->d_inode;
4317 generic_fillattr(inode, stat);
4318
4319 /*
4320	 * We can't update i_blocks if the block allocation is delayed;
4321	 * otherwise, in the case of a system crash before the real block
4322	 * allocation is done, we would have i_blocks inconsistent with the
4323	 * on-disk file blocks.
4324	 * We always keep i_blocks updated together with the real
4325	 * allocation, but to avoid confusing the user, stat will
4326	 * return block counts that include the delayed allocation
4327	 * blocks for this file.
4328 */
4329 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4330 EXT4_I(inode)->i_reserved_data_blocks);
4331
4332 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4333 return 0;
4334}
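
/*
 * Worked example of the conversion above (values illustrative): with
 * 4KiB blocks (s_blocksize_bits = 12), 10 reserved delalloc blocks add
 * (10 << 12) >> 9 = 80 512-byte sectors to stat->blocks.
 */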
4335
4336static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4337{
4338 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4339 return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4340 return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4341}
4342
4343/*
4344 * Account for index blocks, block group bitmaps and block group
4345 * descriptor blocks if we modify data blocks and index blocks. In the
4346 * worst case, the index blocks spread over different block groups.
4347 *
4348 * If the data blocks are discontiguous, they may spread over
4349 * different block groups too. If they are contiguous, with flexbg,
4350 * they could still cross a block group boundary.
4351 *
4352 * Also account for superblock, inode, quota and xattr blocks.
4353 */
4354static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4355{
4356 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4357 int gdpblocks;
4358 int idxblocks;
4359 int ret = 0;
4360
4361 /*
4362	 * How many index blocks do we need to touch to modify nrblocks?
4363	 * The "chunk" flag indicates whether nrblocks is
4364	 * physically contiguous on disk.
4365	 *
4366	 * Direct IO and fallocate call get_block to allocate
4367	 * one single extent at a time, so they can set the "chunk" flag.
4368 */
4369 idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4370
4371 ret = idxblocks;
4372
4373 /*
4374 * Now let's see how many group bitmaps and group descriptors need
4375 * to account
4376 */
4377 groups = idxblocks;
4378 if (chunk)
4379 groups += 1;
4380 else
4381 groups += nrblocks;
4382
4383 gdpblocks = groups;
4384 if (groups > ngroups)
4385 groups = ngroups;
4386 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4387 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4388
4389 /* bitmaps and block group descriptor blocks */
4390 ret += groups + gdpblocks;
4391
4392 /* Blocks for super block, inode, quota and xattr blocks */
4393 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4394
4395 return ret;
4396}
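
/*
 * Worked example (values illustrative): mapping one contiguous chunk
 * (chunk = 1) that needs idxblocks = 3 index blocks gives
 *	groups	  = 3 + 1 = 4
 *	gdpblocks = min(groups, s_gdb_count)
 *	ret	  = 3 + 4 + gdpblocks + EXT4_META_TRANS_BLOCKS(sb)
 * assuming 4 <= ngroups. With chunk = 0, discontiguous data blocks
 * instead contribute one group per block.
 */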
4397
4398/*
4399 * Calculate the total number of credits to reserve so that the
4400 * modification of a single page fits into a single transaction,
4401 * which may include multiple chunks of block allocations.
4402 *
4403 * This could be called via ext4_write_begin().
4404 *
4405 * We need to consider the worst case, when we allocate
4406 * one new block per extent.
4407 */
4408int ext4_writepage_trans_blocks(struct inode *inode)
4409{
4410 int bpp = ext4_journal_blocks_per_page(inode);
4411 int ret;
4412
4413 ret = ext4_meta_trans_blocks(inode, bpp, 0);
4414
4415 /* Account for data blocks for journalled mode */
4416 if (ext4_should_journal_data(inode))
4417 ret += bpp;
4418 return ret;
4419}
4420
4421/*
4422 * Calculate the journal credits for a chunk of data modification.
4423 *
4424 * This is called from DIO, fallocate or whoever else calls
4425 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4426 *
4427 * Journal buffers for data blocks are not included here, as DIO
4428 * and fallocate do not need to journal data buffers.
4429 */
4430int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4431{
4432 return ext4_meta_trans_blocks(inode, nrblocks, 1);
4433}
4434
4435/*
4436 * The caller must have previously called ext4_reserve_inode_write().
4437 * Given this, we know that the caller already has write access to iloc->bh.
4438 */
4439int ext4_mark_iloc_dirty(handle_t *handle,
4440 struct inode *inode, struct ext4_iloc *iloc)
4441{
4442 int err = 0;
4443
4444 if (IS_I_VERSION(inode))
4445 inode_inc_iversion(inode);
4446
4447 /* the do_update_inode consumes one bh->b_count */
4448 get_bh(iloc->bh);
4449
4450 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4451 err = ext4_do_update_inode(handle, inode, iloc);
4452 put_bh(iloc->bh);
4453 return err;
4454}
4455
4456/*
4457 * On success, we end up with an outstanding reference count against
4458 * iloc->bh. This _must_ be cleaned up later.
4459 */
4460
4461int
4462ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4463 struct ext4_iloc *iloc)
4464{
4465 int err;
4466
4467 err = ext4_get_inode_loc(inode, iloc);
4468 if (!err) {
4469 BUFFER_TRACE(iloc->bh, "get_write_access");
4470 err = ext4_journal_get_write_access(handle, iloc->bh);
4471 if (err) {
4472 brelse(iloc->bh);
4473 iloc->bh = NULL;
4474 }
4475 }
4476 ext4_std_error(inode->i_sb, err);
4477 return err;
4478}
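
#if 0
/*
 * Example of the canonical update sequence (illustrative sketch):
 * reserve write access, modify the in-core inode, then mark the iloc
 * dirty, which consumes the iloc->bh reference taken by the reserve.
 */
static int update_example(handle_t *handle, struct inode *inode)
{
	struct ext4_iloc iloc;
	int err;

	err = ext4_reserve_inode_write(handle, inode, &iloc);
	if (err)
		return err;
	/* ... modify the in-core inode here ... */
	return ext4_mark_iloc_dirty(handle, inode, &iloc);
}
#endif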
4479
4480/*
4481 * Expand an inode by new_extra_isize bytes.
4482 * Returns 0 on success or negative error number on failure.
4483 */
4484static int ext4_expand_extra_isize(struct inode *inode,
4485 unsigned int new_extra_isize,
4486 struct ext4_iloc iloc,
4487 handle_t *handle)
4488{
4489 struct ext4_inode *raw_inode;
4490 struct ext4_xattr_ibody_header *header;
4491
4492 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4493 return 0;
4494
4495 raw_inode = ext4_raw_inode(&iloc);
4496
4497 header = IHDR(inode, raw_inode);
4498
4499 /* No extended attributes present */
4500 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4501 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4502 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4503 new_extra_isize);
4504 EXT4_I(inode)->i_extra_isize = new_extra_isize;
4505 return 0;
4506 }
4507
4508 /* try to expand with EAs present */
4509 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4510 raw_inode, handle);
4511}
4512
4513/*
4514 * What we do here is to mark the in-core inode as clean with respect to inode
4515 * dirtiness (it may still be data-dirty).
4516 * This means that the in-core inode may be reaped by prune_icache
4517 * without having to perform any I/O. This is a very good thing,
4518 * because *any* task may call prune_icache - even ones which
4519 * have a transaction open against a different journal.
4520 *
4521 * Is this cheating? Not really. Sure, we haven't written the
4522 * inode out, but prune_icache isn't a user-visible syncing function.
4523 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4524 * we start and wait on commits.
4525 *
4526 * Is this efficient/effective? Well, we're being nice to the system
4527 * by cleaning up our inodes proactively so they can be reaped
4528 * without I/O. But we are potentially leaving up to five seconds'
4529 * worth of inodes floating about which prune_icache wants us to
4530 * write out. One way to fix that would be to get prune_icache()
4531 * to do a write_super() to free up some memory. It has the desired
4532 * effect.
4533 */
4534int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4535{
4536 struct ext4_iloc iloc;
4537 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4538 static unsigned int mnt_count;
4539 int err, ret;
4540
4541 might_sleep();
4542 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4543 err = ext4_reserve_inode_write(handle, inode, &iloc);
4544 if (ext4_handle_valid(handle) &&
4545 EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4546 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4547 /*
4548 * We need extra buffer credits since we may write into EA block
4549 * with this same handle. If journal_extend fails, then it will
4550 * only result in a minor loss of functionality for that inode.
4551 * If this is felt to be critical, then e2fsck should be run to
4552 * force a large enough s_min_extra_isize.
4553 */
4554 if ((jbd2_journal_extend(handle,
4555 EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4556 ret = ext4_expand_extra_isize(inode,
4557 sbi->s_want_extra_isize,
4558 iloc, handle);
4559 if (ret) {
4560 ext4_set_inode_state(inode,
4561 EXT4_STATE_NO_EXPAND);
4562 if (mnt_count !=
4563 le16_to_cpu(sbi->s_es->s_mnt_count)) {
4564 ext4_warning(inode->i_sb,
4565 "Unable to expand inode %lu. Delete"
4566 " some EAs or run e2fsck.",
4567 inode->i_ino);
4568 mnt_count =
4569 le16_to_cpu(sbi->s_es->s_mnt_count);
4570 }
4571 }
4572 }
4573 }
4574 if (!err)
4575 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4576 return err;
4577}
4578
4579/*
4580 * ext4_dirty_inode() is called from __mark_inode_dirty()
4581 *
4582 * We're really interested in the case where a file is being extended.
4583 * i_size has been changed by generic_commit_write() and we thus need
4584 * to include the updated inode in the current transaction.
4585 *
4586 * Also, dquot_alloc_block() will always dirty the inode when blocks
4587 * are allocated to the file.
4588 *
4589 * If the inode is marked synchronous, we don't honour that here - doing
4590 * so would cause a commit on atime updates, which we don't bother doing.
4591 * We handle synchronous inodes at the highest possible level.
4592 */
4593void ext4_dirty_inode(struct inode *inode, int flags)
4594{
4595 handle_t *handle;
4596
4597 handle = ext4_journal_start(inode, 2);
4598 if (IS_ERR(handle))
4599 goto out;
4600
4601 ext4_mark_inode_dirty(handle, inode);
4602
4603 ext4_journal_stop(handle);
4604out:
4605 return;
4606}
4607
4608#if 0
4609/*
4610 * Bind an inode's backing buffer_head into this transaction, to prevent
4611 * it from being flushed to disk early. Unlike
4612 * ext4_reserve_inode_write, this leaves behind no bh reference and
4613 * returns no iloc structure, so the caller needs to repeat the iloc
4614 * lookup to mark the inode dirty later.
4615 */
4616static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4617{
4618 struct ext4_iloc iloc;
4619
4620 int err = 0;
4621 if (handle) {
4622 err = ext4_get_inode_loc(inode, &iloc);
4623 if (!err) {
4624 BUFFER_TRACE(iloc.bh, "get_write_access");
4625 err = jbd2_journal_get_write_access(handle, iloc.bh);
4626 if (!err)
4627 err = ext4_handle_dirty_metadata(handle,
4628 NULL,
4629 iloc.bh);
4630 brelse(iloc.bh);
4631 }
4632 }
4633 ext4_std_error(inode->i_sb, err);
4634 return err;
4635}
4636#endif
4637
4638int ext4_change_inode_journal_flag(struct inode *inode, int val)
4639{
4640 journal_t *journal;
4641 handle_t *handle;
4642 int err;
4643
4644 /*
4645 * We have to be very careful here: changing a data block's
4646 * journaling status dynamically is dangerous. If we write a
4647 * data block to the journal, change the status and then delete
4648 * that block, we risk forgetting to revoke the old log record
4649 * from the journal and so a subsequent replay can corrupt data.
4650 * So, first we make sure that the journal is empty and that
4651 * nobody is changing anything.
4652 */
4653
4654 journal = EXT4_JOURNAL(inode);
4655 if (!journal)
4656 return 0;
4657 if (is_journal_aborted(journal))
4658 return -EROFS;
4659 /* We have to allocate physical blocks for delalloc blocks
4660	 * before flushing the journal; otherwise delalloc blocks cannot
4661	 * be allocated any more. Even worse, a truncate on delalloc blocks
4662	 * could trigger a BUG by flushing delalloc blocks in the journal.
4663	 * There are no delalloc blocks in non-journal data mode.
4664 */
4665 if (val && test_opt(inode->i_sb, DELALLOC)) {
4666 err = ext4_alloc_da_blocks(inode);
4667 if (err < 0)
4668 return err;
4669 }
4670
4671 jbd2_journal_lock_updates(journal);
4672
4673 /*
4674 * OK, there are no updates running now, and all cached data is
4675 * synced to disk. We are now in a completely consistent state
4676 * which doesn't have anything in the journal, and we know that
4677 * no filesystem updates are running, so it is safe to modify
4678 * the inode's in-core data-journaling state flag now.
4679 */
4680
4681 if (val)
4682 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4683 else {
4684 jbd2_journal_flush(journal);
4685 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4686 }
4687 ext4_set_aops(inode);
4688
4689 jbd2_journal_unlock_updates(journal);
4690
4691 /* Finally we can mark the inode as dirty. */
4692
4693 handle = ext4_journal_start(inode, 1);
4694 if (IS_ERR(handle))
4695 return PTR_ERR(handle);
4696
4697 err = ext4_mark_inode_dirty(handle, inode);
4698 ext4_handle_sync(handle);
4699 ext4_journal_stop(handle);
4700 ext4_std_error(inode->i_sb, err);
4701
4702 return err;
4703}
4704
4705static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
4706{
4707 return !buffer_mapped(bh);
4708}
4709
4710int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4711{
4712 struct page *page = vmf->page;
4713 loff_t size;
4714 unsigned long len;
4715 int ret;
4716 struct file *file = vma->vm_file;
4717 struct inode *inode = file->f_path.dentry->d_inode;
4718 struct address_space *mapping = inode->i_mapping;
4719 handle_t *handle;
4720 get_block_t *get_block;
4721 int retries = 0;
4722
4723 /*
4724 * This check is racy but catches the common case. We rely on
4725 * __block_page_mkwrite() to do a reliable check.
4726 */
4727 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
4728 /* Delalloc case is easy... */
4729 if (test_opt(inode->i_sb, DELALLOC) &&
4730 !ext4_should_journal_data(inode) &&
4731 !ext4_nonda_switch(inode->i_sb)) {
4732 do {
4733 ret = __block_page_mkwrite(vma, vmf,
4734 ext4_da_get_block_prep);
4735 } while (ret == -ENOSPC &&
4736 ext4_should_retry_alloc(inode->i_sb, &retries));
4737 goto out_ret;
4738 }
4739
4740 lock_page(page);
4741 size = i_size_read(inode);
4742 /* Page got truncated from under us? */
4743 if (page->mapping != mapping || page_offset(page) > size) {
4744 unlock_page(page);
4745 ret = VM_FAULT_NOPAGE;
4746 goto out;
4747 }
4748
4749 if (page->index == size >> PAGE_CACHE_SHIFT)
4750 len = size & ~PAGE_CACHE_MASK;
4751 else
4752 len = PAGE_CACHE_SIZE;
4753 /*
4754 * Return if we have all the buffers mapped. This avoids the need to do
4755	 * journal_start/journal_stop, which can block and take a long time.
4756 */
4757 if (page_has_buffers(page)) {
4758 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4759 ext4_bh_unmapped)) {
4760 /* Wait so that we don't change page under IO */
4761 wait_on_page_writeback(page);
4762 ret = VM_FAULT_LOCKED;
4763 goto out;
4764 }
4765 }
4766 unlock_page(page);
4767 /* OK, we need to fill the hole... */
4768 if (ext4_should_dioread_nolock(inode))
4769 get_block = ext4_get_block_write;
4770 else
4771 get_block = ext4_get_block;
4772retry_alloc:
4773 handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4774 if (IS_ERR(handle)) {
4775 ret = VM_FAULT_SIGBUS;
4776 goto out;
4777 }
4778 ret = __block_page_mkwrite(vma, vmf, get_block);
4779 if (!ret && ext4_should_journal_data(inode)) {
4780 if (walk_page_buffers(handle, page_buffers(page), 0,
4781 PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
4782 unlock_page(page);
4783 ret = VM_FAULT_SIGBUS;
4784 ext4_journal_stop(handle);
4785 goto out;
4786 }
4787 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4788 }
4789 ext4_journal_stop(handle);
4790 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4791 goto retry_alloc;
4792out_ret:
4793 ret = block_page_mkwrite_return(ret);
4794out:
4795 return ret;
4796}
137
138static void ext4_invalidatepage(struct page *page, unsigned int offset,
139 unsigned int length);
140static int __ext4_journalled_writepage(struct page *page, unsigned int len);
141static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
142static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
143 int pextents);
144
145/*
146 * Test whether an inode is a fast symlink.
147 */
148int ext4_inode_is_fast_symlink(struct inode *inode)
149{
150 int ea_blocks = EXT4_I(inode)->i_file_acl ?
151 EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
152
153 if (ext4_has_inline_data(inode))
154 return 0;
155
156 return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
157}
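
/*
 * Worked example (values illustrative): a symlink whose only charged
 * blocks are an external xattr block on a 4KiB-cluster filesystem has
 * ea_blocks = 4096 >> 9 = 8 sectors, so with i_blocks == 8 the
 * difference is zero and the symlink still counts as fast (its target
 * lives in i_data).
 */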
158
159/*
160 * Restart the transaction associated with *handle. This does a commit,
161 * so before we call here everything must be consistently dirtied against
162 * this transaction.
163 */
164int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
165 int nblocks)
166{
167 int ret;
168
169 /*
170 * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
171 * moment, get_block can be called only for blocks inside i_size since
172	 * the page cache has already been dropped and writes are blocked by
173 * i_mutex. So we can safely drop the i_data_sem here.
174 */
175 BUG_ON(EXT4_JOURNAL(inode) == NULL);
176 jbd_debug(2, "restarting handle %p\n", handle);
177 up_write(&EXT4_I(inode)->i_data_sem);
178 ret = ext4_journal_restart(handle, nblocks);
179 down_write(&EXT4_I(inode)->i_data_sem);
180 ext4_discard_preallocations(inode);
181
182 return ret;
183}
184
185/*
186 * Called at the last iput() if i_nlink is zero.
187 */
188void ext4_evict_inode(struct inode *inode)
189{
190 handle_t *handle;
191 int err;
192
193 trace_ext4_evict_inode(inode);
194
195 if (inode->i_nlink) {
196 /*
197	 * When journalling data, dirty buffers are tracked only in the
198	 * journal. So although mm thinks everything is clean and
199	 * ready for reaping, the inode might still have some pages to
200	 * write in the running transaction or waiting to be
201	 * checkpointed. Thus calling jbd2_journal_invalidatepage()
202	 * (via truncate_inode_pages()) to discard these buffers can
203	 * cause data loss. Also, even if we did not discard these
204	 * buffers, we would have no way to find them after the inode
205	 * is reaped, and thus a user could see stale data when trying
206	 * to read them before the transaction is checkpointed. So be
207	 * careful and force everything to disk here... We use
208	 * ei->i_datasync_tid to store the newest transaction
209	 * containing the inode's data.
210 *
211 * Note that directories do not have this problem because they
212 * don't use page cache.
213 */
214 if (inode->i_ino != EXT4_JOURNAL_INO &&
215 ext4_should_journal_data(inode) &&
216 (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
217 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
218 tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
219
220 jbd2_complete_transaction(journal, commit_tid);
221 filemap_write_and_wait(&inode->i_data);
222 }
223 truncate_inode_pages_final(&inode->i_data);
224
225 goto no_delete;
226 }
227
228 if (is_bad_inode(inode))
229 goto no_delete;
230 dquot_initialize(inode);
231
232 if (ext4_should_order_data(inode))
233 ext4_begin_ordered_truncate(inode, 0);
234 truncate_inode_pages_final(&inode->i_data);
235
236 /*
237 * Protect us against freezing - iput() caller didn't have to have any
238 * protection against it
239 */
240 sb_start_intwrite(inode->i_sb);
241 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
242 ext4_blocks_for_truncate(inode)+3);
243 if (IS_ERR(handle)) {
244 ext4_std_error(inode->i_sb, PTR_ERR(handle));
245 /*
246 * If we're going to skip the normal cleanup, we still need to
247 * make sure that the in-core orphan linked list is properly
248 * cleaned up.
249 */
250 ext4_orphan_del(NULL, inode);
251 sb_end_intwrite(inode->i_sb);
252 goto no_delete;
253 }
254
255 if (IS_SYNC(inode))
256 ext4_handle_sync(handle);
257 inode->i_size = 0;
258 err = ext4_mark_inode_dirty(handle, inode);
259 if (err) {
260 ext4_warning(inode->i_sb,
261 "couldn't mark inode dirty (err %d)", err);
262 goto stop_handle;
263 }
264 if (inode->i_blocks) {
265 err = ext4_truncate(inode);
266 if (err) {
267 ext4_error(inode->i_sb,
268 "couldn't truncate inode %lu (err %d)",
269 inode->i_ino, err);
270 goto stop_handle;
271 }
272 }
273
274 /*
275 * ext4_ext_truncate() doesn't reserve any slop when it
276 * restarts journal transactions; therefore there may not be
277 * enough credits left in the handle to remove the inode from
278 * the orphan list and set the dtime field.
279 */
280 if (!ext4_handle_has_enough_credits(handle, 3)) {
281 err = ext4_journal_extend(handle, 3);
282 if (err > 0)
283 err = ext4_journal_restart(handle, 3);
284 if (err != 0) {
285 ext4_warning(inode->i_sb,
286 "couldn't extend journal (err %d)", err);
287 stop_handle:
288 ext4_journal_stop(handle);
289 ext4_orphan_del(NULL, inode);
290 sb_end_intwrite(inode->i_sb);
291 goto no_delete;
292 }
293 }
294
295 /*
296 * Kill off the orphan record which ext4_truncate created.
297 * AKPM: I think this can be inside the above `if'.
298 * Note that ext4_orphan_del() has to be able to cope with the
299 * deletion of a non-existent orphan - this is because we don't
300 * know if ext4_truncate() actually created an orphan record.
301 * (Well, we could do this if we need to, but heck - it works)
302 */
303 ext4_orphan_del(handle, inode);
304 EXT4_I(inode)->i_dtime = get_seconds();
305
306 /*
307 * One subtle ordering requirement: if anything has gone wrong
308 * (transaction abort, IO errors, whatever), then we can still
309 * do these next steps (the fs will already have been marked as
310 * having errors), but we can't free the inode if the mark_dirty
311 * fails.
312 */
313 if (ext4_mark_inode_dirty(handle, inode))
314 /* If that failed, just do the required in-core inode clear. */
315 ext4_clear_inode(inode);
316 else
317 ext4_free_inode(handle, inode);
318 ext4_journal_stop(handle);
319 sb_end_intwrite(inode->i_sb);
320 return;
321no_delete:
322 ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
323}
324
325#ifdef CONFIG_QUOTA
326qsize_t *ext4_get_reserved_space(struct inode *inode)
327{
328 return &EXT4_I(inode)->i_reserved_quota;
329}
330#endif
331
332/*
333 * Called with i_data_sem down, which is important since we can call
334 * ext4_discard_preallocations() from here.
335 */
336void ext4_da_update_reserve_space(struct inode *inode,
337 int used, int quota_claim)
338{
339 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
340 struct ext4_inode_info *ei = EXT4_I(inode);
341
342 spin_lock(&ei->i_block_reservation_lock);
343 trace_ext4_da_update_reserve_space(inode, used, quota_claim);
344 if (unlikely(used > ei->i_reserved_data_blocks)) {
345 ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
346 "with only %d reserved data blocks",
347 __func__, inode->i_ino, used,
348 ei->i_reserved_data_blocks);
349 WARN_ON(1);
350 used = ei->i_reserved_data_blocks;
351 }
352
353 /* Update per-inode reservations */
354 ei->i_reserved_data_blocks -= used;
355 percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
356
357 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
358
359 /* Update quota subsystem for data blocks */
360 if (quota_claim)
361 dquot_claim_block(inode, EXT4_C2B(sbi, used));
362 else {
363 /*
364		 * We did fallocate at an offset that was already delayed
365		 * allocated, so on delayed allocation writeback we should
366		 * not re-claim the quota for the fallocated blocks.
367 */
368 dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
369 }
370
371 /*
372 * If we have done all the pending block allocations and if
373 * there aren't any writers on the inode, we can discard the
374 * inode's preallocations.
375 */
376 if ((ei->i_reserved_data_blocks == 0) &&
377 (atomic_read(&inode->i_writecount) == 0))
378 ext4_discard_preallocations(inode);
379}
380
381static int __check_block_validity(struct inode *inode, const char *func,
382 unsigned int line,
383 struct ext4_map_blocks *map)
384{
385 if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
386 map->m_len)) {
387 ext4_error_inode(inode, func, line, map->m_pblk,
388 "lblock %lu mapped to illegal pblock "
389 "(length %d)", (unsigned long) map->m_lblk,
390 map->m_len);
391 return -EFSCORRUPTED;
392 }
393 return 0;
394}
395
396int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
397 ext4_lblk_t len)
398{
399 int ret;
400
401 if (ext4_encrypted_inode(inode))
402 return fscrypt_zeroout_range(inode, lblk, pblk, len);
403
404 ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
405 if (ret > 0)
406 ret = 0;
407
408 return ret;
409}
410
411#define check_block_validity(inode, map) \
412 __check_block_validity((inode), __func__, __LINE__, (map))
413
414#ifdef ES_AGGRESSIVE_TEST
415static void ext4_map_blocks_es_recheck(handle_t *handle,
416 struct inode *inode,
417 struct ext4_map_blocks *es_map,
418 struct ext4_map_blocks *map,
419 int flags)
420{
421 int retval;
422
423 map->m_flags = 0;
424 /*
425	 * There is a race window in which the result may differ,
426	 * e.g. xfstests #223 when dioread_nolock is enabled. The reason
427	 * is that we look up a block mapping in the extent status tree
428	 * without taking i_data_sem, so in the meantime the unwritten
429	 * extent could have been converted.
430 */
431 down_read(&EXT4_I(inode)->i_data_sem);
432 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
433 retval = ext4_ext_map_blocks(handle, inode, map, flags &
434 EXT4_GET_BLOCKS_KEEP_SIZE);
435 } else {
436 retval = ext4_ind_map_blocks(handle, inode, map, flags &
437 EXT4_GET_BLOCKS_KEEP_SIZE);
438 }
439 up_read((&EXT4_I(inode)->i_data_sem));
440
441 /*
442	 * We don't check m_len because the extent will be collapsed in the
443	 * status tree, so the m_len values might not be equal.
444 */
445 if (es_map->m_lblk != map->m_lblk ||
446 es_map->m_flags != map->m_flags ||
447 es_map->m_pblk != map->m_pblk) {
448 printk("ES cache assertion failed for inode: %lu "
449 "es_cached ex [%d/%d/%llu/%x] != "
450 "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
451 inode->i_ino, es_map->m_lblk, es_map->m_len,
452 es_map->m_pblk, es_map->m_flags, map->m_lblk,
453 map->m_len, map->m_pblk, map->m_flags,
454 retval, flags);
455 }
456}
457#endif /* ES_AGGRESSIVE_TEST */
458
459/*
460 * The ext4_map_blocks() function tries to look up the requested blocks,
461 * and returns if the blocks are already mapped.
462 *
463 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
464 * stores the allocated blocks in the result buffer head and marks it
465 * mapped.
466 *
467 * If the file is extent-based, it will call ext4_ext_map_blocks();
468 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
469 * files.
470 *
471 * On success, it returns the number of blocks mapped or allocated. If
472 * create == 0 and the blocks are pre-allocated and unwritten, the resulting
473 * @map is marked as unwritten. If create == 1, it will mark @map as mapped.
474 *
475 * It returns 0 if a plain lookup failed (blocks have not been allocated); in
476 * that case, @map is returned as unmapped but we still fill in map->m_len to
477 * indicate the length of the hole starting at map->m_lblk.
478 *
479 * It returns an error in case of allocation failure.
480 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, flags, map->m_len,
		  (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* Look up the extent status tree first */
	if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG_ON(1);
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags &
					     EXT4_GET_BLOCKS_KEEP_SIZE);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check the extents flag again here, because migrate
	 * could have changed the inode type in between.
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing the migrate flag.
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now.  We don't
		 * support fallocate for non-extent files, so we can update
		 * the reserved space here.
		 */
		if ((retval > 0) &&
		    (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zero out blocks before inserting them into the
		 * extent status tree.  Otherwise someone could look them up
		 * there and use them before they are really zeroed.  We also
		 * have to unmap metadata before zeroing as otherwise writeback
		 * can overwrite zeros with stale data from the block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
					   map->m_len);
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * the extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_find_delalloc_range(inode, map->m_lblk,
					     map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on the transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !IS_NOQUOTA(inode) &&
		    ext4_should_order_data(inode)) {
			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode);
			if (ret)
				return ret;
		}
	}
	return retval;
}
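
/*
 * Illustrative sketch of a typical ext4_map_blocks() call (the variable
 * names are only examples).  The caller fills in m_lblk/m_len; a positive
 * return value is the number of blocks mapped and m_pblk/m_flags describe
 * the mapping, while 0 means a hole of map.m_len blocks at map.m_lblk:
 *
 *	struct ext4_map_blocks map = {
 *		.m_lblk = lblk,
 *		.m_len = 8,
 *	};
 *	int n = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE);
 *	if (n > 0 && (map.m_flags & EXT4_MAP_MAPPED))
 *		first_usable_pblk = map.m_pblk;
 *
 * _ext4_get_block() below is the in-tree caller that follows this pattern.
 */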

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}
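
/*
 * Illustrative sketch (generic buffer-layer usage, not taken from this
 * file): ext4_get_block() has the get_block_t signature, so generic VFS
 * helpers can be handed it directly, e.g.
 *
 *	block_read_full_page(page, ext4_get_block);
 *
 * or, for a lookup that must not allocate, it can be called with
 * create == 0 and buffer_mapped() checked on return.
 */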

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Get blocks function for the cases that need to start a transaction -
 * generally different cases of direct IO and DAX IO. It also handles retries
 * in case of ENOSPC.
 */
static int ext4_get_block_trans(struct inode *inode, sector_t iblock,
				struct buffer_head *bh_result, int flags)
{
	int dio_credits;
	handle_t *handle;
	int retries = 0;
	int ret;

	/* Trim mapping request to maximum we can map at once for DIO */
	if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS)
		bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits;
	dio_credits = ext4_chunk_trans_blocks(inode,
				      bh_result->b_size >> inode->i_blkbits);
retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ret = _ext4_get_block(inode, iblock, bh_result, flags);
	ext4_journal_stop(handle);

	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return ret;
}
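
/*
 * Worked example of the trimming above (assuming a 4 KiB block size, i.e.
 * i_blkbits == 12): a 64 MiB DIO request has b_size >> 12 == 16384 blocks,
 * which exceeds DIO_MAX_BLOCKS (4096), so b_size is clamped to
 * 4096 << 12 == 16 MiB.  The journal handle then only needs credits for
 * one 16 MiB chunk; the generic DIO code simply calls back into the
 * get_block function for the remainder of the request.
 */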

/* Get block function for DIO reads and writes to inodes without extents */
int ext4_dio_get_block(struct inode *inode, sector_t iblock,
		       struct buffer_head *bh, int create)
{
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	if (!create)
		return _ext4_get_block(inode, iblock, bh, 0);
	return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE);
}

/*
 * Get block function for AIO DIO writes when we create unwritten extents if
 * blocks are not allocated yet. The extent will be converted to written
 * after IO is complete.
 */
static int ext4_dio_get_block_unwritten_async(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * When doing DIO using unwritten extents, we need io_end to convert
	 * unwritten extents to written on IO completion. We allocate io_end
	 * once we spot an unwritten extent and store it in b_private. Generic
	 * DIO code keeps b_private set and furthermore passes the value to
	 * our completion callback in the 'private' argument.
	 */
	if (!ret && buffer_unwritten(bh_result)) {
		if (!bh_result->b_private) {
			ext4_io_end_t *io_end;

			io_end = ext4_init_io_end(inode, GFP_KERNEL);
			if (!io_end)
				return -ENOMEM;
			bh_result->b_private = io_end;
			ext4_set_io_unwritten_flag(inode, io_end);
		}
		set_buffer_defer_completion(bh_result);
	}

	return ret;
}

/*
 * Get block function for non-AIO DIO writes when we create unwritten extents
 * if blocks are not allocated yet. The extent will be converted to written
 * after IO is complete, from the ext4_ext_direct_IO() function.
 */
static int ext4_dio_get_block_unwritten_sync(struct inode *inode,
		sector_t iblock, struct buffer_head *bh_result, int create)
{
	int ret;

	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = ext4_get_block_trans(inode, iblock, bh_result,
				   EXT4_GET_BLOCKS_IO_CREATE_EXT);

	/*
	 * Mark inode as having pending DIO writes to unwritten extents.
	 * ext4_ext_direct_IO() checks this flag and converts extents to
	 * written.
	 */
	if (!ret && buffer_unwritten(bh_result))
		ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);

	return ret;
}

static int ext4_dio_get_block_overwrite(struct inode *inode, sector_t iblock,
		struct buffer_head *bh_result, int create)
{
	int ret;

	ext4_debug("ext4_dio_get_block_overwrite: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	/* We don't expect handle for direct IO */
	WARN_ON_ONCE(ext4_journal_current_handle());

	ret = _ext4_get_block(inode, iblock, bh_result, 0);
	/*
	 * Blocks should have been preallocated! ext4_file_write_iter() checks
	 * that.
	 */
	WARN_ON_ONCE(!buffer_mapped(bh_result) || buffer_unwritten(bh_result));

	return ret;
}


/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT(handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT(handle != NULL);

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || buffer_uptodate(bh))
		return bh;
	ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	put_bh(bh);
	return ERR_PTR(-EIO);
}
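
/*
 * Illustrative sketch (the variables are only examples): ext4_bread()
 * wraps ext4_getblk() with a synchronous read, so a typical metadata
 * consumer looks roughly like
 *
 *	struct buffer_head *bh;
 *
 *	bh = ext4_bread(handle, inode, blk, 0);
 *	if (IS_ERR(bh))
 *		return PTR_ERR(bh);
 *	if (!bh)
 *		return 0;	(a hole: the block was never allocated)
 *	...use bh->b_data...
 *	brelse(bh);
 *
 * Passing EXT4_GET_BLOCKS_CREATE in map_flags (with a valid handle) makes a
 * missing block be allocated and zeroed instead of NULL being returned.
 */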

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
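
/*
 * Illustrative sketch: the walker applies @fn to every buffer overlapping
 * [from, to) in the page.  This is how the data=journal write_begin path
 * uses it later in this file:
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 *
 * @partial, when non-NULL, is set if any buffer outside the range is not
 * uptodate, which tells the caller it cannot mark the whole page uptodate.
 */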

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers.  Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues.  Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}

#ifdef CONFIG_EXT4_FS_ENCRYPTION
static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
				  get_block_t *get_block)
{
	unsigned from = pos & (PAGE_SIZE - 1);
	unsigned to = from + len;
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end;
	sector_t block;
	int err = 0;
	unsigned blocksize = inode->i_sb->s_blocksize;
	unsigned bbits;
	struct buffer_head *bh, *head, *wait[2], **wait_bh = wait;
	bool decrypt = false;

	BUG_ON(!PageLocked(page));
	BUG_ON(from > PAGE_SIZE);
	BUG_ON(to > PAGE_SIZE);
	BUG_ON(from > to);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);
	head = page_buffers(page);
	bbits = ilog2(blocksize);
	block = (sector_t)page->index << (PAGE_SHIFT - bbits);

	for (bh = head, block_start = 0; bh != head || !block_start;
	     block++, block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (PageUptodate(page)) {
				if (!buffer_uptodate(bh))
					set_buffer_uptodate(bh);
			}
			continue;
		}
		if (buffer_new(bh))
			clear_buffer_new(bh);
		if (!buffer_mapped(bh)) {
			WARN_ON(bh->b_size != blocksize);
			err = get_block(inode, block, bh, 1);
			if (err)
				break;
			if (buffer_new(bh)) {
				clean_bdev_bh_alias(bh);
				if (PageUptodate(page)) {
					clear_buffer_new(bh);
					set_buffer_uptodate(bh);
					mark_buffer_dirty(bh);
					continue;
				}
				if (block_end > to || block_start < from)
					zero_user_segments(page, to, block_end,
							   block_start, from);
				continue;
			}
		}
		if (PageUptodate(page)) {
			if (!buffer_uptodate(bh))
				set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
		    !buffer_unwritten(bh) &&
		    (block_start < from || block_end > to)) {
			ll_rw_block(REQ_OP_READ, 0, 1, &bh);
			*wait_bh++ = bh;
			decrypt = ext4_encrypted_inode(inode) &&
				S_ISREG(inode->i_mode);
		}
	}
	/*
	 * If we issued read requests, let them complete.
	 */
	while (wait_bh > wait) {
		wait_on_buffer(*--wait_bh);
		if (!buffer_uptodate(*wait_bh))
			err = -EIO;
	}
	if (unlikely(err))
		page_zero_new_buffers(page, from, to);
	else if (decrypt)
		err = fscrypt_decrypt_page(page->mapping->host, page,
					   PAGE_SIZE, 0, page->index);
	return err;
}
#endif

static int ext4_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int ret, needed_blocks;
	handle_t *handle;
	int retries = 0;
	struct page *page;
	pgoff_t index;
	unsigned from, to;

	trace_ext4_write_begin(inode, pos, len, flags);
	/*
	 * Reserve one block more for addition to orphan list in case
	 * we allocate blocks but write fails for some reason
	 */
	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
	index = pos >> PAGE_SHIFT;
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
						    flags, pagep);
		if (ret < 0)
			return ret;
		if (ret == 1)
			return 0;
	}

	/*
	 * grab_cache_page_write_begin() can take a long time if the
	 * system is thrashing due to memory pressure, or if the page
	 * is being written back.  So grab it first before we start
	 * the transaction handle.  This also allows us to allocate
	 * the page (if needed) without using GFP_NOFS.
	 */
retry_grab:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	unlock_page(page);

retry_journal:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
	if (IS_ERR(handle)) {
		put_page(page);
		return PTR_ERR(handle);
	}

	lock_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		unlock_page(page);
		put_page(page);
		ext4_journal_stop(handle);
		goto retry_grab;
	}
	/* In case writeback began while the page was unlocked */
	wait_for_stable_page(page);

#ifdef CONFIG_EXT4_FS_ENCRYPTION
	if (ext4_should_dioread_nolock(inode))
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block_unwritten);
	else
		ret = ext4_block_write_begin(page, pos, len,
					     ext4_get_block);
#else
	if (ext4_should_dioread_nolock(inode))
		ret = __block_write_begin(page, pos, len,
					  ext4_get_block_unwritten);
	else
		ret = __block_write_begin(page, pos, len, ext4_get_block);
#endif
	if (!ret && ext4_should_journal_data(inode)) {
		ret = ext4_walk_page_buffers(handle, page_buffers(page),
					     from, to, NULL,
					     do_journal_get_write_access);
	}

	if (ret) {
		unlock_page(page);
		/*
		 * __block_write_begin may have instantiated a few blocks
		 * outside i_size.  Trim these off again. Don't need
		 * i_size_read because we hold i_mutex.
		 *
		 * Add inode to orphan list in case we crash before
		 * truncate finishes
		 */
		if (pos + len > inode->i_size && ext4_can_truncate(inode))
			ext4_orphan_add(handle, inode);

		ext4_journal_stop(handle);
		if (pos + len > inode->i_size) {
			ext4_truncate_failed_write(inode);
			/*
			 * If truncate failed early the inode might
			 * still be on the orphan list; we need to
			 * make sure the inode is removed from the
			 * orphan list in that case.
			 */
			if (inode->i_nlink)
				ext4_orphan_del(NULL, inode);
		}

		if (ret == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry_journal;
		put_page(page);
		return ret;
	}
	*pagep = page;
	return ret;
}
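
/*
 * Illustrative sketch of the lock ordering ext4_write_begin() implements
 * (a summary, not new code): a journal handle must be started before the
 * page lock is taken, yet the page may need to be allocated without
 * GFP_NOFS, so the function grabs the page first, drops its lock, starts
 * the handle, then re-locks the page and revalidates page->mapping:
 *
 *	page = grab_cache_page_write_begin(...);
 *	unlock_page(page);
 *	handle = ext4_journal_start(...);
 *	lock_page(page);
 *	if (page->mapping != mapping)
 *		goto retry_grab;	(page was truncated meanwhile)
 *
 * The same "transaction start ranks above page lock" rule is discussed in
 * the comment above ext4_writepage() later in this file.
 */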

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
	int ret;
	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	set_buffer_uptodate(bh);
	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	clear_buffer_meta(bh);
	clear_buffer_prio(bh);
	return ret;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_write_end(struct file *file,
			  struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int i_size_changed = 0;

	trace_ext4_write_end(inode, pos, len, copied);
	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else
		copied = block_write_end(file, mapping, pos,
					 len, copied, page, fsdata);
	/*
	 * It's important to update i_size while still holding the page lock:
	 * page writeout could otherwise come in and zero beyond i_size.
	 */
	i_size_changed = ext4_update_inode_size(inode, pos + copied);
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);
	/*
	 * Don't mark the inode dirty under page lock. First, it unnecessarily
	 * makes the holding time of page lock longer. Second, it forces lock
	 * ordering of page lock and transaction start for journaling
	 * filesystems.
	 */
	if (i_size_changed)
		ext4_mark_inode_dirty(handle, inode);

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than were copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);
errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;

	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * This is a private version of page_zero_new_buffers() which doesn't
 * set the buffer to be dirty, since in data=journal mode we need
 * to call ext4_handle_dirty_metadata() instead.
 */
static void ext4_journalled_zero_new_buffers(handle_t *handle,
					     struct page *page,
					     unsigned from, unsigned to)
{
	unsigned int block_start = 0, block_end;
	struct buffer_head *head, *bh;

	bh = head = page_buffers(page);
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh)) {
			if (block_end > from && block_start < to) {
				if (!PageUptodate(page)) {
					unsigned start, size;

					start = max(from, block_start);
					size = min(to, block_end) - start;

					zero_user(page, start, size);
					write_end_fn(handle, bh);
				}
				clear_buffer_new(bh);
			}
		}
		block_start = block_end;
		bh = bh->b_this_page;
	} while (bh != head);
}

static int ext4_journalled_write_end(struct file *file,
				     struct address_space *mapping,
				     loff_t pos, unsigned len, unsigned copied,
				     struct page *page, void *fsdata)
{
	handle_t *handle = ext4_journal_current_handle();
	struct inode *inode = mapping->host;
	loff_t old_size = inode->i_size;
	int ret = 0, ret2;
	int partial = 0;
	unsigned from, to;
	int size_changed = 0;

	trace_ext4_journalled_write_end(inode, pos, len, copied);
	from = pos & (PAGE_SIZE - 1);
	to = from + len;

	BUG_ON(!ext4_handle_valid(handle));

	if (ext4_has_inline_data(inode)) {
		ret = ext4_write_inline_data_end(inode, pos, len,
						 copied, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto errout;
		}
		copied = ret;
	} else if (unlikely(copied < len) && !PageUptodate(page)) {
		copied = 0;
		ext4_journalled_zero_new_buffers(handle, page, from, to);
	} else {
		if (unlikely(copied < len))
			ext4_journalled_zero_new_buffers(handle, page,
							 from + copied, to);
		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
					     from + copied, &partial,
					     write_end_fn);
		if (!partial)
			SetPageUptodate(page);
	}
	size_changed = ext4_update_inode_size(inode, pos + copied);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	unlock_page(page);
	put_page(page);

	if (old_size < pos)
		pagecache_isize_extended(inode, old_size, pos);

	if (size_changed) {
		ret2 = ext4_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}

	if (pos + len > inode->i_size && ext4_can_truncate(inode))
		/*
		 * If we have allocated more blocks than were copied, we will
		 * have blocks allocated outside inode->i_size, so truncate
		 * them.
		 */
		ext4_orphan_add(handle, inode);

errout:
	ret2 = ext4_journal_stop(handle);
	if (!ret)
		ret = ret2;
	if (pos + len > inode->i_size) {
		ext4_truncate_failed_write(inode);
		/*
		 * If truncate failed early the inode might still be
		 * on the orphan list; we need to make sure the inode
		 * is removed from the orphan list in that case.
		 */
		if (inode->i_nlink)
			ext4_orphan_del(NULL, inode);
	}

	return ret ? ret : copied;
}

/*
 * Reserve space for a single cluster
 */
static int ext4_da_reserve_space(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);
	int ret;

	/*
	 * We will charge metadata quota at writeout time; this saves
	 * us from metadata over-estimation, though we may go over by
	 * a small amount in the end.  Here we just reserve for data.
	 */
	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
	if (ret)
		return ret;

	spin_lock(&ei->i_block_reservation_lock);
	if (ext4_claim_free_clusters(sbi, 1, 0)) {
		spin_unlock(&ei->i_block_reservation_lock);
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
		return -ENOSPC;
	}
	ei->i_reserved_data_blocks++;
	trace_ext4_da_reserve_space(inode);
	spin_unlock(&ei->i_block_reservation_lock);

	return 0;	/* success */
}
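
/*
 * Illustrative sketch of the reservation life cycle (a summary of how the
 * helpers in this file pair up, not new code): each delayed-allocation
 * cluster goes through
 *
 *	ext4_da_reserve_space(inode);		at write_begin time
 *	...
 *	ext4_da_update_reserve_space(...);	blocks really allocated
 * or
 *	ext4_da_release_space(inode, n);	data discarded before
 *						allocation (e.g. truncate
 *						or invalidatepage)
 *
 * so i_reserved_data_blocks always counts clusters that are dirty in the
 * page cache but not yet allocated on disk.
 */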

static void ext4_da_release_space(struct inode *inode, int to_free)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (!to_free)
		return;		/* Nothing to release, exit */

	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

	trace_ext4_da_release_space(inode, to_free);
	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
		/*
		 * If there aren't enough reserved blocks, then the
		 * counter is messed up somewhere.  Since this
		 * function is called from invalidatepage, it's
		 * harmless to return without any action.
		 */
		ext4_warning(inode->i_sb, "ext4_da_release_space: "
			 "ino %lu, to_free %d with only %d reserved "
			 "data blocks", inode->i_ino, to_free,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		to_free = ei->i_reserved_data_blocks;
	}
	ei->i_reserved_data_blocks -= to_free;

	/* update fs dirty data blocks counter */
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
					     unsigned int offset,
					     unsigned int length)
{
	int to_release = 0, contiguous_blks = 0;
	struct buffer_head *head, *bh;
	unsigned int curr_off = 0;
	struct inode *inode = page->mapping->host;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned int stop = offset + length;
	int num_clusters;
	ext4_fsblk_t lblk;

	BUG_ON(stop > PAGE_SIZE || stop < length);

	head = page_buffers(page);
	bh = head;
	do {
		unsigned int next_off = curr_off + bh->b_size;

		if (next_off > stop)
			break;

		if ((offset <= curr_off) && (buffer_delay(bh))) {
			to_release++;
			contiguous_blks++;
			clear_buffer_delay(bh);
		} else if (contiguous_blks) {
			lblk = page->index <<
			       (PAGE_SHIFT - inode->i_blkbits);
			lblk += (curr_off >> inode->i_blkbits) -
				contiguous_blks;
			ext4_es_remove_extent(inode, lblk, contiguous_blks);
			contiguous_blks = 0;
		}
		curr_off = next_off;
	} while ((bh = bh->b_this_page) != head);

	if (contiguous_blks) {
		lblk = page->index << (PAGE_SHIFT - inode->i_blkbits);
		lblk += (curr_off >> inode->i_blkbits) - contiguous_blks;
		ext4_es_remove_extent(inode, lblk, contiguous_blks);
	}

	/* If we have released all the blocks belonging to a cluster, then we
	 * need to release the reserved space for that cluster. */
	num_clusters = EXT4_NUM_B2C(sbi, to_release);
	while (num_clusters > 0) {
		lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) +
			((num_clusters - 1) << sbi->s_cluster_bits);
		if (sbi->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, lblk))
			ext4_da_release_space(inode, 1);

		num_clusters--;
	}
}

/*
 * Delayed allocation stuff
 */

struct mpage_da_data {
	struct inode *inode;
	struct writeback_control *wbc;

	pgoff_t first_page;	/* The first page to write */
	pgoff_t next_page;	/* Current page to examine */
	pgoff_t last_page;	/* Last page to examine */
	/*
	 * Extent to map - this can be after first_page because that can be
	 * fully mapped.  We somewhat abuse m_flags to store whether the
	 * extent is delalloc or unwritten.
	 */
	struct ext4_map_blocks map;
	struct ext4_io_submit io_submit;	/* IO submission data */
};

static void mpage_release_unused_pages(struct mpage_da_data *mpd,
				       bool invalidate)
{
	int nr_pages, i;
	pgoff_t index, end;
	struct pagevec pvec;
	struct inode *inode = mpd->inode;
	struct address_space *mapping = inode->i_mapping;

	/* This is necessary when next_page == 0. */
	if (mpd->first_page >= mpd->next_page)
		return;

	index = mpd->first_page;
	end   = mpd->next_page - 1;
	if (invalidate) {
		ext4_lblk_t start, last;
		start = index << (PAGE_SHIFT - inode->i_blkbits);
		last = end << (PAGE_SHIFT - inode->i_blkbits);
		ext4_es_remove_extent(inode, start, last - start + 1);
	}

	pagevec_init(&pvec, 0);
	while (index <= end) {
		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
		if (nr_pages == 0)
			break;
		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			if (page->index > end)
				break;
			BUG_ON(!PageLocked(page));
			BUG_ON(PageWriteback(page));
			if (invalidate) {
				if (page_mapped(page))
					clear_page_dirty_for_io(page);
				block_invalidatepage(page, 0, PAGE_SIZE);
				ClearPageUptodate(page);
			}
			unlock_page(page);
		}
		index = pvec.pages[nr_pages - 1]->index + 1;
		pagevec_release(&pvec);
	}
}

static void ext4_print_free_blocks(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct super_block *sb = inode->i_sb;
	struct ext4_inode_info *ei = EXT4_I(inode);

	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
		 EXT4_C2B(EXT4_SB(inode->i_sb),
			  ext4_count_free_clusters(sb)));
	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_freeclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
		 (long long) EXT4_C2B(EXT4_SB(sb),
		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
	ext4_msg(sb, KERN_CRIT, "Block reservation details");
	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
		 ei->i_reserved_data_blocks);
	return;
}

static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
{
	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
}

/*
 * This function borrows code from the very beginning of ext4_map_blocks,
 * but assumes that the caller is in the delayed-write path.  It looks up
 * the requested blocks and sets the buffer delay bit under the protection
 * of i_data_sem.
 */
static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
			      struct ext4_map_blocks *map,
			      struct buffer_head *bh)
{
	struct extent_status es;
	int retval;
	sector_t invalid_block = ~((sector_t) 0xffff);
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
		invalid_block = ~0;

	map->m_flags = 0;
	ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
		  "logical block %lu\n", inode->i_ino, map->m_len,
		  (unsigned long) map->m_lblk);

	/* Look up the extent status tree first */
	if (ext4_es_lookup_extent(inode, iblock, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
			goto add_delayed;
		}

		/*
		 * The delayed extent could have been allocated by fallocate
		 * in the meantime, so we need to check for that.
		 */
		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
			map_bh(bh, inode->i_sb, invalid_block);
			set_buffer_new(bh);
			set_buffer_delay(bh);
			return 0;
		}

		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
		retval = es.es_len - (iblock - es.es_lblk);
		if (retval > map->m_len)
			retval = map->m_len;
		map->m_len = retval;
		if (ext4_es_is_written(&es))
			map->m_flags |= EXT4_MAP_MAPPED;
		else if (ext4_es_is_unwritten(&es))
			map->m_flags |= EXT4_MAP_UNWRITTEN;
		else
			BUG_ON(1);

#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
#endif
		return retval;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);

add_delayed:
	if (retval == 0) {
		int ret;
		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */
		/*
		 * If the block was allocated from a previously allocated
		 * cluster, then we don't need to reserve it again.  However
		 * we still need to reserve metadata for every block we're
		 * going to write.
		 */
		if (EXT4_SB(inode->i_sb)->s_cluster_ratio == 1 ||
		    !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
			ret = ext4_da_reserve_space(inode);
			if (ret) {
				/* not enough space to reserve */
				retval = ret;
				goto out_unlock;
			}
		}

		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    ~0, EXTENT_STATUS_DELAYED);
		if (ret) {
			retval = ret;
			goto out_unlock;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		int ret;
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret != 0)
			retval = ret;
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));

	return retval;
}

/*
 * This is a special get_block_t callback which is used by
 * ext4_da_write_begin().  It will either return a mapped block or
 * reserve space for a single block.
 *
 * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
 * We also have b_blocknr = -1 and b_bdev initialized properly.
 *
 * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
 * We also have b_blocknr = the physical block mapping the unwritten extent
 * and b_bdev initialized properly.
 */
int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int create)
{
	struct ext4_map_blocks map;
	int ret = 0;

	BUG_ON(create == 0);
	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);

	map.m_lblk = iblock;
	map.m_len = 1;

	/*
	 * First, we need to know whether the block is allocated already;
	 * preallocated blocks are unmapped but should be treated
	 * the same as allocated blocks.
	 */
	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
	if (ret <= 0)
		return ret;

	map_bh(bh, inode->i_sb, map.m_pblk);
	ext4_update_bh_state(bh, map.m_flags);

	if (buffer_unwritten(bh)) {
		/* A delayed write to an unwritten bh should be marked
		 * new and mapped.  Mapped ensures that we don't do
		 * get_block multiple times when we write to the same
		 * offset, and new ensures that we do a proper zero-out
		 * for partial writes.
		 */
		set_buffer_new(bh);
		set_buffer_mapped(bh);
	}
	return 0;
}
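
/*
 * Illustrative sketch (not code from this function): ext4_da_get_block_prep()
 * is a get_block_t, so the delalloc write_begin path hands it to the generic
 * helper roughly as
 *
 *	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
 *
 * With create forced to 1 no real allocation happens here; a hole only gets
 * a reservation plus a delayed extent-status entry, and the actual blocks
 * are allocated later by the writeback path via mpage_map_one_extent()
 * below.
 */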

static int bget_one(handle_t *handle, struct buffer_head *bh)
{
	get_bh(bh);
	return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
	put_bh(bh);
	return 0;
}

static int __ext4_journalled_writepage(struct page *page,
				       unsigned int len)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	struct buffer_head *page_bufs = NULL;
	handle_t *handle = NULL;
	int ret = 0, err = 0;
	int inline_data = ext4_has_inline_data(inode);
	struct buffer_head *inode_bh = NULL;

	ClearPageChecked(page);

	if (inline_data) {
		BUG_ON(page->index != 0);
		BUG_ON(len > ext4_get_max_inline_size(inode));
		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
		if (inode_bh == NULL)
			goto out;
	} else {
		page_bufs = page_buffers(page);
		if (!page_bufs) {
			BUG();
			goto out;
		}
		ext4_walk_page_buffers(handle, page_bufs, 0, len,
				       NULL, bget_one);
	}
	/*
	 * We need to release the page lock before we start the
	 * journal, so grab a reference so the page won't disappear
	 * out from under us.
	 */
	get_page(page);
	unlock_page(page);

	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		put_page(page);
		goto out_no_pagelock;
	}
	BUG_ON(!ext4_handle_valid(handle));

	lock_page(page);
	put_page(page);
	if (page->mapping != mapping) {
		/* The page got truncated from under us */
		ext4_journal_stop(handle);
		ret = 0;
		goto out;
	}

	if (inline_data) {
		BUFFER_TRACE(inode_bh, "get write access");
		ret = ext4_journal_get_write_access(handle, inode_bh);

		err = ext4_handle_dirty_metadata(handle, inode, inode_bh);

	} else {
		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     do_journal_get_write_access);

		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
					     write_end_fn);
	}
	if (ret == 0)
		ret = err;
	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
	err = ext4_journal_stop(handle);
	if (!ret)
		ret = err;

	if (!ext4_has_inline_data(inode))
		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
				       NULL, bput_one);
	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
out:
	unlock_page(page);
out_no_pagelock:
	brelse(inode_bh);
	return ret;
}

/*
 * Note that we don't need to start a transaction unless we're journaling data
 * because we should have holes filled from ext4_page_mkwrite().  We don't
 * even need to add the inode to the transaction's list in ordered mode,
 * because if we are writing back data added by write(), the inode is already
 * there, and if we are writing back data modified via mmap(), no one
 * guarantees in which transaction the data will hit the disk.  In case we
 * are journaling data, we cannot start a transaction directly because
 * transaction start ranks above page lock, so we have to do some magic.
 *
 * This function can get called via...
 *   - ext4_writepages after taking page lock (have journal handle)
 *   - journal_submit_inode_data_buffers (no journal handle)
 *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
 *   - grab_page_cache when doing write_begin (have journal handle)
 *
 * We don't do any block allocation in this function.  If we have a page with
 * multiple blocks we need to write those buffer_heads that are mapped.  This
 * is important for mmap-based writes.  So if we do, with blocksize 1K,
 *	truncate(f, 1024);
 *	a = mmap(f, 0, 4096);
 *	a[0] = 'a';
 *	truncate(f, 4096);
 * we have in the page the first buffer_head mapped via the page_mkwrite
 * callback, but other buffer_heads would be unmapped but dirty (dirtied via
 * do_wp_page), so writepage should write the first block.  If we modify
 * the mmap area beyond 1024 we will again get a page fault and the
 * page_mkwrite callback will do the block allocation and mark the
 * buffer_heads mapped.
 *
 * We redirty the page if we have any buffer_heads that are either delayed
 * or unwritten in the page.
 *
 * We can get recursively called as shown below:
 *
 *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *	ext4_writepage()
 *
 * But since we don't do any block allocation we should not deadlock.
 * The page also has its dirty flag cleared, so we don't get a recursive
 * page_lock.
 */
static int ext4_writepage(struct page *page,
			  struct writeback_control *wbc)
{
	int ret = 0;
	loff_t size;
	unsigned int len;
	struct buffer_head *page_bufs = NULL;
	struct inode *inode = page->mapping->host;
	struct ext4_io_submit io_submit;
	bool keep_towrite = false;

	trace_ext4_writepage(page);
	size = i_size_read(inode);
	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;

	page_bufs = page_buffers(page);
	/*
	 * We cannot do block allocation or other extent handling in this
	 * function.  If there are buffers needing that, we have to redirty
	 * the page.  But we may reach here when we do a journal commit via
	 * journal_submit_inode_data_buffers() and in that case we must write
	 * allocated buffers to achieve data=ordered mode guarantees.
	 *
	 * Also, if there is only one buffer per page (the fs block
	 * size == the page size), if one buffer needs block
	 * allocation or needs to modify the extent tree to clear the
	 * unwritten flag, we know that the page can't be written at
	 * all, so we might as well refuse the write immediately.
	 * Unfortunately if the block size != page size, we can't as
	 * easily detect this case using ext4_walk_page_buffers(), but
	 * for the extremely common case, this is an optimization that
	 * skips a useless round trip through ext4_bio_write_page().
	 */
	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
				   ext4_bh_delay_or_unwritten)) {
		redirty_page_for_writepage(wbc, page);
		if ((current->flags & PF_MEMALLOC) ||
		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
			/*
			 * For memory cleaning there's no point in writing only
			 * some buffers.  So just bail out.  Warn if we came
			 * here from direct reclaim.
			 */
			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
							== PF_MEMALLOC);
			unlock_page(page);
			return 0;
		}
		keep_towrite = true;
	}

	if (PageChecked(page) && ext4_should_journal_data(inode))
		/*
		 * It's mmapped pagecache.  Add buffers and journal it.  There
		 * doesn't seem much point in redirtying the page here.
		 */
		return __ext4_journalled_writepage(page, len);

	ext4_io_submit_init(&io_submit, wbc);
	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
	if (!io_submit.io_end) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return -ENOMEM;
	}
	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
	ext4_io_submit(&io_submit);
	/* Drop io_end reference we got from init */
	ext4_put_io_end_defer(io_submit.io_end);
	return ret;
}

static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
{
	int len;
	loff_t size = i_size_read(mpd->inode);
	int err;

	BUG_ON(page->index != mpd->first_page);
	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	clear_page_dirty_for_io(page);
	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
	if (!err)
		mpd->wbc->nr_to_write--;
	mpd->first_page++;

	return err;
}

#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))

/*
 * mballoc gives us at most this number of blocks...
 * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
 * The rest of mballoc seems to handle chunks up to full group size.
 */
#define MAX_WRITEPAGES_EXTENT_LEN 2048

/*
 * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
 *
 * @mpd - extent of blocks
 * @lblk - logical number of the block in the file
 * @bh - buffer head we want to add to the extent
 *
 * The function is used to collect contiguous blocks in the same state.  If
 * the buffer doesn't require mapping for writeback and we haven't started
 * the extent of buffers to map yet, the function returns 'true' immediately
 * - the caller can write the buffer right away.  Otherwise the function
 * returns true if the block has been added to the extent, false if the block
 * couldn't be added.
 */
static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
				   struct buffer_head *bh)
{
	struct ext4_map_blocks *map = &mpd->map;

	/* Buffer that doesn't need mapping for writeback? */
	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
		/* So far no extent to map => we write the buffer right away */
		if (map->m_len == 0)
			return true;
		return false;
	}

	/* First block in the extent? */
	if (map->m_len == 0) {
		map->m_lblk = lblk;
		map->m_len = 1;
		map->m_flags = bh->b_state & BH_FLAGS;
		return true;
	}

	/* Don't go larger than mballoc is willing to allocate */
	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
		return false;

	/* Can we merge the block to our big extent? */
	if (lblk == map->m_lblk + map->m_len &&
	    (bh->b_state & BH_FLAGS) == map->m_flags) {
		map->m_len++;
		return true;
	}
	return false;
}
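
/*
 * Worked example of the merge rule above (hypothetical numbers): with an
 * accumulated extent of m_lblk == 100, m_len == 4 and m_flags == BH_Delay,
 * a delayed buffer at lblk 104 extends the extent to m_len == 5, while a
 * delayed buffer at lblk 105 (not contiguous) or an unwritten buffer at
 * lblk 104 (state mismatch) makes the function return false, which tells
 * the caller to map the current extent first and start a new one.
 */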

/*
 * mpage_process_page_bufs - submit page buffers for IO or add them to extent
 *
 * @mpd - extent of blocks for mapping
 * @head - the first buffer in the page
 * @bh - buffer we should start processing from
 * @lblk - logical number of the block in the file corresponding to @bh
 *
 * Walk through page buffers from @bh up to @head (exclusive) and either
 * submit the page for IO if all buffers in this page were mapped and there's
 * no accumulated extent of buffers to map, or add buffers in the page to the
 * extent of buffers to map.  The function returns 1 if the caller can
 * continue by processing the next page, 0 if it should stop adding buffers
 * to the extent to map because we cannot extend it anymore.  It can also
 * return a value < 0 in case of error during IO submission.
 */
static int mpage_process_page_bufs(struct mpage_da_data *mpd,
				   struct buffer_head *head,
				   struct buffer_head *bh,
				   ext4_lblk_t lblk)
{
	struct inode *inode = mpd->inode;
	int err;
	ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
							>> inode->i_blkbits;

	do {
		BUG_ON(buffer_locked(bh));

		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
			/* Found extent to map? */
			if (mpd->map.m_len)
				return 0;
			/* Everything mapped so far and we hit EOF */
			break;
		}
	} while (lblk++, (bh = bh->b_this_page) != head);
	/* So far everything mapped? Submit the page for IO. */
	if (mpd->map.m_len == 0) {
		err = mpage_submit_page(mpd, head->b_page);
		if (err < 0)
			return err;
	}
	return lblk < blocks;
}
2237
2238/*
2239 * mpage_map_buffers - update buffers corresponding to changed extent and
2240 * submit fully mapped pages for IO
2241 *
2242 * @mpd - description of extent to map, on return next extent to map
2243 *
2244 * Scan buffers corresponding to changed extent (we expect corresponding pages
2245 * to be already locked) and update buffer state according to new extent state.
2246 * We map delalloc buffers to their physical location, clear unwritten bits,
2247 * and mark buffers as uninit when we perform writes to unwritten extents
2248 * and do extent conversion after IO is finished. If the last page is not fully
2249 * mapped, we update @map to the next extent in the last page that needs
2250 * mapping. Otherwise we submit the page for IO.
2251 */
2252static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2253{
2254 struct pagevec pvec;
2255 int nr_pages, i;
2256 struct inode *inode = mpd->inode;
2257 struct buffer_head *head, *bh;
2258 int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2259 pgoff_t start, end;
2260 ext4_lblk_t lblk;
2261 sector_t pblock;
2262 int err;
2263
2264 start = mpd->map.m_lblk >> bpp_bits;
2265 end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2266 lblk = start << bpp_bits;
2267 pblock = mpd->map.m_pblk;
2268
2269 pagevec_init(&pvec, 0);
2270 while (start <= end) {
2271 nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
2272 PAGEVEC_SIZE);
2273 if (nr_pages == 0)
2274 break;
2275 for (i = 0; i < nr_pages; i++) {
2276 struct page *page = pvec.pages[i];
2277
2278 if (page->index > end)
2279 break;
2280 /* Up to 'end' pages must be contiguous */
2281 BUG_ON(page->index != start);
2282 bh = head = page_buffers(page);
2283 do {
2284 if (lblk < mpd->map.m_lblk)
2285 continue;
2286 if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2287 /*
2288 * Buffer after end of mapped extent.
2289 * Find next buffer in the page to map.
2290 */
2291 mpd->map.m_len = 0;
2292 mpd->map.m_flags = 0;
2293 /*
2294 * FIXME: If dioread_nolock supports
2295 * blocksize < pagesize, we need to make
2296 * sure we add size mapped so far to
2297 * io_end->size as the following call
2298 * can submit the page for IO.
2299 */
2300 err = mpage_process_page_bufs(mpd, head,
2301 bh, lblk);
2302 pagevec_release(&pvec);
2303 if (err > 0)
2304 err = 0;
2305 return err;
2306 }
2307 if (buffer_delay(bh)) {
2308 clear_buffer_delay(bh);
2309 bh->b_blocknr = pblock++;
2310 }
2311 clear_buffer_unwritten(bh);
2312 } while (lblk++, (bh = bh->b_this_page) != head);
2313
2314 /*
2315 * FIXME: This is going to break if dioread_nolock
2316 * supports blocksize < pagesize as we will try to
2317 * convert potentially unmapped parts of inode.
2318 */
2319 mpd->io_submit.io_end->size += PAGE_SIZE;
2320 /* Page fully mapped - let IO run! */
2321 err = mpage_submit_page(mpd, page);
2322 if (err < 0) {
2323 pagevec_release(&pvec);
2324 return err;
2325 }
2326 start++;
2327 }
2328 pagevec_release(&pvec);
2329 }
2330 /* Extent fully mapped and matches with page boundary. We are done. */
2331 mpd->map.m_len = 0;
2332 mpd->map.m_flags = 0;
2333 return 0;
2334}
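/*
 * Worked example for the index arithmetic above (illustrative, not part
 * of the original source): with 1k blocks on 4k pages, i_blkbits = 10
 * and PAGE_SHIFT = 12, so bpp_bits = 2 (four blocks per page). For an
 * extent with m_lblk = 5 and m_len = 6 (blocks 5..10): start = 5 >> 2 = 1,
 * end = 10 >> 2 = 2, and lblk = 1 << 2 = 4. Scanning starts at the first
 * buffer of page 1 (block 4); the "lblk < mpd->map.m_lblk" check skips
 * block 4, and blocks 5..10 across pages 1 and 2 get their state updated.
 */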
2335
2336static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2337{
2338 struct inode *inode = mpd->inode;
2339 struct ext4_map_blocks *map = &mpd->map;
2340 int get_blocks_flags;
2341 int err, dioread_nolock;
2342
2343 trace_ext4_da_write_pages_extent(inode, map);
2344 /*
2345 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2346 * to convert an unwritten extent to be initialized (in the case
2347 * where we have written into one or more preallocated blocks). It is
2348 * possible that we're going to need more metadata blocks than
2349 * previously reserved. However we must not fail because we're in
2350 * writeback and there is nothing we can do about it so it might result
2351 * in data loss. So use reserved blocks to allocate metadata if
2352 * possible.
2353 *
2354 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2355 * the blocks in question are delalloc blocks. This indicates
2356 * that the blocks and quotas have already been checked when
2357 * the data was copied into the page cache.
2358 */
2359 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2360 EXT4_GET_BLOCKS_METADATA_NOFAIL |
2361 EXT4_GET_BLOCKS_IO_SUBMIT;
2362 dioread_nolock = ext4_should_dioread_nolock(inode);
2363 if (dioread_nolock)
2364 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2365 if (map->m_flags & (1 << BH_Delay))
2366 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2367
2368 err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2369 if (err < 0)
2370 return err;
2371 if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2372 if (!mpd->io_submit.io_end->handle &&
2373 ext4_handle_valid(handle)) {
2374 mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2375 handle->h_rsv_handle = NULL;
2376 }
2377 ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2378 }
2379
2380 BUG_ON(map->m_len == 0);
2381 if (map->m_flags & EXT4_MAP_NEW) {
2382 clean_bdev_aliases(inode->i_sb->s_bdev, map->m_pblk,
2383 map->m_len);
2384 }
2385 return 0;
2386}
2387
2388/*
2389 * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2390 * mpd->len and submit pages underlying it for IO
2391 *
2392 * @handle - handle for journal operations
2393 * @mpd - extent to map
2394 * @give_up_on_write - we set this to true iff there is a fatal error and there
2395 * is no hope of writing the data. The caller should discard
2396 * dirty pages to avoid infinite loops.
2397 *
2398 * The function maps the extent starting at mpd->lblk of length mpd->len. If it
2399 * is delayed, blocks are allocated; if it is unwritten, we may need to convert
2400 * them to initialized or split the described range from a larger unwritten
2401 * extent. Note that we need not map all of the described range since allocation
2402 * can return fewer blocks or the range may be covered by more unwritten extents. We
2403 * cannot map more because we are limited by reserved transaction credits. On
2404 * the other hand we always make sure that the last touched page is fully
2405 * mapped so that it can be written out (and thus forward progress is
2406 * guaranteed). After mapping we submit all mapped pages for IO.
2407 */
2408static int mpage_map_and_submit_extent(handle_t *handle,
2409 struct mpage_da_data *mpd,
2410 bool *give_up_on_write)
2411{
2412 struct inode *inode = mpd->inode;
2413 struct ext4_map_blocks *map = &mpd->map;
2414 int err;
2415 loff_t disksize;
2416 int progress = 0;
2417
2418 mpd->io_submit.io_end->offset =
2419 ((loff_t)map->m_lblk) << inode->i_blkbits;
2420 do {
2421 err = mpage_map_one_extent(handle, mpd);
2422 if (err < 0) {
2423 struct super_block *sb = inode->i_sb;
2424
2425 if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2426 goto invalidate_dirty_pages;
2427 /*
2428 * Let the upper layers retry transient errors.
2429 * In the case of ENOSPC, if ext4_count_free_clusters()
2430 * is non-zero, a commit should free up blocks.
2431 */
2432 if ((err == -ENOMEM) ||
2433 (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2434 if (progress)
2435 goto update_disksize;
2436 return err;
2437 }
2438 ext4_msg(sb, KERN_CRIT,
2439 "Delayed block allocation failed for "
2440 "inode %lu at logical offset %llu with"
2441 " max blocks %u with error %d",
2442 inode->i_ino,
2443 (unsigned long long)map->m_lblk,
2444 (unsigned)map->m_len, -err);
2445 ext4_msg(sb, KERN_CRIT,
2446 "This should not happen!! Data will "
2447 "be lost\n");
2448 if (err == -ENOSPC)
2449 ext4_print_free_blocks(inode);
2450 invalidate_dirty_pages:
2451 *give_up_on_write = true;
2452 return err;
2453 }
2454 progress = 1;
2455 /*
2456 * Update buffer state, submit mapped pages, and get us new
2457 * extent to map
2458 */
2459 err = mpage_map_and_submit_buffers(mpd);
2460 if (err < 0)
2461 goto update_disksize;
2462 } while (map->m_len);
2463
2464update_disksize:
2465 /*
2466 * Update on-disk size after IO is submitted. Races with
2467 * truncate are avoided by checking i_size under i_data_sem.
2468 */
2469 disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2470 if (disksize > EXT4_I(inode)->i_disksize) {
2471 int err2;
2472 loff_t i_size;
2473
2474 down_write(&EXT4_I(inode)->i_data_sem);
2475 i_size = i_size_read(inode);
2476 if (disksize > i_size)
2477 disksize = i_size;
2478 if (disksize > EXT4_I(inode)->i_disksize)
2479 EXT4_I(inode)->i_disksize = disksize;
2480 err2 = ext4_mark_inode_dirty(handle, inode);
2481 up_write(&EXT4_I(inode)->i_data_sem);
2482 if (err2)
2483 ext4_error(inode->i_sb,
2484 "Failed to mark inode %lu dirty",
2485 inode->i_ino);
2486 if (!err)
2487 err = err2;
2488 }
2489 return err;
2490}
2491
2492/*
2493 * Calculate the total number of credits to reserve for one writepages
2494 * iteration. This is called from ext4_writepages(). We map an extent of
2495 * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2496 * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2497 * bpp - 1 blocks in bpp different extents.
2498 */
2499static int ext4_da_writepages_trans_blocks(struct inode *inode)
2500{
2501 int bpp = ext4_journal_blocks_per_page(inode);
2502
2503 return ext4_meta_trans_blocks(inode,
2504 MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2505}
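/*
 * Worked example (illustrative, not part of the original source): with
 * 1k blocks on 4k pages, bpp = 4, so we reserve credits for mapping
 * MAX_WRITEPAGES_EXTENT_LEN + 3 blocks (2048 + 3, using the value
 * defined earlier in this file) spread over at most bpp = 4 separate
 * extents. With 4k blocks on 4k pages, bpp = 1 and the reservation
 * covers exactly MAX_WRITEPAGES_EXTENT_LEN blocks in a single extent.
 */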
2506
2507/*
2508 * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2509 * and underlying extent to map
2510 *
2511 * @mpd - where to look for pages
2512 *
2513 * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2514 * IO immediately. When we find a page which isn't mapped we start accumulating
2515 * an extent of buffers underlying these pages that needs mapping (formed by
2516 * either delayed or unwritten buffers). We also lock the pages containing
2517 * these buffers. The extent found is returned in @mpd structure (starting at
2518 * mpd->lblk with length mpd->len blocks).
2519 *
2520 * Note that this function can attach bios to one io_end structure which are
2521 * neither logically nor physically contiguous. Although it may seem an
2522 * unnecessary complication, it is actually inevitable in blocksize < pagesize
2523 * case as we need to track IO to all buffers underlying a page in one io_end.
2524 */
2525static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2526{
2527 struct address_space *mapping = mpd->inode->i_mapping;
2528 struct pagevec pvec;
2529 unsigned int nr_pages;
2530 long left = mpd->wbc->nr_to_write;
2531 pgoff_t index = mpd->first_page;
2532 pgoff_t end = mpd->last_page;
2533 int tag;
2534 int i, err = 0;
2535 int blkbits = mpd->inode->i_blkbits;
2536 ext4_lblk_t lblk;
2537 struct buffer_head *head;
2538
2539 if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2540 tag = PAGECACHE_TAG_TOWRITE;
2541 else
2542 tag = PAGECACHE_TAG_DIRTY;
2543
2544 pagevec_init(&pvec, 0);
2545 mpd->map.m_len = 0;
2546 mpd->next_page = index;
2547 while (index <= end) {
2548 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2549 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2550 if (nr_pages == 0)
2551 goto out;
2552
2553 for (i = 0; i < nr_pages; i++) {
2554 struct page *page = pvec.pages[i];
2555
2556 /*
2557 * At this point, the page may be truncated or
2558 * invalidated (changing page->mapping to NULL), or
2559 * even swizzled back from swapper_space to tmpfs file
2560 * mapping. However, page->index will not change
2561 * because we have a reference on the page.
2562 */
2563 if (page->index > end)
2564 goto out;
2565
2566 /*
2567 * Accumulated enough dirty pages? This doesn't apply
2568 * to WB_SYNC_ALL mode. For integrity sync we have to
2569 * keep going because someone may be concurrently
2570 * dirtying pages, and we might have synced a lot of
2571 * newly appeared dirty pages, but have not synced all
2572 * of the old dirty pages.
2573 */
2574 if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2575 goto out;
2576
2577 /* If we can't merge this page, we are done. */
2578 if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2579 goto out;
2580
2581 lock_page(page);
2582 /*
2583 * If the page is no longer dirty, or its mapping no
2584 * longer corresponds to the inode we are writing (which
2585 * means it has been truncated or invalidated), or the
2586 * page is already under writeback and we are not doing
2587 * a data integrity writeback, skip the page
2588 */
2589 if (!PageDirty(page) ||
2590 (PageWriteback(page) &&
2591 (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2592 unlikely(page->mapping != mapping)) {
2593 unlock_page(page);
2594 continue;
2595 }
2596
2597 wait_on_page_writeback(page);
2598 BUG_ON(PageWriteback(page));
2599
2600 if (mpd->map.m_len == 0)
2601 mpd->first_page = page->index;
2602 mpd->next_page = page->index + 1;
2603 /* Add all dirty buffers to mpd */
2604 lblk = ((ext4_lblk_t)page->index) <<
2605 (PAGE_SHIFT - blkbits);
2606 head = page_buffers(page);
2607 err = mpage_process_page_bufs(mpd, head, head, lblk);
2608 if (err <= 0)
2609 goto out;
2610 err = 0;
2611 left--;
2612 }
2613 pagevec_release(&pvec);
2614 cond_resched();
2615 }
2616 return 0;
2617out:
2618 pagevec_release(&pvec);
2619 return err;
2620}
2621
2622static int __writepage(struct page *page, struct writeback_control *wbc,
2623 void *data)
2624{
2625 struct address_space *mapping = data;
2626 int ret = ext4_writepage(page, wbc);
2627 mapping_set_error(mapping, ret);
2628 return ret;
2629}
2630
2631static int ext4_writepages(struct address_space *mapping,
2632 struct writeback_control *wbc)
2633{
2634 pgoff_t writeback_index = 0;
2635 long nr_to_write = wbc->nr_to_write;
2636 int range_whole = 0;
2637 int cycled = 1;
2638 handle_t *handle = NULL;
2639 struct mpage_da_data mpd;
2640 struct inode *inode = mapping->host;
2641 int needed_blocks, rsv_blocks = 0, ret = 0;
2642 struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2643 bool done;
2644 struct blk_plug plug;
2645 bool give_up_on_write = false;
2646
2647 percpu_down_read(&sbi->s_journal_flag_rwsem);
2648 trace_ext4_writepages(inode, wbc);
2649
2650 if (dax_mapping(mapping)) {
2651 ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
2652 wbc);
2653 goto out_writepages;
2654 }
2655
2656 /*
2657 * No pages to write? This is mainly a kludge to avoid starting
2658 * a transaction for special inodes like the journal inode on last iput()
2659 * because that could violate lock ordering on umount.
2660 */
2661 if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2662 goto out_writepages;
2663
2664 if (ext4_should_journal_data(inode)) {
2665 struct blk_plug plug;
2666
2667 blk_start_plug(&plug);
2668 ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2669 blk_finish_plug(&plug);
2670 goto out_writepages;
2671 }
2672
2673 /*
2674 * If the filesystem has aborted, it is read-only, so return
2675 * right away instead of dumping stack traces later on that
2676 * will obscure the real source of the problem. We test
2677 * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2678 * the latter could be true if the filesystem is mounted
2679 * read-only, and in that case, ext4_writepages should
2680 * *never* be called, so if that ever happens, we would want
2681 * the stack trace.
2682 */
2683 if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2684 ret = -EROFS;
2685 goto out_writepages;
2686 }
2687
2688 if (ext4_should_dioread_nolock(inode)) {
2689 /*
2690 * We may need to convert up to one extent per block in
2691 * the page and we may dirty the inode.
2692 */
2693 rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits);
2694 }
2695
2696 /*
2697 * If we have inline data and arrive here, it means that
2698 * we will soon create the block for the 1st page, so
2699 * we'd better clear the inline data here.
2700 */
2701 if (ext4_has_inline_data(inode)) {
2702 /* Just inode will be modified... */
2703 handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2704 if (IS_ERR(handle)) {
2705 ret = PTR_ERR(handle);
2706 goto out_writepages;
2707 }
2708 BUG_ON(ext4_test_inode_state(inode,
2709 EXT4_STATE_MAY_INLINE_DATA));
2710 ext4_destroy_inline_data(handle, inode);
2711 ext4_journal_stop(handle);
2712 }
2713
2714 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2715 range_whole = 1;
2716
2717 if (wbc->range_cyclic) {
2718 writeback_index = mapping->writeback_index;
2719 if (writeback_index)
2720 cycled = 0;
2721 mpd.first_page = writeback_index;
2722 mpd.last_page = -1;
2723 } else {
2724 mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2725 mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2726 }
2727
2728 mpd.inode = inode;
2729 mpd.wbc = wbc;
2730 ext4_io_submit_init(&mpd.io_submit, wbc);
2731retry:
2732 if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2733 tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2734 done = false;
2735 blk_start_plug(&plug);
2736 while (!done && mpd.first_page <= mpd.last_page) {
2737 /* For each extent of pages we use new io_end */
2738 mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2739 if (!mpd.io_submit.io_end) {
2740 ret = -ENOMEM;
2741 break;
2742 }
2743
2744 /*
2745 * We have two constraints: We find one extent to map and we
2746 * must always write out the whole page (this makes a difference when
2747 * blocksize < pagesize) so that we don't block on IO when we
2748 * try to write out the rest of the page. Journalled mode is
2749 * not supported by delalloc.
2750 */
2751 BUG_ON(ext4_should_journal_data(inode));
2752 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2753
2754 /* start a new transaction */
2755 handle = ext4_journal_start_with_reserve(inode,
2756 EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2757 if (IS_ERR(handle)) {
2758 ret = PTR_ERR(handle);
2759 ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2760 "%ld pages, ino %lu; err %d", __func__,
2761 wbc->nr_to_write, inode->i_ino, ret);
2762 /* Release allocated io_end */
2763 ext4_put_io_end(mpd.io_submit.io_end);
2764 break;
2765 }
2766
2767 trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2768 ret = mpage_prepare_extent_to_map(&mpd);
2769 if (!ret) {
2770 if (mpd.map.m_len)
2771 ret = mpage_map_and_submit_extent(handle, &mpd,
2772 &give_up_on_write);
2773 else {
2774 /*
2775 * We scanned the whole range (or exhausted
2776 * nr_to_write), submitted what was mapped and
2777 * didn't find anything needing mapping. We are
2778 * done.
2779 */
2780 done = true;
2781 }
2782 }
2783 /*
2784 * Caution: If the handle is synchronous,
2785 * ext4_journal_stop() can wait for transaction commit
2786 * to finish which may depend on writeback of pages to
2787 * complete or on page lock to be released. In that
2788 * case, we have to wait until after we have
2789 * submitted all the IO, released page locks we hold,
2790 * and dropped io_end reference (for extent conversion
2791 * to be able to complete) before stopping the handle.
2792 */
2793 if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2794 ext4_journal_stop(handle);
2795 handle = NULL;
2796 }
2797 /* Submit prepared bio */
2798 ext4_io_submit(&mpd.io_submit);
2799 /* Unlock pages we didn't use */
2800 mpage_release_unused_pages(&mpd, give_up_on_write);
2801 /*
2802 * Drop our io_end reference we got from init. We have
2803 * to be careful and use deferred io_end finishing if
2804 * we are still holding the transaction as we can
2805 * release the last reference to io_end which may end
2806 * up doing unwritten extent conversion.
2807 */
2808 if (handle) {
2809 ext4_put_io_end_defer(mpd.io_submit.io_end);
2810 ext4_journal_stop(handle);
2811 } else
2812 ext4_put_io_end(mpd.io_submit.io_end);
2813
2814 if (ret == -ENOSPC && sbi->s_journal) {
2815 /*
2816 * Commit the transaction which would
2817 * free blocks released in the transaction
2818 * and try again
2819 */
2820 jbd2_journal_force_commit_nested(sbi->s_journal);
2821 ret = 0;
2822 continue;
2823 }
2824 /* Fatal error - ENOMEM, EIO... */
2825 if (ret)
2826 break;
2827 }
2828 blk_finish_plug(&plug);
2829 if (!ret && !cycled && wbc->nr_to_write > 0) {
2830 cycled = 1;
2831 mpd.last_page = writeback_index - 1;
2832 mpd.first_page = 0;
2833 goto retry;
2834 }
2835
2836 /* Update index */
2837 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2838 /*
2839 * Set the writeback_index so that range_cyclic
2840 * mode will write it back later
2841 */
2842 mapping->writeback_index = mpd.first_page;
2843
2844out_writepages:
2845 trace_ext4_writepages_result(inode, wbc, ret,
2846 nr_to_write - wbc->nr_to_write);
2847 percpu_up_read(&sbi->s_journal_flag_rwsem);
2848 return ret;
2849}
2850
2851static int ext4_nonda_switch(struct super_block *sb)
2852{
2853 s64 free_clusters, dirty_clusters;
2854 struct ext4_sb_info *sbi = EXT4_SB(sb);
2855
2856 /*
2857 * Switch to non-delalloc mode if we are running low
2858 * on free blocks. The free block accounting via percpu
2859 * counters can get slightly wrong with percpu_counter_batch getting
2860 * accumulated on each CPU without updating global counters.
2861 * Delalloc needs accurate free block accounting, so switch
2862 * to non-delalloc when we are near the error range.
2863 */
2864 free_clusters =
2865 percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2866 dirty_clusters =
2867 percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2868 /*
2869 * Start pushing delalloc when 1/2 of free blocks are dirty.
2870 */
2871 if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2872 try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2873
2874 if (2 * free_clusters < 3 * dirty_clusters ||
2875 free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2876 /*
2877 * free block count is less than 150% of dirty blocks
2878 * or free blocks are less than the watermark
2879 */
2880 return 1;
2881 }
2882 return 0;
2883}
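/*
 * Worked example for the thresholds above (illustrative, not part of the
 * original source): with free_clusters = 1000 and dirty_clusters = 800,
 * free < 2 * dirty (1000 < 1600) kicks background writeback, and
 * 2 * free < 3 * dirty (2000 < 2400) makes us return 1, i.e. fall back
 * to non-delalloc. With free_clusters = 5000 instead, neither inequality
 * holds and (assuming the watermark term is also satisfied) we return 0
 * and stay in delalloc mode.
 */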
2884
2885/* We always reserve for an inode update; the superblock could be there too */
2886static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2887{
2888 if (likely(ext4_has_feature_large_file(inode->i_sb)))
2889 return 1;
2890
2891 if (pos + len <= 0x7fffffffULL)
2892 return 1;
2893
2894 /* We might need to update the superblock to set LARGE_FILE */
2895 return 2;
2896}
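/*
 * Worked example (illustrative, not part of the original source): for a
 * write at pos = 0x7ffffff0 with len = 0x20, pos + len = 0x80000010
 * exceeds 0x7fffffff, so on a filesystem without the LARGE_FILE feature
 * we reserve 2 credits (inode + superblock, in case the feature bit must
 * be set). With the feature already present, or for any write ending at
 * or below 2GiB - 1, a single credit suffices.
 */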
2897
2898static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2899 loff_t pos, unsigned len, unsigned flags,
2900 struct page **pagep, void **fsdata)
2901{
2902 int ret, retries = 0;
2903 struct page *page;
2904 pgoff_t index;
2905 struct inode *inode = mapping->host;
2906 handle_t *handle;
2907
2908 index = pos >> PAGE_SHIFT;
2909
2910 if (ext4_nonda_switch(inode->i_sb) ||
2911 S_ISLNK(inode->i_mode)) {
2912 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2913 return ext4_write_begin(file, mapping, pos,
2914 len, flags, pagep, fsdata);
2915 }
2916 *fsdata = (void *)0;
2917 trace_ext4_da_write_begin(inode, pos, len, flags);
2918
2919 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2920 ret = ext4_da_write_inline_data_begin(mapping, inode,
2921 pos, len, flags,
2922 pagep, fsdata);
2923 if (ret < 0)
2924 return ret;
2925 if (ret == 1)
2926 return 0;
2927 }
2928
2929 /*
2930 * grab_cache_page_write_begin() can take a long time if the
2931 * system is thrashing due to memory pressure, or if the page
2932 * is being written back. So grab it first before we start
2933 * the transaction handle. This also allows us to allocate
2934 * the page (if needed) without using GFP_NOFS.
2935 */
2936retry_grab:
2937 page = grab_cache_page_write_begin(mapping, index, flags);
2938 if (!page)
2939 return -ENOMEM;
2940 unlock_page(page);
2941
2942 /*
2943 * With delayed allocation, we don't log the i_disksize update
2944 * if there is delayed block allocation. But we still need
2945 * to journal the i_disksize update if we write to the end
2946 * of a file which has an already mapped buffer.
2947 */
2948retry_journal:
2949 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2950 ext4_da_write_credits(inode, pos, len));
2951 if (IS_ERR(handle)) {
2952 put_page(page);
2953 return PTR_ERR(handle);
2954 }
2955
2956 lock_page(page);
2957 if (page->mapping != mapping) {
2958 /* The page got truncated from under us */
2959 unlock_page(page);
2960 put_page(page);
2961 ext4_journal_stop(handle);
2962 goto retry_grab;
2963 }
2964 /* In case writeback began while the page was unlocked */
2965 wait_for_stable_page(page);
2966
2967#ifdef CONFIG_EXT4_FS_ENCRYPTION
2968 ret = ext4_block_write_begin(page, pos, len,
2969 ext4_da_get_block_prep);
2970#else
2971 ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2972#endif
2973 if (ret < 0) {
2974 unlock_page(page);
2975 ext4_journal_stop(handle);
2976 /*
2977 * block_write_begin may have instantiated a few blocks
2978 * outside i_size. Trim these off again. Don't need
2979 * i_size_read because we hold i_mutex.
2980 */
2981 if (pos + len > inode->i_size)
2982 ext4_truncate_failed_write(inode);
2983
2984 if (ret == -ENOSPC &&
2985 ext4_should_retry_alloc(inode->i_sb, &retries))
2986 goto retry_journal;
2987
2988 put_page(page);
2989 return ret;
2990 }
2991
2992 *pagep = page;
2993 return ret;
2994}
2995
2996/*
2997 * Check if we should update i_disksize
2998 * when writing to the end of the file without requiring block allocation
2999 */
3000static int ext4_da_should_update_i_disksize(struct page *page,
3001 unsigned long offset)
3002{
3003 struct buffer_head *bh;
3004 struct inode *inode = page->mapping->host;
3005 unsigned int idx;
3006 int i;
3007
3008 bh = page_buffers(page);
3009 idx = offset >> inode->i_blkbits;
3010
3011 for (i = 0; i < idx; i++)
3012 bh = bh->b_this_page;
3013
3014 if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3015 return 0;
3016 return 1;
3017}
3018
3019static int ext4_da_write_end(struct file *file,
3020 struct address_space *mapping,
3021 loff_t pos, unsigned len, unsigned copied,
3022 struct page *page, void *fsdata)
3023{
3024 struct inode *inode = mapping->host;
3025 int ret = 0, ret2;
3026 handle_t *handle = ext4_journal_current_handle();
3027 loff_t new_i_size;
3028 unsigned long start, end;
3029 int write_mode = (int)(unsigned long)fsdata;
3030
3031 if (write_mode == FALL_BACK_TO_NONDELALLOC)
3032 return ext4_write_end(file, mapping, pos,
3033 len, copied, page, fsdata);
3034
3035 trace_ext4_da_write_end(inode, pos, len, copied);
3036 start = pos & (PAGE_SIZE - 1);
3037 end = start + copied - 1;
3038
3039 /*
3040 * generic_write_end() will run mark_inode_dirty() if i_size
3041 * changes. So let's piggyback the i_disksize mark_inode_dirty
3042 * into that.
3043 */
3044 new_i_size = pos + copied;
3045 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
3046 if (ext4_has_inline_data(inode) ||
3047 ext4_da_should_update_i_disksize(page, end)) {
3048 ext4_update_i_disksize(inode, new_i_size);
3049 /* We need to mark the inode dirty even if
3050 * new_i_size is less than inode->i_size
3051 * but greater than i_disksize (hint: delalloc).
3052 */
3053 ext4_mark_inode_dirty(handle, inode);
3054 }
3055 }
3056
3057 if (write_mode != CONVERT_INLINE_DATA &&
3058 ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3059 ext4_has_inline_data(inode))
3060 ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
3061 page);
3062 else
3063 ret2 = generic_write_end(file, mapping, pos, len, copied,
3064 page, fsdata);
3065
3066 copied = ret2;
3067 if (ret2 < 0)
3068 ret = ret2;
3069 ret2 = ext4_journal_stop(handle);
3070 if (!ret)
3071 ret = ret2;
3072
3073 return ret ? ret : copied;
3074}
3075
3076static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
3077 unsigned int length)
3078{
3079 /*
3080 * Drop reserved blocks
3081 */
3082 BUG_ON(!PageLocked(page));
3083 if (!page_has_buffers(page))
3084 goto out;
3085
3086 ext4_da_page_release_reservation(page, offset, length);
3087
3088out:
3089 ext4_invalidatepage(page, offset, length);
3090
3091 return;
3092}
3093
3094/*
3095 * Force all delayed allocation blocks to be allocated for a given inode.
3096 */
3097int ext4_alloc_da_blocks(struct inode *inode)
3098{
3099 trace_ext4_alloc_da_blocks(inode);
3100
3101 if (!EXT4_I(inode)->i_reserved_data_blocks)
3102 return 0;
3103
3104 /*
3105 * We do something simple for now. The filemap_flush() will
3106 * also start triggering a write of the data blocks, which is
3107 * not strictly speaking necessary (and for users of
3108 * laptop_mode, not even desirable). However, to do otherwise
3109 * would require replicating code paths in:
3110 *
3111 * ext4_writepages() ->
3112 * write_cache_pages() ---> (via passed in callback function)
3113 * __mpage_da_writepage() -->
3114 * mpage_add_bh_to_extent()
3115 * mpage_da_map_blocks()
3116 *
3117 * The problem is that write_cache_pages(), located in
3118 * mm/page-writeback.c, marks pages clean in preparation for
3119 * doing I/O, which is not desirable if we're not planning on
3120 * doing I/O at all.
3121 *
3122 * We could call write_cache_pages(), and then redirty all of
3123 * the pages by calling redirty_page_for_writepage() but that
3124 * would be ugly in the extreme. So instead we would need to
3125 * replicate parts of the code in the above functions,
3126 * simplifying them because we wouldn't actually intend to
3127 * write out the pages, but rather only collect contiguous
3128 * logical block extents, call the multi-block allocator, and
3129 * then update the buffer heads with the block allocations.
3130 *
3131 * For now, though, we'll cheat by calling filemap_flush(),
3132 * which will map the blocks, and start the I/O, but not
3133 * actually wait for the I/O to complete.
3134 */
3135 return filemap_flush(inode->i_mapping);
3136}
3137
3138/*
3139 * bmap() is special. It gets used by applications such as lilo and by
3140 * the swapper to find the on-disk block of a specific piece of data.
3141 *
3142 * Naturally, this is dangerous if the block concerned is still in the
3143 * journal. If somebody makes a swapfile on an ext4 data-journaling
3144 * filesystem and enables swap, then they may get a nasty shock when the
3145 * data getting swapped to that swapfile suddenly gets overwritten by
3146 * the original zeros written out previously to the journal and
3147 * awaiting writeback in the kernel's buffer cache.
3148 *
3149 * So, if we see any bmap calls here on a modified, data-journaled file,
3150 * take extra steps to flush any blocks which might be in the cache.
3151 */
3152static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3153{
3154 struct inode *inode = mapping->host;
3155 journal_t *journal;
3156 int err;
3157
3158 /*
3159 * We can get here for an inline file via the FIBMAP ioctl
3160 */
3161 if (ext4_has_inline_data(inode))
3162 return 0;
3163
3164 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3165 test_opt(inode->i_sb, DELALLOC)) {
3166 /*
3167 * With delalloc we want to sync the file
3168 * so that we can make sure we allocate
3169 * blocks for the file
3170 */
3171 filemap_write_and_wait(mapping);
3172 }
3173
3174 if (EXT4_JOURNAL(inode) &&
3175 ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3176 /*
3177 * This is a REALLY heavyweight approach, but the use of
3178 * bmap on dirty files is expected to be extremely rare:
3179 * only if we run lilo or swapon on a freshly made file
3180 * do we expect this to happen.
3181 *
3182 * (bmap requires CAP_SYS_RAWIO so this does not
3183 * represent an unprivileged user DOS attack --- we'd be
3184 * in trouble if mortal users could trigger this path at
3185 * will.)
3186 *
3187 * NB. EXT4_STATE_JDATA is not set on files other than
3188 * regular files. If somebody wants to bmap a directory
3189 * or symlink and gets confused because the buffer
3190 * hasn't yet been flushed to disk, they deserve
3191 * everything they get.
3192 */
3193
3194 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3195 journal = EXT4_JOURNAL(inode);
3196 jbd2_journal_lock_updates(journal);
3197 err = jbd2_journal_flush(journal);
3198 jbd2_journal_unlock_updates(journal);
3199
3200 if (err)
3201 return 0;
3202 }
3203
3204 return generic_block_bmap(mapping, block, ext4_get_block);
3205}
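/*
 * Illustrative userspace sketch (not part of the original source): this
 * path is reached via the FIBMAP ioctl, which translates a logical block
 * number into a physical one (0 for holes) and requires CAP_SYS_RAWIO:
 *
 *	int blk = 0;				// logical block 0
 *	if (ioctl(fd, FIBMAP, &blk) == 0)	// fd: an open file descriptor
 *		printf("physical block %d\n", blk);
 */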
3206
3207static int ext4_readpage(struct file *file, struct page *page)
3208{
3209 int ret = -EAGAIN;
3210 struct inode *inode = page->mapping->host;
3211
3212 trace_ext4_readpage(page);
3213
3214 if (ext4_has_inline_data(inode))
3215 ret = ext4_readpage_inline(inode, page);
3216
3217 if (ret == -EAGAIN)
3218 return ext4_mpage_readpages(page->mapping, NULL, page, 1);
3219
3220 return ret;
3221}
3222
3223static int
3224ext4_readpages(struct file *file, struct address_space *mapping,
3225 struct list_head *pages, unsigned nr_pages)
3226{
3227 struct inode *inode = mapping->host;
3228
3229 /* If the file has inline data, no need to do readpages. */
3230 if (ext4_has_inline_data(inode))
3231 return 0;
3232
3233 return ext4_mpage_readpages(mapping, pages, NULL, nr_pages);
3234}
3235
3236static void ext4_invalidatepage(struct page *page, unsigned int offset,
3237 unsigned int length)
3238{
3239 trace_ext4_invalidatepage(page, offset, length);
3240
3241 /* No journalling happens on data buffers when this function is used */
3242 WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3243
3244 block_invalidatepage(page, offset, length);
3245}
3246
3247static int __ext4_journalled_invalidatepage(struct page *page,
3248 unsigned int offset,
3249 unsigned int length)
3250{
3251 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3252
3253 trace_ext4_journalled_invalidatepage(page, offset, length);
3254
3255 /*
3256 * If it's a full truncate we just forget about the pending dirtying
3257 */
3258 if (offset == 0 && length == PAGE_SIZE)
3259 ClearPageChecked(page);
3260
3261 return jbd2_journal_invalidatepage(journal, page, offset, length);
3262}
3263
3264/* Wrapper for aops... */
3265static void ext4_journalled_invalidatepage(struct page *page,
3266 unsigned int offset,
3267 unsigned int length)
3268{
3269 WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3270}
3271
3272static int ext4_releasepage(struct page *page, gfp_t wait)
3273{
3274 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3275
3276 trace_ext4_releasepage(page);
3277
3278 /* Page has dirty journalled data -> cannot release */
3279 if (PageChecked(page))
3280 return 0;
3281 if (journal)
3282 return jbd2_journal_try_to_free_buffers(journal, page, wait);
3283 else
3284 return try_to_free_buffers(page);
3285}
3286
3287#ifdef CONFIG_FS_DAX
3288static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3289 unsigned flags, struct iomap *iomap)
3290{
3291 unsigned int blkbits = inode->i_blkbits;
3292 unsigned long first_block = offset >> blkbits;
3293 unsigned long last_block = (offset + length - 1) >> blkbits;
3294 struct ext4_map_blocks map;
3295 int ret;
3296
3297 if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3298 return -ERANGE;
3299
3300 map.m_lblk = first_block;
3301 map.m_len = last_block - first_block + 1;
3302
3303 if (!(flags & IOMAP_WRITE)) {
3304 ret = ext4_map_blocks(NULL, inode, &map, 0);
3305 } else {
3306 int dio_credits;
3307 handle_t *handle;
3308 int retries = 0;
3309
3310 /* Trim mapping request to maximum we can map at once for DIO */
3311 if (map.m_len > DIO_MAX_BLOCKS)
3312 map.m_len = DIO_MAX_BLOCKS;
3313 dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
3314retry:
3315 /*
3316 * Either we allocate blocks and then we don't get unwritten
3317 * extent so we have reserved enough credits, or the blocks
3318 * are already allocated and unwritten and in that case
3319 * extent conversion fits in the credits as well.
3320 */
3321 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
3322 dio_credits);
3323 if (IS_ERR(handle))
3324 return PTR_ERR(handle);
3325
3326 ret = ext4_map_blocks(handle, inode, &map,
3327 EXT4_GET_BLOCKS_CREATE_ZERO);
3328 if (ret < 0) {
3329 ext4_journal_stop(handle);
3330 if (ret == -ENOSPC &&
3331 ext4_should_retry_alloc(inode->i_sb, &retries))
3332 goto retry;
3333 return ret;
3334 }
3335
3336 /*
3337 * If we added blocks beyond i_size, we need to make sure they
3338 * will get truncated if we crash before updating i_size in
3339 * ext4_iomap_end(). For faults we don't need to do that (and
3340 * even cannot because for orphan list operations inode_lock is
3341 * required) - if we happen to instantiate a block beyond i_size,
3342 * it is because we race with truncate which has already added
3343 * the inode to the orphan list.
3344 */
3345 if (!(flags & IOMAP_FAULT) && first_block + map.m_len >
3346 (i_size_read(inode) + (1 << blkbits) - 1) >> blkbits) {
3347 int err;
3348
3349 err = ext4_orphan_add(handle, inode);
3350 if (err < 0) {
3351 ext4_journal_stop(handle);
3352 return err;
3353 }
3354 }
3355 ext4_journal_stop(handle);
3356 }
3357
3358 iomap->flags = 0;
3359 iomap->bdev = inode->i_sb->s_bdev;
3360 iomap->offset = first_block << blkbits;
3361
3362 if (ret == 0) {
3363 iomap->type = IOMAP_HOLE;
3364 iomap->blkno = IOMAP_NULL_BLOCK;
3365 iomap->length = (u64)map.m_len << blkbits;
3366 } else {
3367 if (map.m_flags & EXT4_MAP_MAPPED) {
3368 iomap->type = IOMAP_MAPPED;
3369 } else if (map.m_flags & EXT4_MAP_UNWRITTEN) {
3370 iomap->type = IOMAP_UNWRITTEN;
3371 } else {
3372 WARN_ON_ONCE(1);
3373 return -EIO;
3374 }
3375 iomap->blkno = (sector_t)map.m_pblk << (blkbits - 9);
3376 iomap->length = (u64)map.m_len << blkbits;
3377 }
3378
3379 if (map.m_flags & EXT4_MAP_NEW)
3380 iomap->flags |= IOMAP_F_NEW;
3381 return 0;
3382}
3383
3384static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3385 ssize_t written, unsigned flags, struct iomap *iomap)
3386{
3387 int ret = 0;
3388 handle_t *handle;
3389 int blkbits = inode->i_blkbits;
3390 bool truncate = false;
3391
3392 if (!(flags & IOMAP_WRITE) || (flags & IOMAP_FAULT))
3393 return 0;
3394
3395 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3396 if (IS_ERR(handle)) {
3397 ret = PTR_ERR(handle);
3398 goto orphan_del;
3399 }
3400 if (ext4_update_inode_size(inode, offset + written))
3401 ext4_mark_inode_dirty(handle, inode);
3402 /*
3403 * We may need to truncate allocated but not written blocks beyond EOF.
3404 */
3405 if (iomap->offset + iomap->length >
3406 ALIGN(inode->i_size, 1 << blkbits)) {
3407 ext4_lblk_t written_blk, end_blk;
3408
3409 written_blk = (offset + written) >> blkbits;
3410 end_blk = (offset + length) >> blkbits;
3411 if (written_blk < end_blk && ext4_can_truncate(inode))
3412 truncate = true;
3413 }
3414 /*
3415 * Remove the inode from the orphan list if we were extending an inode and
3416 * everything went fine.
3417 */
3418 if (!truncate && inode->i_nlink &&
3419 !list_empty(&EXT4_I(inode)->i_orphan))
3420 ext4_orphan_del(handle, inode);
3421 ext4_journal_stop(handle);
3422 if (truncate) {
3423 ext4_truncate_failed_write(inode);
3424orphan_del:
3425 /*
3426 * If truncate failed early the inode might still be on the
3427 * orphan list; we need to make sure the inode is removed from
3428 * the orphan list in that case.
3429 */
3430 if (inode->i_nlink)
3431 ext4_orphan_del(NULL, inode);
3432 }
3433 return ret;
3434}
3435
3436struct iomap_ops ext4_iomap_ops = {
3437 .iomap_begin = ext4_iomap_begin,
3438 .iomap_end = ext4_iomap_end,
3439};
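/*
 * Note (illustrative, not part of the original source): these ops are
 * consumed by the generic iomap code rather than called directly. For
 * instance, ext4_block_zero_page_range() below hands them to
 * iomap_zero_range() for DAX inodes, roughly:
 *
 *	ret = iomap_zero_range(inode, from, length, NULL, &ext4_iomap_ops);
 */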
3440
3441#endif
3442
3443static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3444 ssize_t size, void *private)
3445{
3446 ext4_io_end_t *io_end = private;
3447
3448 /* if not async direct IO just return */
3449 if (!io_end)
3450 return 0;
3451
3452 ext_debug("ext4_end_io_dio(): io_end 0x%p "
3453 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3454 io_end, io_end->inode->i_ino, iocb, offset, size);
3455
3456 /*
3457 * Error during AIO DIO. We cannot convert unwritten extents as the
3458 * data was not written. Just clear the unwritten flag and drop io_end.
3459 */
3460 if (size <= 0) {
3461 ext4_clear_io_unwritten_flag(io_end);
3462 size = 0;
3463 }
3464 io_end->offset = offset;
3465 io_end->size = size;
3466 ext4_put_io_end(io_end);
3467
3468 return 0;
3469}
3470
3471/*
3472 * Handling of direct IO writes.
3473 *
3474 * For ext4 extent files, ext4 will do direct-io writes even to holes,
3475 * preallocated extents, and writes that extend the file; there is no need
3476 * to fall back to buffered IO.
3477 *
3478 * For holes, we fallocate those blocks and mark them as unwritten.
3479 * If those blocks were preallocated, we make sure they are split, but
3480 * still keep the range to write as unwritten.
3481 *
3482 * The unwritten extents will be converted to written when DIO is completed.
3483 * For async direct IO, since the IO may still be pending when we return, we
3484 * set up an end_io callback function, which will do the conversion
3485 * when the async direct IO has completed.
3486 *
3487 * If the O_DIRECT write will extend the file then add this inode to the
3488 * orphan list. So recovery will truncate it back to the original size
3489 * if the machine crashes during the write.
3490 *
3491 */
3492static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
3493{
3494 struct file *file = iocb->ki_filp;
3495 struct inode *inode = file->f_mapping->host;
3496 struct ext4_inode_info *ei = EXT4_I(inode);
3497 ssize_t ret;
3498 loff_t offset = iocb->ki_pos;
3499 size_t count = iov_iter_count(iter);
3500 int overwrite = 0;
3501 get_block_t *get_block_func = NULL;
3502 int dio_flags = 0;
3503 loff_t final_size = offset + count;
3504 int orphan = 0;
3505 handle_t *handle;
3506
3507 if (final_size > inode->i_size) {
3508 /* Credits for sb + inode write */
3509 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3510 if (IS_ERR(handle)) {
3511 ret = PTR_ERR(handle);
3512 goto out;
3513 }
3514 ret = ext4_orphan_add(handle, inode);
3515 if (ret) {
3516 ext4_journal_stop(handle);
3517 goto out;
3518 }
3519 orphan = 1;
3520 ei->i_disksize = inode->i_size;
3521 ext4_journal_stop(handle);
3522 }
3523
3524 BUG_ON(iocb->private == NULL);
3525
3526 /*
3527 * Make all waiters for direct IO properly wait also for extent
3528 * conversion. This also disallows a race between truncate() and
3529 * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3530 */
3531 inode_dio_begin(inode);
3532
3533 /* If we do an overwrite DIO, i_mutex locking can be released */
3534 overwrite = *((int *)iocb->private);
3535
3536 if (overwrite)
3537 inode_unlock(inode);
3538
3539 /*
3540 * For extent-mapped files we can do direct writes to holes and fallocated ranges.
3541 *
3542 * Allocated blocks to fill the hole are marked as unwritten to prevent a
3543 * parallel buffered read from exposing the stale data before DIO completes
3544 * the data IO.
3545 *
3546 * As for previously fallocated extents, ext4's get_block will simply
3547 * mark the buffer mapped but still keep the extents unwritten.
3548 *
3549 * In the non-AIO case, we will convert those unwritten extents to written
3550 * after returning from blockdev_direct_IO. That saves us from
3551 * allocating an io_end structure and also the overhead of offloading
3552 * the extent conversion to a workqueue.
3553 *
3554 * For async DIO, the conversion needs to be deferred when the
3555 * IO is completed. The ext4 end_io callback function will be
3556 * called to take care of the conversion work. Here for async
3557 * case, we allocate an io_end structure to hook to the iocb.
3558 */
3559 iocb->private = NULL;
3560 if (overwrite)
3561 get_block_func = ext4_dio_get_block_overwrite;
3562 else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
3563 round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
3564 get_block_func = ext4_dio_get_block;
3565 dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
3566 } else if (is_sync_kiocb(iocb)) {
3567 get_block_func = ext4_dio_get_block_unwritten_sync;
3568 dio_flags = DIO_LOCKING;
3569 } else {
3570 get_block_func = ext4_dio_get_block_unwritten_async;
3571 dio_flags = DIO_LOCKING;
3572 }
3573#ifdef CONFIG_EXT4_FS_ENCRYPTION
3574 BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode));
3575#endif
3576 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
3577 get_block_func, ext4_end_io_dio, NULL,
3578 dio_flags);
3579
3580 if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3581 EXT4_STATE_DIO_UNWRITTEN)) {
3582 int err;
3583 /*
3584 * For the non-AIO case, since the IO is already
3585 * completed, we can do the conversion right here.
3586 */
3587 err = ext4_convert_unwritten_extents(NULL, inode,
3588 offset, ret);
3589 if (err < 0)
3590 ret = err;
3591 ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3592 }
3593
3594 inode_dio_end(inode);
3595 /* take i_mutex locking again if we did an overwrite DIO */
3596 if (overwrite)
3597 inode_lock(inode);
3598
3599 if (ret < 0 && final_size > inode->i_size)
3600 ext4_truncate_failed_write(inode);
3601
3602 /* Handle extending of i_size after direct IO write */
3603 if (orphan) {
3604 int err;
3605
3606 /* Credits for sb + inode write */
3607 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
3608 if (IS_ERR(handle)) {
3609 /* This is really bad luck. We've written the data
3610 * but cannot extend i_size. Bail out and pretend
3611 * the write failed... */
3612 ret = PTR_ERR(handle);
3613 if (inode->i_nlink)
3614 ext4_orphan_del(NULL, inode);
3615
3616 goto out;
3617 }
3618 if (inode->i_nlink)
3619 ext4_orphan_del(handle, inode);
3620 if (ret > 0) {
3621 loff_t end = offset + ret;
3622 if (end > inode->i_size) {
3623 ei->i_disksize = end;
3624 i_size_write(inode, end);
3625 /*
3626 * We're going to return a positive `ret'
3627 * here due to non-zero-length I/O, so there's
3628 * no way of reporting error returns from
3629 * ext4_mark_inode_dirty() to userspace. So
3630 * ignore it.
3631 */
3632 ext4_mark_inode_dirty(handle, inode);
3633 }
3634 }
3635 err = ext4_journal_stop(handle);
3636 if (ret == 0)
3637 ret = err;
3638 }
3639out:
3640 return ret;
3641}
3642
3643static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter)
3644{
3645 struct address_space *mapping = iocb->ki_filp->f_mapping;
3646 struct inode *inode = mapping->host;
3647 size_t count = iov_iter_count(iter);
3648 ssize_t ret;
3649
3650 /*
3651 * Shared inode_lock is enough for us - it protects against concurrent
3652 * writes & truncates and since we take care of writing back page cache,
3653 * we are protected against page writeback as well.
3654 */
3655 inode_lock_shared(inode);
3656 ret = filemap_write_and_wait_range(mapping, iocb->ki_pos,
3657 iocb->ki_pos + count);
3658 if (ret)
3659 goto out_unlock;
3660 ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
3661 iter, ext4_dio_get_block, NULL, NULL, 0);
3662out_unlock:
3663 inode_unlock_shared(inode);
3664 return ret;
3665}
3666
3667static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3668{
3669 struct file *file = iocb->ki_filp;
3670 struct inode *inode = file->f_mapping->host;
3671 size_t count = iov_iter_count(iter);
3672 loff_t offset = iocb->ki_pos;
3673 ssize_t ret;
3674
3675#ifdef CONFIG_EXT4_FS_ENCRYPTION
3676 if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
3677 return 0;
3678#endif
3679
3680 /*
3681 * If we are doing data journalling we don't support O_DIRECT
3682 */
3683 if (ext4_should_journal_data(inode))
3684 return 0;
3685
3686 /* Let buffer I/O handle the inline data case. */
3687 if (ext4_has_inline_data(inode))
3688 return 0;
3689
3690 /* DAX uses iomap path now */
3691 if (WARN_ON_ONCE(IS_DAX(inode)))
3692 return 0;
3693
3694 trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
3695 if (iov_iter_rw(iter) == READ)
3696 ret = ext4_direct_IO_read(iocb, iter);
3697 else
3698 ret = ext4_direct_IO_write(iocb, iter);
3699 trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
3700 return ret;
3701}
3702
3703/*
3704 * Pages can be marked dirty completely asynchronously from ext4's journalling
3705 * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3706 * much here because ->set_page_dirty is called under VFS locks. The page is
3707 * not necessarily locked.
3708 *
3709 * We cannot just dirty the page and leave attached buffers clean, because the
3710 * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3711 * or jbddirty because all the journalling code will explode.
3712 *
3713 * So what we do is to mark the page "pending dirty" and next time writepage
3714 * is called, propagate that into the buffers appropriately.
3715 */
3716static int ext4_journalled_set_page_dirty(struct page *page)
3717{
3718 SetPageChecked(page);
3719 return __set_page_dirty_nobuffers(page);
3720}
3721
3722static int ext4_set_page_dirty(struct page *page)
3723{
3724 WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3725 WARN_ON_ONCE(!page_has_buffers(page));
3726 return __set_page_dirty_buffers(page);
3727}
3728
3729static const struct address_space_operations ext4_aops = {
3730 .readpage = ext4_readpage,
3731 .readpages = ext4_readpages,
3732 .writepage = ext4_writepage,
3733 .writepages = ext4_writepages,
3734 .write_begin = ext4_write_begin,
3735 .write_end = ext4_write_end,
3736 .set_page_dirty = ext4_set_page_dirty,
3737 .bmap = ext4_bmap,
3738 .invalidatepage = ext4_invalidatepage,
3739 .releasepage = ext4_releasepage,
3740 .direct_IO = ext4_direct_IO,
3741 .migratepage = buffer_migrate_page,
3742 .is_partially_uptodate = block_is_partially_uptodate,
3743 .error_remove_page = generic_error_remove_page,
3744};
3745
3746static const struct address_space_operations ext4_journalled_aops = {
3747 .readpage = ext4_readpage,
3748 .readpages = ext4_readpages,
3749 .writepage = ext4_writepage,
3750 .writepages = ext4_writepages,
3751 .write_begin = ext4_write_begin,
3752 .write_end = ext4_journalled_write_end,
3753 .set_page_dirty = ext4_journalled_set_page_dirty,
3754 .bmap = ext4_bmap,
3755 .invalidatepage = ext4_journalled_invalidatepage,
3756 .releasepage = ext4_releasepage,
3757 .direct_IO = ext4_direct_IO,
3758 .is_partially_uptodate = block_is_partially_uptodate,
3759 .error_remove_page = generic_error_remove_page,
3760};
3761
3762static const struct address_space_operations ext4_da_aops = {
3763 .readpage = ext4_readpage,
3764 .readpages = ext4_readpages,
3765 .writepage = ext4_writepage,
3766 .writepages = ext4_writepages,
3767 .write_begin = ext4_da_write_begin,
3768 .write_end = ext4_da_write_end,
3769 .set_page_dirty = ext4_set_page_dirty,
3770 .bmap = ext4_bmap,
3771 .invalidatepage = ext4_da_invalidatepage,
3772 .releasepage = ext4_releasepage,
3773 .direct_IO = ext4_direct_IO,
3774 .migratepage = buffer_migrate_page,
3775 .is_partially_uptodate = block_is_partially_uptodate,
3776 .error_remove_page = generic_error_remove_page,
3777};
3778
3779void ext4_set_aops(struct inode *inode)
3780{
3781 switch (ext4_inode_journal_mode(inode)) {
3782 case EXT4_INODE_ORDERED_DATA_MODE:
3783 case EXT4_INODE_WRITEBACK_DATA_MODE:
3784 break;
3785 case EXT4_INODE_JOURNAL_DATA_MODE:
3786 inode->i_mapping->a_ops = &ext4_journalled_aops;
3787 return;
3788 default:
3789 BUG();
3790 }
3791 if (test_opt(inode->i_sb, DELALLOC))
3792 inode->i_mapping->a_ops = &ext4_da_aops;
3793 else
3794 inode->i_mapping->a_ops = &ext4_aops;
3795}
3796
3797static int __ext4_block_zero_page_range(handle_t *handle,
3798 struct address_space *mapping, loff_t from, loff_t length)
3799{
3800 ext4_fsblk_t index = from >> PAGE_SHIFT;
3801 unsigned offset = from & (PAGE_SIZE-1);
3802 unsigned blocksize, pos;
3803 ext4_lblk_t iblock;
3804 struct inode *inode = mapping->host;
3805 struct buffer_head *bh;
3806 struct page *page;
3807 int err = 0;
3808
3809 page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3810 mapping_gfp_constraint(mapping, ~__GFP_FS));
3811 if (!page)
3812 return -ENOMEM;
3813
3814 blocksize = inode->i_sb->s_blocksize;
3815
3816 iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3817
3818 if (!page_has_buffers(page))
3819 create_empty_buffers(page, blocksize, 0);
3820
3821 /* Find the buffer that contains "offset" */
3822 bh = page_buffers(page);
3823 pos = blocksize;
3824 while (offset >= pos) {
3825 bh = bh->b_this_page;
3826 iblock++;
3827 pos += blocksize;
3828 }
3829 if (buffer_freed(bh)) {
3830 BUFFER_TRACE(bh, "freed: skip");
3831 goto unlock;
3832 }
3833 if (!buffer_mapped(bh)) {
3834 BUFFER_TRACE(bh, "unmapped");
3835 ext4_get_block(inode, iblock, bh, 0);
3836 /* unmapped? It's a hole - nothing to do */
3837 if (!buffer_mapped(bh)) {
3838 BUFFER_TRACE(bh, "still unmapped");
3839 goto unlock;
3840 }
3841 }
3842
3843 /* Ok, it's mapped. Make sure it's up-to-date */
3844 if (PageUptodate(page))
3845 set_buffer_uptodate(bh);
3846
3847 if (!buffer_uptodate(bh)) {
3848 err = -EIO;
3849 ll_rw_block(REQ_OP_READ, 0, 1, &bh);
3850 wait_on_buffer(bh);
3851 /* Uhhuh. Read error. Complain and punt. */
3852 if (!buffer_uptodate(bh))
3853 goto unlock;
3854 if (S_ISREG(inode->i_mode) &&
3855 ext4_encrypted_inode(inode)) {
3856 /* We expect the key to be set. */
3857 BUG_ON(!fscrypt_has_encryption_key(inode));
3858 BUG_ON(blocksize != PAGE_SIZE);
3859 WARN_ON_ONCE(fscrypt_decrypt_page(page->mapping->host,
3860 page, PAGE_SIZE, 0, page->index));
3861 }
3862 }
3863 if (ext4_should_journal_data(inode)) {
3864 BUFFER_TRACE(bh, "get write access");
3865 err = ext4_journal_get_write_access(handle, bh);
3866 if (err)
3867 goto unlock;
3868 }
3869 zero_user(page, offset, length);
3870 BUFFER_TRACE(bh, "zeroed end of block");
3871
3872 if (ext4_should_journal_data(inode)) {
3873 err = ext4_handle_dirty_metadata(handle, inode, bh);
3874 } else {
3875 err = 0;
3876 mark_buffer_dirty(bh);
3877 if (ext4_should_order_data(inode))
3878 err = ext4_jbd2_inode_add_write(handle, inode);
3879 }
3880
3881unlock:
3882 unlock_page(page);
3883 put_page(page);
3884 return err;
3885}
3886
3887/*
3888 * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3889 * starting from file offset 'from'. The range to be zeroed must
3890 * be contained within one block. If the specified range exceeds
3891 * the end of the block it will be shortened to the end of the block
3892 * that corresponds to 'from'.
3893 */
3894static int ext4_block_zero_page_range(handle_t *handle,
3895 struct address_space *mapping, loff_t from, loff_t length)
3896{
3897 struct inode *inode = mapping->host;
3898 unsigned offset = from & (PAGE_SIZE-1);
3899 unsigned blocksize = inode->i_sb->s_blocksize;
3900 unsigned max = blocksize - (offset & (blocksize - 1));
3901
3902 /*
3903 * correct length if it does not fall between
3904 * 'from' and the end of the block
3905 */
3906 if (length > max || length < 0)
3907 length = max;
3908
3909 if (IS_DAX(inode)) {
3910 return iomap_zero_range(inode, from, length, NULL,
3911 &ext4_iomap_ops);
3912 }
3913 return __ext4_block_zero_page_range(handle, mapping, from, length);
3914}
3915
3916/*
3917 * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3918 * up to the end of the block which corresponds to `from'.
3919 * This is required during truncate. We need to physically zero the tail end
3920 * of that block so it doesn't yield old data if the file is later grown.
3921 */
3922static int ext4_block_truncate_page(handle_t *handle,
3923 struct address_space *mapping, loff_t from)
3924{
3925 unsigned offset = from & (PAGE_SIZE-1);
3926 unsigned length;
3927 unsigned blocksize;
3928 struct inode *inode = mapping->host;
3929
3930 /* If we are processing an encrypted inode during orphan list handling */
3931 if (ext4_encrypted_inode(inode) && !fscrypt_has_encryption_key(inode))
3932 return 0;
3933
3934 blocksize = inode->i_sb->s_blocksize;
3935 length = blocksize - (offset & (blocksize - 1));
3936
3937 return ext4_block_zero_page_range(handle, mapping, from, length);
3938}
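/*
 * Worked example (illustrative, not part of the original source): with a
 * 4k block size and from = 5000, offset = 5000 & 4095 = 904 and
 * length = 4096 - 904 = 3192, so bytes 5000..8191 - from 'from' to the
 * end of its block - are zeroed.
 */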
3939
3940int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3941 loff_t lstart, loff_t length)
3942{
3943 struct super_block *sb = inode->i_sb;
3944 struct address_space *mapping = inode->i_mapping;
3945 unsigned partial_start, partial_end;
3946 ext4_fsblk_t start, end;
3947 loff_t byte_end = (lstart + length - 1);
3948 int err = 0;
3949
3950 partial_start = lstart & (sb->s_blocksize - 1);
3951 partial_end = byte_end & (sb->s_blocksize - 1);
3952
3953 start = lstart >> sb->s_blocksize_bits;
3954 end = byte_end >> sb->s_blocksize_bits;
3955
3956 /* Handle partial zero within the single block */
3957 if (start == end &&
3958 (partial_start || (partial_end != sb->s_blocksize - 1))) {
3959 err = ext4_block_zero_page_range(handle, mapping,
3960 lstart, length);
3961 return err;
3962 }
3963 /* Handle partial zero out on the start of the range */
3964 if (partial_start) {
3965 err = ext4_block_zero_page_range(handle, mapping,
3966 lstart, sb->s_blocksize);
3967 if (err)
3968 return err;
3969 }
3970 /* Handle partial zero out on the end of the range */
3971 if (partial_end != sb->s_blocksize - 1)
3972 err = ext4_block_zero_page_range(handle, mapping,
3973 byte_end - partial_end,
3974 partial_end + 1);
3975 return err;
3976}
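/*
 * Worked example (illustrative, not part of the original source): with a
 * 4k block size, lstart = 1000 and length = 10000 give byte_end = 10999,
 * partial_start = 1000, partial_end = 2807, start = 0 and end = 2. The
 * head call zeroes bytes 1000..4095 of block 0, the tail call zeroes
 * bytes 8192..10999 of block 2, and the fully covered block 1 is left
 * for the extent-removal code to free.
 */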
3977
3978int ext4_can_truncate(struct inode *inode)
3979{
3980 if (S_ISREG(inode->i_mode))
3981 return 1;
3982 if (S_ISDIR(inode->i_mode))
3983 return 1;
3984 if (S_ISLNK(inode->i_mode))
3985 return !ext4_inode_is_fast_symlink(inode);
3986 return 0;
3987}
3988
3989/*
3990 * We have to make sure i_disksize gets properly updated before we truncate
3991 * page cache due to hole punching or zero range. Otherwise i_disksize update
3992 * can get lost as it may have been postponed to submission of writeback but
3993 * that will never happen after we truncate page cache.
3994 */
3995int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3996 loff_t len)
3997{
3998 handle_t *handle;
3999 loff_t size = i_size_read(inode);
4000
4001 WARN_ON(!inode_is_locked(inode));
4002 if (offset > size || offset + len < size)
4003 return 0;
4004
4005 if (EXT4_I(inode)->i_disksize >= size)
4006 return 0;
4007
4008 handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4009 if (IS_ERR(handle))
4010 return PTR_ERR(handle);
4011 ext4_update_i_disksize(inode, size);
4012 ext4_mark_inode_dirty(handle, inode);
4013 ext4_journal_stop(handle);
4014
4015 return 0;
4016}
4017
4018/*
4019 * ext4_punch_hole: punches a hole in a file by releasing the blocks
4020 * associated with the given offset and length
4021 *
4022 * @inode: File inode
4023 * @offset: The offset where the hole will begin
4024 * @len: The length of the hole
4025 *
4026 * Returns: 0 on success or negative on failure
4027 */
4028
4029int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4030{
4031 struct super_block *sb = inode->i_sb;
4032 ext4_lblk_t first_block, stop_block;
4033 struct address_space *mapping = inode->i_mapping;
4034 loff_t first_block_offset, last_block_offset;
4035 handle_t *handle;
4036 unsigned int credits;
4037 int ret = 0;
4038
4039 if (!S_ISREG(inode->i_mode))
4040 return -EOPNOTSUPP;
4041
4042 trace_ext4_punch_hole(inode, offset, length, 0);
4043
4044 /*
4045 * Write out all dirty pages to avoid race conditions,
4046 * then release them.
4047 */
4048 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4049 ret = filemap_write_and_wait_range(mapping, offset,
4050 offset + length - 1);
4051 if (ret)
4052 return ret;
4053 }
4054
4055 inode_lock(inode);
4056
4057 /* No need to punch hole beyond i_size */
4058 if (offset >= inode->i_size)
4059 goto out_mutex;
4060
4061 /*
4062 * If the hole extends beyond i_size, set the hole
4063 * to end after the page that contains i_size
4064 */
4065 if (offset + length > inode->i_size) {
4066 length = inode->i_size +
4067 PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4068 offset;
4069 }
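	/*
	 * Illustrative example, assuming 4096-byte pages: with
	 * i_size = 10000, offset = 8000 and length = 100000, the new
	 * length becomes 4288, so the hole ends at byte 12288, i.e. at
	 * the end of the page that contains i_size.
	 */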
4070
4071 if (offset & (sb->s_blocksize - 1) ||
4072 (offset + length) & (sb->s_blocksize - 1)) {
4073 /*
4074 * Attach jinode to inode for jbd2 if we do any zeroing of
4075 * partial block
4076 */
4077 ret = ext4_inode_attach_jinode(inode);
4078 if (ret < 0)
4079 goto out_mutex;
4080
4081 }
4082
4083 /* Wait for all existing dio workers; newcomers will block on i_mutex */
4084 ext4_inode_block_unlocked_dio(inode);
4085 inode_dio_wait(inode);
4086
4087 /*
4088 * Prevent page faults from reinstantiating pages we have released from
4089 * page cache.
4090 */
4091 down_write(&EXT4_I(inode)->i_mmap_sem);
4092 first_block_offset = round_up(offset, sb->s_blocksize);
4093 last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4094
4095 /* Now release the pages and zero the block-aligned parts of pages */
4096 if (last_block_offset > first_block_offset) {
4097 ret = ext4_update_disksize_before_punch(inode, offset, length);
4098 if (ret)
4099 goto out_dio;
4100 truncate_pagecache_range(inode, first_block_offset,
4101 last_block_offset);
4102 }
4103
4104 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4105 credits = ext4_writepage_trans_blocks(inode);
4106 else
4107 credits = ext4_blocks_for_truncate(inode);
4108 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4109 if (IS_ERR(handle)) {
4110 ret = PTR_ERR(handle);
4111 ext4_std_error(sb, ret);
4112 goto out_dio;
4113 }
4114
4115 ret = ext4_zero_partial_blocks(handle, inode, offset,
4116 length);
4117 if (ret)
4118 goto out_stop;
4119
4120 first_block = (offset + sb->s_blocksize - 1) >>
4121 EXT4_BLOCK_SIZE_BITS(sb);
4122 stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
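	/*
	 * Illustrative example, assuming 4096-byte blocks: for
	 * offset = 1000 and length = 10000, first_block = 1 and
	 * stop_block = 2, so only block 1 (the single fully covered
	 * block) is removed; the partial blocks were zeroed above.
	 */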
4123
4124 /* If there are no blocks to remove, return now */
4125 if (first_block >= stop_block)
4126 goto out_stop;
4127
4128 down_write(&EXT4_I(inode)->i_data_sem);
4129 ext4_discard_preallocations(inode);
4130
4131 ret = ext4_es_remove_extent(inode, first_block,
4132 stop_block - first_block);
4133 if (ret) {
4134 up_write(&EXT4_I(inode)->i_data_sem);
4135 goto out_stop;
4136 }
4137
4138 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4139 ret = ext4_ext_remove_space(inode, first_block,
4140 stop_block - 1);
4141 else
4142 ret = ext4_ind_remove_space(handle, inode, first_block,
4143 stop_block);
4144
4145 up_write(&EXT4_I(inode)->i_data_sem);
4146 if (IS_SYNC(inode))
4147 ext4_handle_sync(handle);
4148
4149 inode->i_mtime = inode->i_ctime = current_time(inode);
4150 ext4_mark_inode_dirty(handle, inode);
4151out_stop:
4152 ext4_journal_stop(handle);
4153out_dio:
4154 up_write(&EXT4_I(inode)->i_mmap_sem);
4155 ext4_inode_resume_unlocked_dio(inode);
4156out_mutex:
4157 inode_unlock(inode);
4158 return ret;
4159}
4160
4161int ext4_inode_attach_jinode(struct inode *inode)
4162{
4163 struct ext4_inode_info *ei = EXT4_I(inode);
4164 struct jbd2_inode *jinode;
4165
4166 if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4167 return 0;
4168
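	/*
	 * Allocate outside of i_lock, then recheck under the lock; if
	 * another task attached a jinode in the meantime, the unused
	 * allocation is freed below.
	 */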
4169 jinode = jbd2_alloc_inode(GFP_KERNEL);
4170 spin_lock(&inode->i_lock);
4171 if (!ei->jinode) {
4172 if (!jinode) {
4173 spin_unlock(&inode->i_lock);
4174 return -ENOMEM;
4175 }
4176 ei->jinode = jinode;
4177 jbd2_journal_init_jbd_inode(ei->jinode, inode);
4178 jinode = NULL;
4179 }
4180 spin_unlock(&inode->i_lock);
4181 if (unlikely(jinode != NULL))
4182 jbd2_free_inode(jinode);
4183 return 0;
4184}
4185
4186/*
4187 * ext4_truncate()
4188 *
4189 * We block out ext4_get_block() block instantiations across the entire
4190 * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4191 * simultaneously on behalf of the same inode.
4192 *
4193 * As we work through the truncate and commit bits of it to the journal there
4194 * is one core, guiding principle: the file's tree must always be consistent on
4195 * disk. We must be able to restart the truncate after a crash.
4196 *
4197 * The file's tree may be transiently inconsistent in memory (although it
4198 * probably isn't), but whenever we close off and commit a journal transaction,
4199 * the contents of (the filesystem + the journal) must be consistent and
4200 * restartable. It's pretty simple, really: bottom up, right to left (although
4201 * left-to-right works OK too).
4202 *
4203 * Note that at recovery time, journal replay occurs *before* the restart of
4204 * truncate against the orphan inode list.
4205 *
4206 * The committed inode has the new, desired i_size (which is the same as
4207 * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
4208 * that this inode's truncate did not complete and it will again call
4209 * ext4_truncate() to have another go. So there will be instantiated blocks
4210 * to the right of the truncation point in a crashed ext4 filesystem. But
4211 * that's fine - as long as they are linked from the inode, the post-crash
4212 * ext4_truncate() run will find them and release them.
4213 */
4214int ext4_truncate(struct inode *inode)
4215{
4216 struct ext4_inode_info *ei = EXT4_I(inode);
4217 unsigned int credits;
4218 int err = 0;
4219 handle_t *handle;
4220 struct address_space *mapping = inode->i_mapping;
4221
4222 /*
4223 * There is a possibility that we're either freeing the inode
4224 * or it's a completely new inode. In those cases we might not
4225 * have i_mutex locked because it's not necessary.
4226 */
4227 if (!(inode->i_state & (I_NEW|I_FREEING)))
4228 WARN_ON(!inode_is_locked(inode));
4229 trace_ext4_truncate_enter(inode);
4230
4231 if (!ext4_can_truncate(inode))
4232 return 0;
4233
4234 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4235
4236 if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4237 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4238
4239 if (ext4_has_inline_data(inode)) {
4240 int has_inline = 1;
4241
4242 ext4_inline_data_truncate(inode, &has_inline);
4243 if (has_inline)
4244 return 0;
4245 }
4246
4247 /* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4248 if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4249 if (ext4_inode_attach_jinode(inode) < 0)
4250 return 0;
4251 }
4252
4253 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4254 credits = ext4_writepage_trans_blocks(inode);
4255 else
4256 credits = ext4_blocks_for_truncate(inode);
4257
4258 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4259 if (IS_ERR(handle))
4260 return PTR_ERR(handle);
4261
4262 if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4263 ext4_block_truncate_page(handle, mapping, inode->i_size);
4264
4265 /*
4266 * We add the inode to the orphan list, so that if this
4267 * truncate spans multiple transactions, and we crash, we will
4268 * resume the truncate when the filesystem recovers. It also
4269 * marks the inode dirty, to catch the new size.
4270 *
4271 * Implication: the file must always be in a sane, consistent
4272 * truncatable state while each transaction commits.
4273 */
4274 err = ext4_orphan_add(handle, inode);
4275 if (err)
4276 goto out_stop;
4277
4278 down_write(&EXT4_I(inode)->i_data_sem);
4279
4280 ext4_discard_preallocations(inode);
4281
4282 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4283 err = ext4_ext_truncate(handle, inode);
4284 else
4285 ext4_ind_truncate(handle, inode);
4286
4287 up_write(&ei->i_data_sem);
4288 if (err)
4289 goto out_stop;
4290
4291 if (IS_SYNC(inode))
4292 ext4_handle_sync(handle);
4293
4294out_stop:
4295 /*
4296 * If this was a simple ftruncate() and the file will remain alive,
4297 * then we need to clear up the orphan record which we created above.
4298 * However, if this was a real unlink then we were called by
4299 * ext4_evict_inode(), and we allow that function to clean up the
4300 * orphan info for us.
4301 */
4302 if (inode->i_nlink)
4303 ext4_orphan_del(handle, inode);
4304
4305 inode->i_mtime = inode->i_ctime = current_time(inode);
4306 ext4_mark_inode_dirty(handle, inode);
4307 ext4_journal_stop(handle);
4308
4309 trace_ext4_truncate_exit(inode);
4310 return err;
4311}
4312
4313/*
4314 * ext4_get_inode_loc returns with an extra refcount against the inode's
4315 * underlying buffer_head on success. If 'in_mem' is true, we have all
4316 * data in memory that is needed to recreate the on-disk version of this
4317 * inode.
4318 */
4319static int __ext4_get_inode_loc(struct inode *inode,
4320 struct ext4_iloc *iloc, int in_mem)
4321{
4322 struct ext4_group_desc *gdp;
4323 struct buffer_head *bh;
4324 struct super_block *sb = inode->i_sb;
4325 ext4_fsblk_t block;
4326 int inodes_per_block, inode_offset;
4327
4328 iloc->bh = NULL;
4329 if (!ext4_valid_inum(sb, inode->i_ino))
4330 return -EFSCORRUPTED;
4331
4332 iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
4333 gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4334 if (!gdp)
4335 return -EIO;
4336
4337 /*
4338 * Figure out the offset within the block group inode table
4339 */
4340 inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4341 inode_offset = ((inode->i_ino - 1) %
4342 EXT4_INODES_PER_GROUP(sb));
4343 block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4344 iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
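	/*
	 * Worked example (illustrative, assuming 4096-byte blocks,
	 * 256-byte inodes and 8192 inodes per group): ino = 8209 maps to
	 * block_group = 1, inode_offset = 16, block = itable + 1 and
	 * iloc->offset = 0.
	 */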
4345
4346 bh = sb_getblk(sb, block);
4347 if (unlikely(!bh))
4348 return -ENOMEM;
4349 if (!buffer_uptodate(bh)) {
4350 lock_buffer(bh);
4351
4352 /*
4353 * If the buffer has the write error flag, we have failed
4354 * to write out another inode in the same block. In this
4355 * case, we don't have to read the block because we may
4356 * read the old inode data successfully.
4357 */
4358 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
4359 set_buffer_uptodate(bh);
4360
4361 if (buffer_uptodate(bh)) {
4362 /* someone brought it uptodate while we waited */
4363 unlock_buffer(bh);
4364 goto has_buffer;
4365 }
4366
4367 /*
4368 * If we have all information of the inode in memory and this
4369 * is the only valid inode in the block, we need not read the
4370 * block.
4371 */
4372 if (in_mem) {
4373 struct buffer_head *bitmap_bh;
4374 int i, start;
4375
4376 start = inode_offset & ~(inodes_per_block - 1);
4377
4378 /* Is the inode bitmap in cache? */
4379 bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4380 if (unlikely(!bitmap_bh))
4381 goto make_io;
4382
4383 /*
4384 * If the inode bitmap isn't in cache then the
4385 * optimisation may end up performing two reads instead
4386 * of one, so skip it.
4387 */
4388 if (!buffer_uptodate(bitmap_bh)) {
4389 brelse(bitmap_bh);
4390 goto make_io;
4391 }
4392 for (i = start; i < start + inodes_per_block; i++) {
4393 if (i == inode_offset)
4394 continue;
4395 if (ext4_test_bit(i, bitmap_bh->b_data))
4396 break;
4397 }
4398 brelse(bitmap_bh);
4399 if (i == start + inodes_per_block) {
4400 /* all other inodes are free, so skip I/O */
4401 memset(bh->b_data, 0, bh->b_size);
4402 set_buffer_uptodate(bh);
4403 unlock_buffer(bh);
4404 goto has_buffer;
4405 }
4406 }
4407
4408make_io:
4409 /*
4410 * If we need to do any I/O, try to pre-readahead extra
4411 * blocks from the inode table.
4412 */
4413 if (EXT4_SB(sb)->s_inode_readahead_blks) {
4414 ext4_fsblk_t b, end, table;
4415 unsigned num;
4416 __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4417
4418 table = ext4_inode_table(sb, gdp);
4419 /* s_inode_readahead_blks is always a power of 2 */
4420 b = block & ~((ext4_fsblk_t) ra_blks - 1);
4421 if (table > b)
4422 b = table;
4423 end = b + ra_blks;
4424 num = EXT4_INODES_PER_GROUP(sb);
4425 if (ext4_has_group_desc_csum(sb))
4426 num -= ext4_itable_unused_count(sb, gdp);
4427 table += num / inodes_per_block;
4428 if (end > table)
4429 end = table;
4430 while (b <= end)
4431 sb_breadahead(sb, b++);
4432 }
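		/*
		 * Illustrative example, assuming ra_blks = 32: for
		 * block = 4242 the window is aligned down to b = 4224
		 * and extends to end = 4256, then clamped so it stays
		 * within the used portion of this group's inode table.
		 */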
4433
4434 /*
4435 * There are other valid inodes in the buffer, this inode
4436 * has in-inode xattrs, or we don't have this inode in memory.
4437 * Read the block from disk.
4438 */
4439 trace_ext4_load_inode(inode);
4440 get_bh(bh);
4441 bh->b_end_io = end_buffer_read_sync;
4442 submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
4443 wait_on_buffer(bh);
4444 if (!buffer_uptodate(bh)) {
4445 EXT4_ERROR_INODE_BLOCK(inode, block,
4446 "unable to read itable block");
4447 brelse(bh);
4448 return -EIO;
4449 }
4450 }
4451has_buffer:
4452 iloc->bh = bh;
4453 return 0;
4454}
4455
4456int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4457{
4458 /* We have all inode data except xattrs in memory here. */
4459 return __ext4_get_inode_loc(inode, iloc,
4460 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
4461}
4462
4463void ext4_set_inode_flags(struct inode *inode)
4464{
4465 unsigned int flags = EXT4_I(inode)->i_flags;
4466 unsigned int new_fl = 0;
4467
4468 if (flags & EXT4_SYNC_FL)
4469 new_fl |= S_SYNC;
4470 if (flags & EXT4_APPEND_FL)
4471 new_fl |= S_APPEND;
4472 if (flags & EXT4_IMMUTABLE_FL)
4473 new_fl |= S_IMMUTABLE;
4474 if (flags & EXT4_NOATIME_FL)
4475 new_fl |= S_NOATIME;
4476 if (flags & EXT4_DIRSYNC_FL)
4477 new_fl |= S_DIRSYNC;
4478 if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode) &&
4479 !ext4_should_journal_data(inode) && !ext4_has_inline_data(inode) &&
4480 !ext4_encrypted_inode(inode))
4481 new_fl |= S_DAX;
4482 inode_set_flags(inode, new_fl,
4483 S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
4484}
4485
4486/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
4487void ext4_get_inode_flags(struct ext4_inode_info *ei)
4488{
4489 unsigned int vfs_fl;
4490 unsigned long old_fl, new_fl;
4491
4492 do {
4493 vfs_fl = ei->vfs_inode.i_flags;
4494 old_fl = ei->i_flags;
4495 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
4496 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
4497 EXT4_DIRSYNC_FL);
4498 if (vfs_fl & S_SYNC)
4499 new_fl |= EXT4_SYNC_FL;
4500 if (vfs_fl & S_APPEND)
4501 new_fl |= EXT4_APPEND_FL;
4502 if (vfs_fl & S_IMMUTABLE)
4503 new_fl |= EXT4_IMMUTABLE_FL;
4504 if (vfs_fl & S_NOATIME)
4505 new_fl |= EXT4_NOATIME_FL;
4506 if (vfs_fl & S_DIRSYNC)
4507 new_fl |= EXT4_DIRSYNC_FL;
4508 } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
4509}
4510
4511static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4512 struct ext4_inode_info *ei)
4513{
4514 blkcnt_t i_blocks;
4515 struct inode *inode = &(ei->vfs_inode);
4516 struct super_block *sb = inode->i_sb;
4517
4518 if (ext4_has_feature_huge_file(sb)) {
4519 /* we are using combined 48 bit field */
4520 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4521 le32_to_cpu(raw_inode->i_blocks_lo);
4522 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4523 /* i_blocks is stored in units of the file system block size */
4524 return i_blocks << (inode->i_blkbits - 9);
4525 } else {
4526 return i_blocks;
4527 }
4528 } else {
4529 return le32_to_cpu(raw_inode->i_blocks_lo);
4530 }
4531}
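/*
 * Illustrative example: with huge_file enabled, HUGE_FILE set on the
 * inode and a 4096-byte block size (i_blkbits = 12), a stored count of
 * 10 file system blocks is returned as 10 << 3 = 80 512-byte units.
 */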
4532
4533static inline void ext4_iget_extra_inode(struct inode *inode,
4534 struct ext4_inode *raw_inode,
4535 struct ext4_inode_info *ei)
4536{
4537 __le32 *magic = (void *)raw_inode +
4538 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4539 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize + sizeof(__le32) <=
4540 EXT4_INODE_SIZE(inode->i_sb) &&
4541 *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4542 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4543 ext4_find_inline_data_nolock(inode);
4544 } else
4545 EXT4_I(inode)->i_inline_off = 0;
4546}
4547
4548int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4549{
4550 if (!ext4_has_feature_project(inode->i_sb))
4551 return -EOPNOTSUPP;
4552 *projid = EXT4_I(inode)->i_projid;
4553 return 0;
4554}
4555
4556struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4557{
4558 struct ext4_iloc iloc;
4559 struct ext4_inode *raw_inode;
4560 struct ext4_inode_info *ei;
4561 struct inode *inode;
4562 journal_t *journal = EXT4_SB(sb)->s_journal;
4563 long ret;
4564 loff_t size;
4565 int block;
4566 uid_t i_uid;
4567 gid_t i_gid;
4568 projid_t i_projid;
4569
4570 inode = iget_locked(sb, ino);
4571 if (!inode)
4572 return ERR_PTR(-ENOMEM);
4573 if (!(inode->i_state & I_NEW))
4574 return inode;
4575
4576 ei = EXT4_I(inode);
4577 iloc.bh = NULL;
4578
4579 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4580 if (ret < 0)
4581 goto bad_inode;
4582 raw_inode = ext4_raw_inode(&iloc);
4583
4584 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4585 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4586 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4587 EXT4_INODE_SIZE(inode->i_sb) ||
4588 (ei->i_extra_isize & 3)) {
4589 EXT4_ERROR_INODE(inode,
4590 "bad extra_isize %u (inode size %u)",
4591 ei->i_extra_isize,
4592 EXT4_INODE_SIZE(inode->i_sb));
4593 ret = -EFSCORRUPTED;
4594 goto bad_inode;
4595 }
4596 } else
4597 ei->i_extra_isize = 0;
4598
4599 /* Precompute checksum seed for inode metadata */
4600 if (ext4_has_metadata_csum(sb)) {
4601 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4602 __u32 csum;
4603 __le32 inum = cpu_to_le32(inode->i_ino);
4604 __le32 gen = raw_inode->i_generation;
4605 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4606 sizeof(inum));
4607 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4608 sizeof(gen));
4609 }
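	/*
	 * I.e. i_csum_seed = crc32c(crc32c(s_csum_seed, inode number),
	 * generation), so inode checksums are bound to both the inode
	 * number and its generation.
	 */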
4610
4611 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4612 EXT4_ERROR_INODE(inode, "checksum invalid");
4613 ret = -EFSBADCRC;
4614 goto bad_inode;
4615 }
4616
4617 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4618 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4619 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4620 if (ext4_has_feature_project(sb) &&
4621 EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4622 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4623 i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4624 else
4625 i_projid = EXT4_DEF_PROJID;
4626
4627 if (!(test_opt(inode->i_sb, NO_UID32))) {
4628 i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4629 i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4630 }
4631 i_uid_write(inode, i_uid);
4632 i_gid_write(inode, i_gid);
4633 ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4634 set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4635
4636 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4637 ei->i_inline_off = 0;
4638 ei->i_dir_start_lookup = 0;
4639 ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4640 /* We now have enough fields to check if the inode was active or not.
4641 * This is needed because nfsd might try to access dead inodes;
4642 * the test is the same one that e2fsck uses.
4643 * NeilBrown 1999oct15
4644 */
4645 if (inode->i_nlink == 0) {
4646 if ((inode->i_mode == 0 ||
4647 !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4648 ino != EXT4_BOOT_LOADER_INO) {
4649 /* this inode is deleted */
4650 ret = -ESTALE;
4651 goto bad_inode;
4652 }
4653 /* The only unlinked inodes we let through here have
4654 * valid i_mode and are being read by the orphan
4655 * recovery code: that's fine, we're about to complete
4656 * the process of deleting those.
4657 * OR it is the EXT4_BOOT_LOADER_INO which is
4658 * not initialized on a new filesystem. */
4659 }
4660 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4661 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4662 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4663 if (ext4_has_feature_64bit(sb))
4664 ei->i_file_acl |=
4665 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4666 inode->i_size = ext4_isize(raw_inode);
4667 if ((size = i_size_read(inode)) < 0) {
4668 EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
4669 ret = -EFSCORRUPTED;
4670 goto bad_inode;
4671 }
4672 ei->i_disksize = inode->i_size;
4673#ifdef CONFIG_QUOTA
4674 ei->i_reserved_quota = 0;
4675#endif
4676 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4677 ei->i_block_group = iloc.block_group;
4678 ei->i_last_alloc_group = ~0;
4679 /*
4680 * NOTE! The in-memory inode i_data array is in little-endian order
4681 * even on big-endian machines: we do NOT byteswap the block numbers!
4682 */
4683 for (block = 0; block < EXT4_N_BLOCKS; block++)
4684 ei->i_data[block] = raw_inode->i_block[block];
4685 INIT_LIST_HEAD(&ei->i_orphan);
4686
4687 /*
4688 * Set transaction id's of transactions that have to be committed
4689 * to finish f[data]sync. We set them to currently running transaction
4690 * as we cannot be sure that the inode or some of its metadata isn't
4691 * part of the transaction - the inode could have been reclaimed and
4692 * now it is reread from disk.
4693 */
4694 if (journal) {
4695 transaction_t *transaction;
4696 tid_t tid;
4697
4698 read_lock(&journal->j_state_lock);
4699 if (journal->j_running_transaction)
4700 transaction = journal->j_running_transaction;
4701 else
4702 transaction = journal->j_committing_transaction;
4703 if (transaction)
4704 tid = transaction->t_tid;
4705 else
4706 tid = journal->j_commit_sequence;
4707 read_unlock(&journal->j_state_lock);
4708 ei->i_sync_tid = tid;
4709 ei->i_datasync_tid = tid;
4710 }
4711
4712 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4713 if (ei->i_extra_isize == 0) {
4714 /* The extra space is currently unused. Use it. */
4715 BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4716 ei->i_extra_isize = sizeof(struct ext4_inode) -
4717 EXT4_GOOD_OLD_INODE_SIZE;
4718 } else {
4719 ext4_iget_extra_inode(inode, raw_inode, ei);
4720 }
4721 }
4722
4723 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4724 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4725 EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4726 EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4727
4728 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4729 inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4730 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4731 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4732 inode->i_version |=
4733 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4734 }
4735 }
4736
4737 ret = 0;
4738 if (ei->i_file_acl &&
4739 !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4740 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4741 ei->i_file_acl);
4742 ret = -EFSCORRUPTED;
4743 goto bad_inode;
4744 } else if (!ext4_has_inline_data(inode)) {
4745 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4746 if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4747 (S_ISLNK(inode->i_mode) &&
4748 !ext4_inode_is_fast_symlink(inode))))
4749 /* Validate extent which is part of inode */
4750 ret = ext4_ext_check_inode(inode);
4751 } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4752 (S_ISLNK(inode->i_mode) &&
4753 !ext4_inode_is_fast_symlink(inode))) {
4754 /* Validate block references which are part of inode */
4755 ret = ext4_ind_check_inode(inode);
4756 }
4757 }
4758 if (ret)
4759 goto bad_inode;
4760
4761 if (S_ISREG(inode->i_mode)) {
4762 inode->i_op = &ext4_file_inode_operations;
4763 inode->i_fop = &ext4_file_operations;
4764 ext4_set_aops(inode);
4765 } else if (S_ISDIR(inode->i_mode)) {
4766 inode->i_op = &ext4_dir_inode_operations;
4767 inode->i_fop = &ext4_dir_operations;
4768 } else if (S_ISLNK(inode->i_mode)) {
4769 if (ext4_encrypted_inode(inode)) {
4770 inode->i_op = &ext4_encrypted_symlink_inode_operations;
4771 ext4_set_aops(inode);
4772 } else if (ext4_inode_is_fast_symlink(inode)) {
4773 inode->i_link = (char *)ei->i_data;
4774 inode->i_op = &ext4_fast_symlink_inode_operations;
4775 nd_terminate_link(ei->i_data, inode->i_size,
4776 sizeof(ei->i_data) - 1);
4777 } else {
4778 inode->i_op = &ext4_symlink_inode_operations;
4779 ext4_set_aops(inode);
4780 }
4781 inode_nohighmem(inode);
4782 } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4783 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4784 inode->i_op = &ext4_special_inode_operations;
4785 if (raw_inode->i_block[0])
4786 init_special_inode(inode, inode->i_mode,
4787 old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4788 else
4789 init_special_inode(inode, inode->i_mode,
4790 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4791 } else if (ino == EXT4_BOOT_LOADER_INO) {
4792 make_bad_inode(inode);
4793 } else {
4794 ret = -EFSCORRUPTED;
4795 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4796 goto bad_inode;
4797 }
4798 brelse(iloc.bh);
4799 ext4_set_inode_flags(inode);
4800 unlock_new_inode(inode);
4801 return inode;
4802
4803bad_inode:
4804 brelse(iloc.bh);
4805 iget_failed(inode);
4806 return ERR_PTR(ret);
4807}
4808
4809struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
4810{
4811 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4812 return ERR_PTR(-EFSCORRUPTED);
4813 return ext4_iget(sb, ino);
4814}
4815
4816static int ext4_inode_blocks_set(handle_t *handle,
4817 struct ext4_inode *raw_inode,
4818 struct ext4_inode_info *ei)
4819{
4820 struct inode *inode = &(ei->vfs_inode);
4821 u64 i_blocks = inode->i_blocks;
4822 struct super_block *sb = inode->i_sb;
4823
4824 if (i_blocks <= ~0U) {
4825 /*
4826 * i_blocks can be represented in a 32 bit variable
4827 * as multiple of 512 bytes
4828 */
4829 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4830 raw_inode->i_blocks_high = 0;
4831 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4832 return 0;
4833 }
4834 if (!ext4_has_feature_huge_file(sb))
4835 return -EFBIG;
4836
4837 if (i_blocks <= 0xffffffffffffULL) {
4838 /*
4839 * i_blocks can be represented in a 48 bit variable
4840 * as multiple of 512 bytes
4841 */
4842 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4843 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4844 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4845 } else {
4846 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4847 /* i_blocks is stored in units of the file system block size */
4848 i_blocks = i_blocks >> (inode->i_blkbits - 9);
4849 raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4850 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4851 }
4852 return 0;
4853}
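/*
 * Illustrative example: i_blocks = 2^33 512-byte units (a 4 TiB file)
 * does not fit in 32 bits but fits in 48, so with huge_file it is
 * stored split across i_blocks_lo/i_blocks_high without setting
 * EXT4_INODE_HUGE_FILE; only counts above 2^48 - 1 need the flag and
 * the conversion to file system block units.
 */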
4854
4855struct other_inode {
4856 unsigned long orig_ino;
4857 struct ext4_inode *raw_inode;
4858};
4859
4860static int other_inode_match(struct inode *inode, unsigned long ino,
4861 void *data)
4862{
4863 struct other_inode *oi = (struct other_inode *) data;
4864
4865 if ((inode->i_ino != ino) ||
4866 (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
4867 I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
4868 ((inode->i_state & I_DIRTY_TIME) == 0))
4869 return 0;
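	/*
	 * The unlocked test above is only an optimisation; re-check the
	 * inode state under i_lock before clearing the dirty-time flags.
	 */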
4870 spin_lock(&inode->i_lock);
4871 if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
4872 I_DIRTY_SYNC | I_DIRTY_DATASYNC)) == 0) &&
4873 (inode->i_state & I_DIRTY_TIME)) {
4874 struct ext4_inode_info *ei = EXT4_I(inode);
4875
4876 inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
4877 spin_unlock(&inode->i_lock);
4878
4879 spin_lock(&ei->i_raw_lock);
4880 EXT4_INODE_SET_XTIME(i_ctime, inode, oi->raw_inode);
4881 EXT4_INODE_SET_XTIME(i_mtime, inode, oi->raw_inode);
4882 EXT4_INODE_SET_XTIME(i_atime, inode, oi->raw_inode);
4883 ext4_inode_csum_set(inode, oi->raw_inode, ei);
4884 spin_unlock(&ei->i_raw_lock);
4885 trace_ext4_other_inode_update_time(inode, oi->orig_ino);
4886 return -1;
4887 }
4888 spin_unlock(&inode->i_lock);
4889 return -1;
4890}
4891
4892/*
4893 * Opportunistically update the other time fields for other inodes in
4894 * the same inode table block.
4895 */
4896static void ext4_update_other_inodes_time(struct super_block *sb,
4897 unsigned long orig_ino, char *buf)
4898{
4899 struct other_inode oi;
4900 unsigned long ino;
4901 int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4902 int inode_size = EXT4_INODE_SIZE(sb);
4903
4904 oi.orig_ino = orig_ino;
4905 /*
4906 * Calculate the first inode in the inode table block. Inode
4907 * numbers are one-based. That is, the first inode in a block
4908 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
4909 */
4910 ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
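	/*
	 * E.g. orig_ino = 19 with 16 inodes per block gives
	 * ino = ((19 - 1) & ~15) + 1 = 17.
	 */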
4911 for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
4912 if (ino == orig_ino)
4913 continue;
4914 oi.raw_inode = (struct ext4_inode *) buf;
4915 (void) find_inode_nowait(sb, ino, other_inode_match, &oi);
4916 }
4917}
4918
4919/*
4920 * Post the struct inode info into an on-disk inode location in the
4921 * buffer-cache. This gobbles the caller's reference to the
4922 * buffer_head in the inode location struct.
4923 *
4924 * The caller must have write access to iloc->bh.
4925 */
4926static int ext4_do_update_inode(handle_t *handle,
4927 struct inode *inode,
4928 struct ext4_iloc *iloc)
4929{
4930 struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4931 struct ext4_inode_info *ei = EXT4_I(inode);
4932 struct buffer_head *bh = iloc->bh;
4933 struct super_block *sb = inode->i_sb;
4934 int err = 0, rc, block;
4935 int need_datasync = 0, set_large_file = 0;
4936 uid_t i_uid;
4937 gid_t i_gid;
4938 projid_t i_projid;
4939
4940 spin_lock(&ei->i_raw_lock);
4941
4942 /* For fields not tracked in the in-memory inode,
4943 * initialise them to zero for new inodes. */
4944 if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4945 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4946
4947 ext4_get_inode_flags(ei);
4948 raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4949 i_uid = i_uid_read(inode);
4950 i_gid = i_gid_read(inode);
4951 i_projid = from_kprojid(&init_user_ns, ei->i_projid);
4952 if (!(test_opt(inode->i_sb, NO_UID32))) {
4953 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4954 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4955/*
4956 * Fix up interoperability with old kernels. Otherwise, old inodes get
4957 * re-used with the upper 16 bits of the uid/gid intact
4958 */
4959 if (ei->i_dtime && list_empty(&ei->i_orphan)) {
4960 raw_inode->i_uid_high = 0;
4961 raw_inode->i_gid_high = 0;
4962 } else {
4963 raw_inode->i_uid_high =
4964 cpu_to_le16(high_16_bits(i_uid));
4965 raw_inode->i_gid_high =
4966 cpu_to_le16(high_16_bits(i_gid));
4967 }
4968 } else {
4969 raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4970 raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4971 raw_inode->i_uid_high = 0;
4972 raw_inode->i_gid_high = 0;
4973 }
4974 raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4975
4976 EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4977 EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4978 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4979 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4980
4981 err = ext4_inode_blocks_set(handle, raw_inode, ei);
4982 if (err) {
4983 spin_unlock(&ei->i_raw_lock);
4984 goto out_brelse;
4985 }
4986 raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4987 raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4988 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
4989 raw_inode->i_file_acl_high =
4990 cpu_to_le16(ei->i_file_acl >> 32);
4991 raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4992 if (ei->i_disksize != ext4_isize(raw_inode)) {
4993 ext4_isize_set(raw_inode, ei->i_disksize);
4994 need_datasync = 1;
4995 }
4996 if (ei->i_disksize > 0x7fffffffULL) {
4997 if (!ext4_has_feature_large_file(sb) ||
4998 EXT4_SB(sb)->s_es->s_rev_level ==
4999 cpu_to_le32(EXT4_GOOD_OLD_REV))
5000 set_large_file = 1;
5001 }
5002 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5003 if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5004 if (old_valid_dev(inode->i_rdev)) {
5005 raw_inode->i_block[0] =
5006 cpu_to_le32(old_encode_dev(inode->i_rdev));
5007 raw_inode->i_block[1] = 0;
5008 } else {
5009 raw_inode->i_block[0] = 0;
5010 raw_inode->i_block[1] =
5011 cpu_to_le32(new_encode_dev(inode->i_rdev));
5012 raw_inode->i_block[2] = 0;
5013 }
5014 } else if (!ext4_has_inline_data(inode)) {
5015 for (block = 0; block < EXT4_N_BLOCKS; block++)
5016 raw_inode->i_block[block] = ei->i_data[block];
5017 }
5018
5019 if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5020 raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
5021 if (ei->i_extra_isize) {
5022 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5023 raw_inode->i_version_hi =
5024 cpu_to_le32(inode->i_version >> 32);
5025 raw_inode->i_extra_isize =
5026 cpu_to_le16(ei->i_extra_isize);
5027 }
5028 }
5029
5030 BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
5031 i_projid != EXT4_DEF_PROJID);
5032
5033 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
5034 EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
5035 raw_inode->i_projid = cpu_to_le32(i_projid);
5036
5037 ext4_inode_csum_set(inode, raw_inode, ei);
5038 spin_unlock(&ei->i_raw_lock);
5039 if (inode->i_sb->s_flags & MS_LAZYTIME)
5040 ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5041 bh->b_data);
5042
5043 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5044 rc = ext4_handle_dirty_metadata(handle, NULL, bh);
5045 if (!err)
5046 err = rc;
5047 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5048 if (set_large_file) {
5049 BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5050 err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
5051 if (err)
5052 goto out_brelse;
5053 ext4_update_dynamic_rev(sb);
5054 ext4_set_feature_large_file(sb);
5055 ext4_handle_sync(handle);
5056 err = ext4_handle_dirty_super(handle, sb);
5057 }
5058 ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5059out_brelse:
5060 brelse(bh);
5061 ext4_std_error(inode->i_sb, err);
5062 return err;
5063}
5064
5065/*
5066 * ext4_write_inode()
5067 *
5068 * We are called from a few places:
5069 *
5070 * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5071 * Here, there will be no transaction running. We wait for any running
5072 * transaction to commit.
5073 *
5074 * - Within flush work (sys_sync(), kupdate and such).
5075 * We wait on commit, if told to.
5076 *
5077 * - Within iput_final() -> write_inode_now()
5078 * We wait on commit, if told to.
5079 *
5080 * In all cases it is actually safe for us to return without doing anything,
5081 * because the inode has been copied into a raw inode buffer in
5082 * ext4_mark_inode_dirty(). This is a correctness thing for WB_SYNC_ALL
5083 * writeback.
5084 *
5085 * Note that we are absolutely dependent upon all inode dirtiers doing the
5086 * right thing: they *must* call mark_inode_dirty() after dirtying info in
5087 * which we are interested.
5088 *
5089 * It would be a bug for them to not do this. The code:
5090 *
5091 * mark_inode_dirty(inode)
5092 * stuff();
5093 * inode->i_size = expr;
5094 *
5095 * is in error because write_inode() could occur while `stuff()' is running,
5096 * and the new i_size will be lost. Plus the inode will no longer be on the
5097 * superblock's dirty inode list.
5098 */
5099int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5100{
5101 int err;
5102
5103 if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
5104 return 0;
5105
5106 if (EXT4_SB(inode->i_sb)->s_journal) {
5107 if (ext4_journal_current_handle()) {
5108 jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5109 dump_stack();
5110 return -EIO;
5111 }
5112
5113 /*
5114 * No need to force transaction in WB_SYNC_NONE mode. Also
5115 * ext4_sync_fs() will force the commit after everything is
5116 * written.
5117 */
5118 if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5119 return 0;
5120
5121 err = ext4_force_commit(inode->i_sb);
5122 } else {
5123 struct ext4_iloc iloc;
5124
5125 err = __ext4_get_inode_loc(inode, &iloc, 0);
5126 if (err)
5127 return err;
5128 /*
5129 * sync(2) will flush the whole buffer cache. No need to do
5130 * it here separately for each inode.
5131 */
5132 if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5133 sync_dirty_buffer(iloc.bh);
5134 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5135 EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
5136 "IO error syncing inode");
5137 err = -EIO;
5138 }
5139 brelse(iloc.bh);
5140 }
5141 return err;
5142}
5143
5144/*
5145 * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
5146 * buffers that are attached to a page straddling i_size and are undergoing
5147 * commit. In that case we have to wait for commit to finish and try again.
5148 */
5149static void ext4_wait_for_tail_page_commit(struct inode *inode)
5150{
5151 struct page *page;
5152 unsigned offset;
5153 journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5154 tid_t commit_tid = 0;
5155 int ret;
5156
5157 offset = inode->i_size & (PAGE_SIZE - 1);
5158 /*
5159 * All buffers in the last page remain valid? Then there's nothing to
5160 * do. We do the check mainly to optimize the common PAGE_SIZE ==
5161 * blocksize case.
5162 */
5163 if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
5164 return;
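	/*
	 * E.g. with 4096-byte pages and a 1024-byte block size, the
	 * check above returns for offsets 3073..4095: i_size then falls
	 * inside the page's last block, so every buffer in the page
	 * starts before i_size and remains valid.
	 */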
5165 while (1) {
5166 page = find_lock_page(inode->i_mapping,
5167 inode->i_size >> PAGE_SHIFT);
5168 if (!page)
5169 return;
5170 ret = __ext4_journalled_invalidatepage(page, offset,
5171 PAGE_SIZE - offset);
5172 unlock_page(page);
5173 put_page(page);
5174 if (ret != -EBUSY)
5175 return;
5176 commit_tid = 0;
5177 read_lock(&journal->j_state_lock);
5178 if (journal->j_committing_transaction)
5179 commit_tid = journal->j_committing_transaction->t_tid;
5180 read_unlock(&journal->j_state_lock);
5181 if (commit_tid)
5182 jbd2_log_wait_commit(journal, commit_tid);
5183 }
5184}
5185
5186/*
5187 * ext4_setattr()
5188 *
5189 * Called from notify_change.
5190 *
5191 * We want to trap VFS attempts to truncate the file as soon as
5192 * possible. In particular, we want to make sure that when the VFS
5193 * shrinks i_size, we put the inode on the orphan list and modify
5194 * i_disksize immediately, so that during the subsequent flushing of
5195 * dirty pages and freeing of disk blocks, we can guarantee that any
5196 * commit will leave the blocks being flushed in an unused state on
5197 * disk. (On recovery, the inode will get truncated and the blocks will
5198 * be freed, so we have a strong guarantee that no future commit will
5199 * leave these blocks visible to the user.)
5200 *
5201 * Another thing we have to ensure is that if we are in ordered mode
5202 * and the inode is still attached to the committing transaction, we
5203 * must start writeout of all the dirty pages which are being truncated.
5204 * This way we are sure that all the data written in the previous
5205 * transaction are already on disk (truncate waits for pages under
5206 * writeback).
5207 *
5208 * Called with inode->i_mutex down.
5209 */
5210int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5211{
5212 struct inode *inode = d_inode(dentry);
5213 int error, rc = 0;
5214 int orphan = 0;
5215 const unsigned int ia_valid = attr->ia_valid;
5216
5217 error = setattr_prepare(dentry, attr);
5218 if (error)
5219 return error;
5220
5221 if (is_quota_modification(inode, attr)) {
5222 error = dquot_initialize(inode);
5223 if (error)
5224 return error;
5225 }
5226 if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
5227 (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
5228 handle_t *handle;
5229
5230 /* (user+group)*(old+new) structure, inode write (sb,
5231 * inode block, ? - but truncate inode update has it) */
5232 handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5233 (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5234 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5235 if (IS_ERR(handle)) {
5236 error = PTR_ERR(handle);
5237 goto err_out;
5238 }
5239 error = dquot_transfer(inode, attr);
5240 if (error) {
5241 ext4_journal_stop(handle);
5242 return error;
5243 }
5244 /* Update corresponding info in inode so that everything is in
5245 * one transaction */
5246 if (attr->ia_valid & ATTR_UID)
5247 inode->i_uid = attr->ia_uid;
5248 if (attr->ia_valid & ATTR_GID)
5249 inode->i_gid = attr->ia_gid;
5250 error = ext4_mark_inode_dirty(handle, inode);
5251 ext4_journal_stop(handle);
5252 }
5253
5254 if (attr->ia_valid & ATTR_SIZE) {
5255 handle_t *handle;
5256 loff_t oldsize = inode->i_size;
5257 int shrink = (attr->ia_size <= inode->i_size);
5258
5259 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5260 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5261
5262 if (attr->ia_size > sbi->s_bitmap_maxbytes)
5263 return -EFBIG;
5264 }
5265 if (!S_ISREG(inode->i_mode))
5266 return -EINVAL;
5267
5268 if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
5269 inode_inc_iversion(inode);
5270
5271 if (ext4_should_order_data(inode) &&
5272 (attr->ia_size < inode->i_size)) {
5273 error = ext4_begin_ordered_truncate(inode,
5274 attr->ia_size);
5275 if (error)
5276 goto err_out;
5277 }
5278 if (attr->ia_size != inode->i_size) {
5279 handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5280 if (IS_ERR(handle)) {
5281 error = PTR_ERR(handle);
5282 goto err_out;
5283 }
5284 if (ext4_handle_valid(handle) && shrink) {
5285 error = ext4_orphan_add(handle, inode);
5286 orphan = 1;
5287 }
5288 /*
5289 * Update c/mtime on truncate up, ext4_truncate() will
5290 * update c/mtime in shrink case below
5291 */
5292 if (!shrink) {
5293 inode->i_mtime = current_time(inode);
5294 inode->i_ctime = inode->i_mtime;
5295 }
5296 down_write(&EXT4_I(inode)->i_data_sem);
5297 EXT4_I(inode)->i_disksize = attr->ia_size;
5298 rc = ext4_mark_inode_dirty(handle, inode);
5299 if (!error)
5300 error = rc;
5301 /*
5302 * We have to update i_size under i_data_sem together
5303 * with i_disksize to avoid races with writeback code
5304 * running ext4_wb_update_i_disksize().
5305 */
5306 if (!error)
5307 i_size_write(inode, attr->ia_size);
5308 up_write(&EXT4_I(inode)->i_data_sem);
5309 ext4_journal_stop(handle);
5310 if (error) {
5311 if (orphan)
5312 ext4_orphan_del(NULL, inode);
5313 goto err_out;
5314 }
5315 }
5316 if (!shrink)
5317 pagecache_isize_extended(inode, oldsize, inode->i_size);
5318
5319 /*
5320 * Blocks are going to be removed from the inode. Wait
5321 * for dio in flight. Temporarily disable
5322 * dioread_nolock to prevent livelock.
5323 */
5324 if (orphan) {
5325 if (!ext4_should_journal_data(inode)) {
5326 ext4_inode_block_unlocked_dio(inode);
5327 inode_dio_wait(inode);
5328 ext4_inode_resume_unlocked_dio(inode);
5329 } else
5330 ext4_wait_for_tail_page_commit(inode);
5331 }
5332 down_write(&EXT4_I(inode)->i_mmap_sem);
5333 /*
5334 * Truncate pagecache after we've waited for commit
5335 * in data=journal mode to make pages freeable.
5336 */
5337 truncate_pagecache(inode, inode->i_size);
5338 if (shrink) {
5339 rc = ext4_truncate(inode);
5340 if (rc)
5341 error = rc;
5342 }
5343 up_write(&EXT4_I(inode)->i_mmap_sem);
5344 }
5345
5346 if (!error) {
5347 setattr_copy(inode, attr);
5348 mark_inode_dirty(inode);
5349 }
5350
5351 /*
5352 * If the call to ext4_truncate failed to get a transaction handle at
5353 * all, we need to clean up the in-core orphan list manually.
5354 */
5355 if (orphan && inode->i_nlink)
5356 ext4_orphan_del(NULL, inode);
5357
5358 if (!error && (ia_valid & ATTR_MODE))
5359 rc = posix_acl_chmod(inode, inode->i_mode);
5360
5361err_out:
5362 ext4_std_error(inode->i_sb, error);
5363 if (!error)
5364 error = rc;
5365 return error;
5366}
5367
5368int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
5369 struct kstat *stat)
5370{
5371 struct inode *inode;
5372 unsigned long long delalloc_blocks;
5373
5374 inode = d_inode(dentry);
5375 generic_fillattr(inode, stat);
5376
5377 /*
5378 * If there is inline data in the inode, the inode will normally not
5379 * have data blocks allocated (it may have an external xattr block).
5380 * Report at least one sector for such files, so tools like tar, rsync
5381 * and others don't incorrectly think the file is completely sparse.
5382 */
5383 if (unlikely(ext4_has_inline_data(inode)))
5384 stat->blocks += (stat->size + 511) >> 9;
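	/*
	 * E.g. a 100-byte inline-data file reports
	 * (100 + 511) >> 9 = 1 extra 512-byte sector.
	 */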
5385
5386 /*
5387 * We can't update i_blocks if the block allocation is delayed
5388 * otherwise in the case of system crash before the real block
5389 * allocation is done, we will have i_blocks inconsistent with
5390 * on-disk file blocks.
5391 * We always keep i_blocks updated together with the real
5392 * allocation. But so as not to confuse userspace, stat
5393 * will return a block count that includes the delayed
5394 * allocation blocks for this file.
5395 */
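	/*
	 * E.g. with 4096-byte blocks (one block per cluster) and 5
	 * delayed-allocation blocks reserved, this adds 5 << 3 = 40
	 * 512-byte units to stat->blocks.
	 */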
5396 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5397 EXT4_I(inode)->i_reserved_data_blocks);
5398 stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5399 return 0;
5400}
5401
5402static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5403 int pextents)
5404{
5405 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5406 return ext4_ind_trans_blocks(inode, lblocks);
5407 return ext4_ext_index_trans_blocks(inode, pextents);
5408}
5409
5410/*
5411 * Account for index blocks, block group bitmaps and block group
5412 * descriptor blocks if we modify data blocks and index blocks.
5413 * In the worst case, the index blocks are spread over different block groups.
5414 *
5415 * If data blocks are discontiguous, they may be spread over
5416 * different block groups too. Even if they are contiguous, with
5417 * flexbg they could still cross a block group boundary.
5418 *
5419 * Also account for superblock, inode, quota and xattr blocks
5420 */
5421static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5422 int pextents)
5423{
5424 ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5425 int gdpblocks;
5426 int idxblocks;
5427 int ret = 0;
5428
5429 /*
5430 * How many index blocks need to touch to map @lblocks logical blocks
5431 * to @pextents physical extents?
5432 */
5433 idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5434
5435 ret = idxblocks;
5436
5437 /*
5438 * Now let's see how many group bitmaps and group descriptors need
5439 * to account
5440 */
5441 groups = idxblocks + pextents;
5442 gdpblocks = groups;
5443 if (groups > ngroups)
5444 groups = ngroups;
5445 if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5446 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5447
5448 /* bitmaps and block group descriptor blocks */
5449 ret += groups + gdpblocks;
5450
5451 /* Blocks for super block, inode, quota and xattr blocks */
5452 ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5453
5454 return ret;
5455}
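/*
 * Illustrative example (hypothetical values): for idxblocks = 2 and
 * pextents = 1, groups = 3, so on a filesystem with at least three
 * groups and three group descriptor blocks this reserves 2 + 3 + 3
 * credits plus EXT4_META_TRANS_BLOCKS() for the superblock, inode,
 * quota and xattr blocks.
 */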
5456
5457/*
5458 * Calculate the total number of credits to reserve to fit
5459 * the modification of a single page into a single transaction,
5460 * which may include multiple chunks of block allocations.
5461 *
5462 * This could be called via ext4_write_begin()
5463 *
5464 * We need to consider the worst case, when
5465 * we allocate one new block per extent.
5466 */
5467int ext4_writepage_trans_blocks(struct inode *inode)
5468{
5469 int bpp = ext4_journal_blocks_per_page(inode);
5470 int ret;
5471
5472 ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5473
5474 /* Account for data blocks for journalled mode */
5475 if (ext4_should_journal_data(inode))
5476 ret += bpp;
5477 return ret;
5478}
5479
5480/*
5481 * Calculate the journal credits for a chunk of data modification.
5482 *
5483 * This is called from DIO, fallocate or whoever calling
5484 * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5485 *
5486 * Journal buffers for data blocks are not included here, as DIO
5487 * and fallocate do not need to journal data buffers.
5488 */
5489int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5490{
5491 return ext4_meta_trans_blocks(inode, nrblocks, 1);
5492}
5493
5494/*
5495 * The caller must have previously called ext4_reserve_inode_write().
5496 * Given this, we know that the caller already has write access to iloc->bh.
5497 */
5498int ext4_mark_iloc_dirty(handle_t *handle,
5499 struct inode *inode, struct ext4_iloc *iloc)
5500{
5501 int err = 0;
5502
5503 if (IS_I_VERSION(inode))
5504 inode_inc_iversion(inode);
5505
5506 /* the do_update_inode consumes one bh->b_count */
5507 get_bh(iloc->bh);
5508
5509 /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5510 err = ext4_do_update_inode(handle, inode, iloc);
5511 put_bh(iloc->bh);
5512 return err;
5513}
5514
5515/*
5516 * On success, we end up with an outstanding reference count against
5517 * iloc->bh. This _must_ be cleaned up later.
5518 */
5519
5520int
5521ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5522 struct ext4_iloc *iloc)
5523{
5524 int err;
5525
5526 err = ext4_get_inode_loc(inode, iloc);
5527 if (!err) {
5528 BUFFER_TRACE(iloc->bh, "get_write_access");
5529 err = ext4_journal_get_write_access(handle, iloc->bh);
5530 if (err) {
5531 brelse(iloc->bh);
5532 iloc->bh = NULL;
5533 }
5534 }
5535 ext4_std_error(inode->i_sb, err);
5536 return err;
5537}
5538
5539/*
5540 * Expand an inode by new_extra_isize bytes.
5541 * Returns 0 on success or negative error number on failure.
5542 */
5543static int ext4_expand_extra_isize(struct inode *inode,
5544 unsigned int new_extra_isize,
5545 struct ext4_iloc iloc,
5546 handle_t *handle)
5547{
5548 struct ext4_inode *raw_inode;
5549 struct ext4_xattr_ibody_header *header;
5550
5551 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
5552 return 0;
5553
5554 raw_inode = ext4_raw_inode(&iloc);
5555
5556 header = IHDR(inode, raw_inode);
5557
5558 /* No extended attributes present */
5559 if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5560 header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5561 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
5562 new_extra_isize);
5563 EXT4_I(inode)->i_extra_isize = new_extra_isize;
5564 return 0;
5565 }
5566
5567 /* try to expand with EAs present */
5568 return ext4_expand_extra_isize_ea(inode, new_extra_isize,
5569 raw_inode, handle);
5570}
5571
5572/*
5573 * What we do here is to mark the in-core inode as clean with respect to inode
5574 * dirtiness (it may still be data-dirty).
5575 * This means that the in-core inode may be reaped by prune_icache
5576 * without having to perform any I/O. This is a very good thing,
5577 * because *any* task may call prune_icache - even ones which
5578 * have a transaction open against a different journal.
5579 *
5580 * Is this cheating? Not really. Sure, we haven't written the
5581 * inode out, but prune_icache isn't a user-visible syncing function.
5582 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5583 * we start and wait on commits.
5584 */
5585int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
5586{
5587 struct ext4_iloc iloc;
5588 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5589 static unsigned int mnt_count;
5590 int err, ret;
5591
5592 might_sleep();
5593 trace_ext4_mark_inode_dirty(inode, _RET_IP_);
5594 err = ext4_reserve_inode_write(handle, inode, &iloc);
5595 if (err)
5596 return err;
5597 if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
5598 !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5599 /*
5600 * In nojournal mode, we can immediately attempt to expand
5601 * the inode. When journaled, we first need to obtain extra
5602 * buffer credits since we may write into the EA block
5603 * with this same handle. If journal_extend fails, then it will
5604 * only result in a minor loss of functionality for that inode.
5605 * If this is felt to be critical, then e2fsck should be run to
5606 * force a large enough s_min_extra_isize.
5607 */
5608 if (!ext4_handle_valid(handle) ||
5609 jbd2_journal_extend(handle,
5610 EXT4_DATA_TRANS_BLOCKS(inode->i_sb)) == 0) {
5611 ret = ext4_expand_extra_isize(inode,
5612 sbi->s_want_extra_isize,
5613 iloc, handle);
5614 if (ret) {
5615 if (mnt_count !=
5616 le16_to_cpu(sbi->s_es->s_mnt_count)) {
5617 ext4_warning(inode->i_sb,
5618 "Unable to expand inode %lu. Delete"
5619 " some EAs or run e2fsck.",
5620 inode->i_ino);
5621 mnt_count =
5622 le16_to_cpu(sbi->s_es->s_mnt_count);
5623 }
5624 }
5625 }
5626 }
5627 return ext4_mark_iloc_dirty(handle, inode, &iloc);
5628}
5629
5630/*
5631 * ext4_dirty_inode() is called from __mark_inode_dirty()
5632 *
5633 * We're really interested in the case where a file is being extended.
5634 * i_size has been changed by generic_commit_write() and we thus need
5635 * to include the updated inode in the current transaction.
5636 *
5637 * Also, dquot_alloc_block() will always dirty the inode when blocks
5638 * are allocated to the file.
5639 *
5640 * If the inode is marked synchronous, we don't honour that here - doing
5641 * so would cause a commit on atime updates, which we don't bother doing.
5642 * We handle synchronous inodes at the highest possible level.
5643 *
5644 * If only the I_DIRTY_TIME flag is set, we can skip everything. If
5645 * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
5646 * to copy into the on-disk inode structure are the timestamp fields.
5647 */
5648void ext4_dirty_inode(struct inode *inode, int flags)
5649{
5650 handle_t *handle;
5651
5652 if (flags == I_DIRTY_TIME)
5653 return;
5654 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
5655 if (IS_ERR(handle))
5656 goto out;
5657
5658 ext4_mark_inode_dirty(handle, inode);
5659
5660 ext4_journal_stop(handle);
5661out:
5662 return;
5663}
5664
5665#if 0
5666/*
5667 * Bind an inode's backing buffer_head into this transaction, to prevent
5668 * it from being flushed to disk early. Unlike
5669 * ext4_reserve_inode_write, this leaves behind no bh reference and
5670 * returns no iloc structure, so the caller needs to repeat the iloc
5671 * lookup to mark the inode dirty later.
5672 */
5673static int ext4_pin_inode(handle_t *handle, struct inode *inode)
5674{
5675 struct ext4_iloc iloc;
5676
5677 int err = 0;
5678 if (handle) {
5679 err = ext4_get_inode_loc(inode, &iloc);
5680 if (!err) {
5681 BUFFER_TRACE(iloc.bh, "get_write_access");
5682 err = jbd2_journal_get_write_access(handle, iloc.bh);
5683 if (!err)
5684 err = ext4_handle_dirty_metadata(handle,
5685 NULL,
5686 iloc.bh);
5687 brelse(iloc.bh);
5688 }
5689 }
5690 ext4_std_error(inode->i_sb, err);
5691 return err;
5692}
5693#endif
5694
int ext4_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT4_JOURNAL(inode);
	if (!journal)
		return 0;
	if (is_journal_aborted(journal))
		return -EROFS;

	/* Wait for all existing dio workers */
	ext4_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);

	/*
	 * Before flushing the journal and switching inode's aops, we have
	 * to flush all dirty data the inode has.  There can be outstanding
	 * delayed allocations, there can be unwritten extents created by
	 * fallocate or buffered writes in dioread_nolock mode covered by
	 * dirty data which can be converted only after flushing the dirty
	 * data (and journalled aops don't know how to handle these cases).
	 */
	if (val) {
		down_write(&EXT4_I(inode)->i_mmap_sem);
		err = filemap_write_and_wait(inode->i_mapping);
		if (err < 0) {
			up_write(&EXT4_I(inode)->i_mmap_sem);
			ext4_inode_resume_unlocked_dio(inode);
			return err;
		}
	}

	percpu_down_write(&sbi->s_journal_flag_rwsem);
	jbd2_journal_lock_updates(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	else {
		err = jbd2_journal_flush(journal);
		if (err < 0) {
			jbd2_journal_unlock_updates(journal);
			percpu_up_write(&sbi->s_journal_flag_rwsem);
			ext4_inode_resume_unlocked_dio(inode);
			return err;
		}
		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
	}
	ext4_set_aops(inode);
	/*
	 * Update inode->i_flags after EXT4_INODE_JOURNAL_DATA was updated.
	 * E.g. S_DAX may get cleared / set.
	 */
	ext4_set_inode_flags(inode);

	jbd2_journal_unlock_updates(journal);
	percpu_up_write(&sbi->s_journal_flag_rwsem);

	if (val)
		up_write(&EXT4_I(inode)->i_mmap_sem);
	ext4_inode_resume_unlocked_dio(inode);

	/* Finally we can mark the inode as dirty. */

	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_mark_inode_dirty(handle, inode);
	ext4_handle_sync(handle);
	ext4_journal_stop(handle);
	ext4_std_error(inode->i_sb, err);

	return err;
}

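/*
 * Callback for ext4_walk_page_buffers(): returns true when @bh has not
 * yet been mapped to a disk block.  The @handle argument is unused and
 * exists only to match the walk callback signature.
 */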
static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
{
	return !buffer_mapped(bh);
}

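/*
 * Write-fault handler: make an mmap'ed page writable, allocating blocks
 * if necessary.  The delalloc fast path avoids starting a transaction
 * entirely; otherwise we reserve journal credits before filling the
 * hole, and in data=journal mode also add the page's buffers to the
 * running transaction.
 */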
int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	loff_t size;
	unsigned long len;
	int ret;
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	handle_t *handle;
	get_block_t *get_block;
	int retries = 0;

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	down_read(&EXT4_I(inode)->i_mmap_sem);
	/* Delalloc case is easy... */
	if (test_opt(inode->i_sb, DELALLOC) &&
	    !ext4_should_journal_data(inode) &&
	    !ext4_nonda_switch(inode->i_sb)) {
		do {
			ret = block_page_mkwrite(vma, vmf,
						 ext4_da_get_block_prep);
		} while (ret == -ENOSPC &&
			 ext4_should_retry_alloc(inode->i_sb, &retries));
		goto out_ret;
	}

	lock_page(page);
	size = i_size_read(inode);
	/* Page got truncated from under us? */
	if (page->mapping != mapping || page_offset(page) > size) {
		unlock_page(page);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (page->index == size >> PAGE_SHIFT)
		len = size & ~PAGE_MASK;
	else
		len = PAGE_SIZE;
	/*
	 * Return if we have all the buffers mapped.  This avoids the need
	 * to do journal_start/journal_stop, which can block and take a
	 * long time.
	 */
	if (page_has_buffers(page)) {
		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
					    0, len, NULL,
					    ext4_bh_unmapped)) {
			/* Wait so that we don't change page under IO */
			wait_for_stable_page(page);
			ret = VM_FAULT_LOCKED;
			goto out;
		}
	}
	unlock_page(page);
	/* OK, we need to fill the hole... */
	if (ext4_should_dioread_nolock(inode))
		get_block = ext4_get_block_unwritten;
	else
		get_block = ext4_get_block;
retry_alloc:
	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
				    ext4_writepage_trans_blocks(inode));
	if (IS_ERR(handle)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}
	ret = block_page_mkwrite(vma, vmf, get_block);
	if (!ret && ext4_should_journal_data(inode)) {
		if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
			  PAGE_SIZE, NULL, do_journal_get_write_access)) {
			unlock_page(page);
			ret = VM_FAULT_SIGBUS;
			ext4_journal_stop(handle);
			goto out;
		}
		ext4_set_inode_state(inode, EXT4_STATE_JDATA);
	}
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry_alloc;
out_ret:
	ret = block_page_mkwrite_return(ret);
out:
	up_read(&EXT4_I(inode)->i_mmap_sem);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

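/*
 * Read-fault handler: a thin wrapper around filemap_fault() that holds
 * i_mmap_sem shared, so the fault cannot race with truncate or with a
 * data-journaling mode switch changing the mapping underneath it.
 */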
int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int err;

	down_read(&EXT4_I(inode)->i_mmap_sem);
	err = filemap_fault(vma, vmf);
	up_read(&EXT4_I(inode)->i_mmap_sem);

	return err;
}

/*
 * Find the first extent at or after @lblk in an inode that is not a hole.
 * Search for @map_len blocks at most.  The extent is returned in @result.
 *
 * The function returns 1 if we found an extent.  The function returns 0 in
 * case there is no extent at or after @lblk and in that case also sets
 * @result->es_len to 0.  In case of error, the error code is returned.
 */
int ext4_get_next_extent(struct inode *inode, ext4_lblk_t lblk,
			 unsigned int map_len, struct extent_status *result)
{
	struct ext4_map_blocks map;
	struct extent_status es = {};
	int ret;

	map.m_lblk = lblk;
	map.m_len = map_len;

	/*
	 * For non-extent based files this loop may iterate several times
	 * since we do not determine the full hole size.
	 */
	while (map.m_len > 0) {
		ret = ext4_map_blocks(NULL, inode, &map, 0);
		if (ret < 0)
			return ret;
		/* Is there an extent covering m_lblk? Just return it. */
		if (ret > 0) {
			int status;

			ext4_es_store_pblock(result, map.m_pblk);
			result->es_lblk = map.m_lblk;
			result->es_len = map.m_len;
			if (map.m_flags & EXT4_MAP_UNWRITTEN)
				status = EXTENT_STATUS_UNWRITTEN;
			else
				status = EXTENT_STATUS_WRITTEN;
			ext4_es_store_status(result, status);
			return 1;
		}
		ext4_es_find_delayed_extent_range(inode, map.m_lblk,
						  map.m_lblk + map.m_len - 1,
						  &es);
		/* Is there delalloc data before the next block in the extent tree? */
		if (es.es_len && es.es_lblk < map.m_lblk + map.m_len) {
			ext4_lblk_t offset = 0;

			if (es.es_lblk < lblk)
				offset = lblk - es.es_lblk;
			result->es_lblk = es.es_lblk + offset;
			ext4_es_store_pblock(result,
					     ext4_es_pblock(&es) + offset);
			result->es_len = es.es_len - offset;
			ext4_es_store_status(result, ext4_es_status(&es));

			return 1;
		}
		/* There's a hole at m_lblk, advance past it */
		map.m_lblk += map.m_len;
		map_len -= map.m_len;
		map.m_len = map_len;
		cond_resched();
	}
	result->es_len = 0;
	return 0;
}
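
/*
 * Illustrative usage sketch, not part of the original file: the helper
 * below is hypothetical and compiled out, following the #if 0 convention
 * used above.  It shows how a caller could use ext4_get_next_extent()
 * to find the logical block of the first non-hole data at or after
 * @start within a @max_len block window; real callers layer additional
 * checks (e.g. page-cache state for unwritten extents) on top of this.
 */
#if 0
static int example_first_data_block(struct inode *inode, ext4_lblk_t start,
				    unsigned int max_len, ext4_lblk_t *out)
{
	struct extent_status es;
	int ret;

	ret = ext4_get_next_extent(inode, start, max_len, &es);
	if (ret < 0)		/* mapping lookup failed */
		return ret;
	if (ret == 0)		/* nothing but a hole in the searched range */
		return -ENXIO;
	*out = es.es_lblk;	/* first block of the extent found */
	return 0;
}
#endif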