// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

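/**
 * gfs2_page_add_databufs - add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page holding the buffers
 * @from: Offset of the first byte within the page
 * @len: Number of bytes in the range
 *
 * Walks the page's buffer heads and adds every buffer that overlaps the
 * byte range [@from, @from + @len) to the running transaction as
 * journaled data.
 */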
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated to the index just past the last page processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

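	/*
	 * One transaction covers the whole pagevec: reserve journal space
	 * (and revoke space) for every block these pages could dirty.
	 */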
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but with transactions
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						    tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages,
					       &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
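	/*
	 * For data integrity writeback, flush the log to unpin anything the
	 * first pass left in the journal, then write those pages back too.
	 */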
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

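	/*
	 * A stuffed file's data lives in the dinode block itself, directly
	 * after the dinode header; copy it out and zero the rest of the page.
	 */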
	kaddr = kmap_atomic(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}


/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything
 *    which is slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via ->read_folio later.
 * 2. We don't handle stuffed files here; we let ->read_folio do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

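	/* Stuffed files are filled in by ->read_folio; nothing to do here. */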
	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

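/**
 * jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping the folio belongs to
 * @folio: The folio
 *
 * If a transaction is running (current->journal_info is set), mark the
 * folio as checked so that writeback knows it must be journaled.
 */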
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

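/**
 * gfs2_discard - Remove a buffer from the journal before invalidation
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Clears the buffer's dirty state and unlinks its journal bookkeeping
 * (either off its pending list, or out of the journal and ail) so that
 * the folio it belongs to can safely be invalidated.
 */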
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

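/**
 * gfs2_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Offset of the first byte to invalidate
 * @length: Number of bytes to invalidate
 *
 * Discards the journal state of every buffer that lies entirely inside
 * the invalidated range, and releases the folio's buffers when the whole
 * folio is going away.
 */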
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared. Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below. Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}


static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

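/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Journaled-data (jdata) inodes need the buffer-head based operations;
 * everything else uses the iomap-based ones.
 */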
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}