/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"


void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
                            unsigned int from, unsigned int to)
{
        struct buffer_head *head = page_buffers(page);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        unsigned int start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;
                if (gfs2_is_jdata(ip))
                        set_buffer_uptodate(bh);
                gfs2_trans_add_bh(ip->i_gl, bh, 0);
        }
}
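
/*
 * Illustrative sketch (an addition, not part of the original file): the loop
 * above visits each block-sized buffer in the page and skips the ones that
 * do not overlap the byte range [from, to).  The interval arithmetic can be
 * checked standalone; build with -DGFS2_AOPS_RANGE_DEMO (a made-up guard)
 * and arbitrary demo values for bsize, from and to.
 */
#ifdef GFS2_AOPS_RANGE_DEMO
#include <stdio.h>

int main(void)
{
        unsigned int bsize = 512, from = 600, to = 1500;
        unsigned int start, end;

        for (start = 0; start < 4096; start = end) {
                end = start + bsize;
                if (end <= from || start >= to)
                        continue;       /* buffer outside the write */
                printf("buffer [%u,%u) overlaps write [%u,%u)\n",
                       start, end, from, to);
        }
        return 0;
}
#endif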

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -EIO;
        return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
                                 struct buffer_head *bh_result, int create)
{
        return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if the page is ok to write, an error code on failure, or zero
 * if the page was handled here (redirtied or invalidated).
 */

static int gfs2_writepage_common(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset;

        if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
                goto out;
        if (current->journal_info)
                goto redirty;
        /* Is the page fully outside i_size? (truncate in progress) */
        offset = i_size & (PAGE_CACHE_SIZE-1);
        if (page->index > end_index || (page->index == end_index && !offset)) {
                page->mapping->a_ops->invalidatepage(page, 0);
                goto out;
        }
        return 1;
redirty:
        redirty_page_for_writepage(wbc, page);
out:
        unlock_page(page);
        return 0;
}
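
/*
 * Illustrative sketch (an addition, not part of the original file): the
 * end-of-file test used above.  A page whose index lies past end_index, or
 * exactly at end_index when i_size falls on a page boundary, contains no
 * file data and is invalidated.  4k pages are assumed for the demo; build
 * with -DGFS2_AOPS_EOF_DEMO (a made-up guard).
 */
#ifdef GFS2_AOPS_EOF_DEMO
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE (1UL << DEMO_PAGE_SHIFT)

static const char *classify(unsigned long index, long long i_size)
{
        unsigned long end_index = i_size >> DEMO_PAGE_SHIFT;
        unsigned offset = i_size & (DEMO_PAGE_SIZE - 1);

        if (index > end_index || (index == end_index && !offset))
                return "fully beyond i_size: invalidate";
        return "contains file data: write";
}

int main(void)
{
        printf("%s\n", classify(2, 8192)); /* i_size on a page boundary */
        printf("%s\n", classify(2, 9000)); /* page straddles i_size     */
        return 0;
}
#endif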

/**
 * gfs2_writeback_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writeback_writepage(struct page *page,
                                    struct writeback_control *wbc)
{
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_ordered_writepage - Write page for ordered data files
 * @page: The page to write
 * @wbc: The writeback control
 *
 */

static int gfs2_ordered_writepage(struct page *page,
                                  struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        int ret;

        ret = gfs2_writepage_common(page, wbc);
        if (ret <= 0)
                return ret;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        gfs2_page_add_databufs(ip, page, 0, inode->i_sb->s_blocksize-1);
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);

        if (PageChecked(page)) {
                ClearPageChecked(page);
                if (!page_has_buffers(page)) {
                        create_empty_buffers(page, inode->i_sb->s_blocksize,
                                             (1 << BH_Dirty)|(1 << BH_Uptodate));
                }
                gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
        }
        return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int ret;
        int done_trans = 0;

        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
                ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
                if (ret)
                        goto out_ignore;
                done_trans = 1;
        }
        ret = gfs2_writepage_common(page, wbc);
        if (ret > 0)
                ret = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
        return ret;

out_ignore:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}
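
/*
 * Hedged sketch (an addition, not part of the original file) of the
 * bracketing pattern above: when a jdata page still needs journaling
 * (PageChecked), a transaction must be opened before the page is written
 * and closed afterwards, and a failure to open one redirties the page
 * rather than writing it.  The helpers are stand-ins, not GFS2 API; build
 * with -DGFS2_AOPS_TRANS_DEMO (a made-up guard).
 */
#ifdef GFS2_AOPS_TRANS_DEMO
#include <stdio.h>

static int trans_begin(void) { printf("begin transaction\n"); return 0; }
static void trans_end(void) { printf("end transaction\n"); }
static int write_the_page(void) { printf("write page\n"); return 0; }

int main(void)
{
        int needs_journal = 1;  /* corresponds to PageChecked() */
        int ret;

        if (needs_journal && trans_begin() != 0)
                return 1;       /* cannot journal: redirty and bail out */
        ret = write_the_page();
        if (needs_journal)
                trans_end();
        return ret;
}
#endif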

/**
 * gfs2_writeback_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * For the data=writeback case we can already ignore buffer heads
 * and write whole extents at once. This is a big reduction in the
 * number of I/O requests we send and the bmap calls we make in this case.
 */
static int gfs2_writeback_writepages(struct address_space *mapping,
                                     struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: The last page index to consider
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
                                    struct writeback_control *wbc,
                                    struct pagevec *pvec,
                                    int nr_pages, pgoff_t end)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        loff_t i_size = i_size_read(inode);
        pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
        unsigned offset = i_size & (PAGE_CACHE_SIZE-1);
        unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize);
        int i;
        int ret;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_pages; i++) {
                struct page *page = pvec->pages[i];

                lock_page(page);

                if (unlikely(page->mapping != mapping)) {
                        unlock_page(page);
                        continue;
                }

                if (!wbc->range_cyclic && page->index > end) {
                        ret = 1;
                        unlock_page(page);
                        continue;
                }

                if (wbc->sync_mode != WB_SYNC_NONE)
                        wait_on_page_writeback(page);

                if (PageWriteback(page) ||
                    !clear_page_dirty_for_io(page)) {
                        unlock_page(page);
                        continue;
                }

                /* Is the page fully outside i_size? (truncate in progress) */
                if (page->index > end_index || (page->index == end_index && !offset)) {
                        page->mapping->a_ops->invalidatepage(page, 0);
                        unlock_page(page);
                        continue;
                }

                ret = __gfs2_jdata_writepage(page, wbc);

                if (ret || (--(wbc->nr_to_write) <= 0))
                        ret = 1;
        }
        gfs2_trans_end(sdp);
        return ret;
}
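
/*
 * Illustrative sketch (an addition, not part of the original file): the
 * transaction above is sized for the worst case, every page in the pagevec
 * dirtying every block it covers.  With 4k pages and 1k blocks, 16 pages
 * reserve 64 blocks.  Demo values only; build with -DGFS2_AOPS_RESERVE_DEMO
 * (a made-up guard).
 */
#ifdef GFS2_AOPS_RESERVE_DEMO
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096, blocksize = 1024;
        int nr_pages = 16;
        unsigned nrblocks = nr_pages * (page_size / blocksize);

        printf("reserve %u blocks for %d pages\n", nrblocks, nr_pages);
        return 0;
}
#endif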

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
        pgoff_t index;
        pgoff_t end;
        int scanned = 0;
        int range_whole = 0;

        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                scanned = 1;
        }

retry:
        while (!done && (index <= end) &&
               (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
                scanned = 1;
                ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;

                pagevec_release(&pvec);
                cond_resched();
        }

        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                scanned = 1;
                index = 0;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
        struct buffer_head *dibh;
        u64 dsize = i_size_read(&ip->i_inode);
        void *kaddr;
        int error;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (error)
                return error;

        kaddr = kmap_atomic(page, KM_USER0);
        if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
        memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
        SetPageUptodate(page);

        return 0;
}
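
/*
 * Illustrative sketch (an addition, not part of the original file): a
 * stuffed file keeps its data in the dinode block immediately after the
 * on-disk inode header, so reading it is a bounded memcpy followed by
 * zero-filling the rest of the page.  The header and data sizes below are
 * invented demo values; build with -DGFS2_AOPS_STUFFED_DEMO (a made-up
 * guard).
 */
#ifdef GFS2_AOPS_STUFFED_DEMO
#include <stdio.h>
#include <string.h>

int main(void)
{
        char block[512];        /* stand-in for the dinode block      */
        char page[4096];        /* stand-in for the page              */
        size_t header = 232;    /* pretend sizeof(struct gfs2_dinode) */
        size_t dsize = 1000;    /* i_size, possibly too large         */

        memset(block, 'x', sizeof(block));
        if (dsize > sizeof(block) - header)
                dsize = sizeof(block) - header; /* cap to what fits */
        memcpy(page, block + header, dsize);
        memset(page + dsize, 0, sizeof(page) - dsize);
        printf("copied %zu bytes, zeroed %zu\n", dsize, sizeof(page) - dsize);
        return 0;
}
#endif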


/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. Also it's
 * called by gfs2_readpage() once the required lock has been granted.
 *
 */

static int __gfs2_readpage(void *file, struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(page->mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        int error;

        if (gfs2_is_stuffed(ip)) {
                error = stuffed_readpage(ip, page);
                unlock_page(page);
        } else {
                error = mpage_readpage(page, gfs2_block_map);
        }

        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return -EIO;

        return error;
}

/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder gh;
        int error;

        unlock_page(page);
        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        error = gfs2_glock_nq(&gh);
        if (unlikely(error))
                goto out;
        error = AOP_TRUNCATED_PAGE;
        lock_page(page);
        if (page->mapping == mapping && !PageUptodate(page))
                error = __gfs2_readpage(file, page);
        else
                unlock_page(page);
        gfs2_glock_dq(&gh);
out:
        gfs2_holder_uninit(&gh);
        if (error && error != AOP_TRUNCATED_PAGE)
                lock_page(page);
        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @ra_state: The readahead state (or NULL for no readahead)
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                       char *buf, loff_t *pos, unsigned size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos / PAGE_CACHE_SIZE;
        unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
        unsigned copied = 0;
        unsigned amt;
        struct page *page;
        void *p;

        do {
                amt = size - copied;
                if (offset + amt > PAGE_CACHE_SIZE)
                        amt = PAGE_CACHE_SIZE - offset;
                page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                if (IS_ERR(page))
                        return PTR_ERR(page);
                p = kmap_atomic(page, KM_USER0);
                memcpy(buf + copied, p + offset, amt);
                kunmap_atomic(p, KM_USER0);
                mark_page_accessed(page);
                page_cache_release(page);
                copied += amt;
                index++;
                offset = 0;
        } while (copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
                          struct list_head *pages, unsigned nr_pages)
{
        struct inode *inode = mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_holder gh;
        int ret;

        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
        ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
                ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
        gfs2_glock_dq(&gh);
out_uninit:
        gfs2_holder_uninit(&gh);
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                ret = -EIO;
        return ret;
}

/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        int alloc_required;
        int error = 0;
        struct gfs2_alloc *al = NULL;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
        struct page *page;

        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
        error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
        if (&ip->i_inode == sdp->sd_rindex) {
                error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
                                           GL_NOCACHE, &m_ip->i_gh);
                if (unlikely(error)) {
                        gfs2_glock_dq(&ip->i_gh);
                        goto out_uninit;
                }
        }

        alloc_required = gfs2_write_alloc_required(ip, pos, len);

        if (alloc_required || gfs2_is_jdata(ip))
                gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

        if (alloc_required) {
                al = gfs2_alloc_get(ip);
                if (!al) {
                        error = -ENOMEM;
                        goto out_unlock;
                }

                error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_alloc_put;

                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
                        goto out_qunlock;
        }

        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += RES_STATFS + RES_QUOTA;
        if (&ip->i_inode == sdp->sd_rindex)
                rblocks += 2 * RES_STATFS;
        if (alloc_required)
                rblocks += gfs2_rg_blocks(al);

        error = gfs2_trans_begin(sdp, rblocks,
                                 PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize);
        if (error)
                goto out_trans_fail;

        error = -ENOMEM;
        flags |= AOP_FLAG_NOFS;
        page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;

        if (gfs2_is_stuffed(ip)) {
                error = 0;
                if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
                        error = gfs2_unstuff_dinode(ip, page);
                        if (error == 0)
                                goto prepare_write;
                } else if (!PageUptodate(page)) {
                        error = stuffed_readpage(ip, page);
                }
                goto out;
        }

prepare_write:
        error = __block_write_begin(page, from, len, gfs2_block_map);
out:
        if (error == 0)
                return 0;

        unlock_page(page);
        page_cache_release(page);

        gfs2_trans_end(sdp);
        if (pos + len > ip->i_inode.i_size)
                gfs2_trim_blocks(&ip->i_inode);
        goto out_trans_fail;

out_endtrans:
        gfs2_trans_end(sdp);
out_trans_fail:
        if (alloc_required) {
                gfs2_inplace_release(ip);
out_qunlock:
                gfs2_quota_unlock(ip);
out_alloc_put:
                gfs2_alloc_put(ip);
        }
out_unlock:
        if (&ip->i_inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
out_uninit:
        gfs2_holder_uninit(&ip->i_gh);
        return error;
}
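
/*
 * Illustrative sketch (an addition, not part of the original file): how the
 * rblocks reservation above adds up.  The constants stand in for RES_DINODE,
 * RES_STATFS and RES_QUOTA; the real values live in the GFS2 headers and
 * these are demo numbers only.  Build with -DGFS2_AOPS_RBLOCKS_DEMO (a
 * made-up guard).
 */
#ifdef GFS2_AOPS_RBLOCKS_DEMO
#include <stdio.h>

int main(void)
{
        unsigned res_dinode = 1, res_statfs = 1, res_quota = 2; /* demo values */
        unsigned data_blocks = 4, ind_blocks = 1, rg_blocks = 1;
        int jdata = 1, alloc_required = 1;
        unsigned rblocks = res_dinode + ind_blocks;

        if (jdata)
                rblocks += data_blocks ? data_blocks : 1;
        if (ind_blocks || data_blocks)
                rblocks += res_statfs + res_quota;
        if (alloc_required)
                rblocks += rg_blocks;
        printf("transaction reserves %u metadata blocks\n", rblocks);
        return 0;
}
#endif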

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        u64 fs_total, new_free;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                return;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
                goto out;
        update_statfs(sdp, m_bh, l_bh);
        brelse(l_bh);
out:
        brelse(m_bh);
}

/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        u64 to = pos + copied;
        void *kaddr;
        unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
        struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data;

        BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(buf + pos, kaddr + pos, copied);
        memset(kaddr + pos + copied, 0, len - copied);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);

        if (!PageUptodate(page))
                SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);

        if (copied) {
                if (inode->i_size < to)
                        i_size_write(inode, to);
                gfs2_dinode_out(ip, di);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                ip->i_gh.gh_flags |= GL_NOCACHE;
        }

        brelse(dibh);
        gfs2_trans_end(sdp);
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return copied;
}

/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
{
        struct inode *inode = page->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct buffer_head *dibh;
        struct gfs2_alloc *al = ip->i_alloc;
        unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
        unsigned int to = from + len;
        int ret;

        BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

        ret = gfs2_meta_inode_buffer(ip, &dibh);
        if (unlikely(ret)) {
                unlock_page(page);
                page_cache_release(page);
                goto failed;
        }

        gfs2_trans_add_bh(ip->i_gl, dibh, 1);

        if (gfs2_is_stuffed(ip))
                return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

        if (!gfs2_is_writeback(ip))
                gfs2_page_add_databufs(ip, page, from, to);

        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
        if (ret > 0) {
                gfs2_dinode_out(ip, dibh->b_data);
                mark_inode_dirty(inode);
        }

        if (inode == sdp->sd_rindex) {
                adjust_fs_space(inode);
                ip->i_gh.gh_flags |= GL_NOCACHE;
        }

        brelse(dibh);
failed:
        gfs2_trans_end(sdp);
        if (al) {
                gfs2_inplace_release(ip);
                gfs2_quota_unlock(ip);
                gfs2_alloc_put(ip);
        }
        if (inode == sdp->sd_rindex) {
                gfs2_glock_dq(&m_ip->i_gh);
                gfs2_holder_uninit(&m_ip->i_gh);
        }
        gfs2_glock_dq(&ip->i_gh);
        gfs2_holder_uninit(&ip->i_gh);
        return ret;
}

/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int gfs2_set_page_dirty(struct page *page)
{
        SetPageChecked(page);
        return __set_page_dirty_buffers(page);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_le.le_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_le.le_list);
                else
                        gfs2_remove_from_journal(bh, current->journal_info, 0);
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned long offset)
{
        struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!PageLocked(page));
        if (offset == 0)
                ClearPageChecked(page);
        if (!page_has_buffers(page))
                goto out;

        bh = head = page_buffers(page);
        do {
                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (offset == 0)
                try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @rw: READ or WRITE
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
{
        /*
         * Should we return an error here? I can't see that O_DIRECT for
         * a stuffed file makes any sense. For now we'll silently fall
         * back to buffered I/O
         */
        if (gfs2_is_stuffed(ip))
                return 0;

        if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
}


static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int rv;

        /*
         * Deferred lock, even if it's a write, since we do no allocation on
         * this path. All we need to change is atime, and this lock mode
         * ensures that other nodes have flushed their buffered read caches
         * (i.e. their page cache entries for this inode). We do not,
         * unfortunately, have the option of only flushing a range like the
         * VFS does.
         */
        gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
        rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
        if (rv != 1)
                goto out; /* dio not valid, fall back to buffered i/o */

        rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                  offset, nr_segs, gfs2_get_block_direct,
                                  NULL, NULL, 0);
out:
        gfs2_glock_dq_m(1, &gh);
        gfs2_holder_uninit(&gh);
        return rv;
}

/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 1 if the buffers were released, otherwise 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
        struct address_space *mapping = page->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        if (!page_has_buffers(page))
                return 0;

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        head = bh = page_buffers(page);
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_ail)
                        goto cannot_release;
                if (buffer_pinned(bh) || buffer_dirty(bh))
                        goto not_possible;
                bh = bh->b_this_page;
        } while (bh != head);
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);

        head = bh = page_buffers(page);
        do {
                gfs2_log_lock(sdp);
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        gfs2_assert_warn(sdp, list_empty(&bd->bd_list_tr));
                        if (!list_empty(&bd->bd_le.le_list)) {
                                if (!buffer_pinned(bh))
                                        list_del_init(&bd->bd_le.le_list);
                                else
                                        bd = NULL;
                        }
                        if (bd)
                                bd->bd_bh = NULL;
                        bh->b_private = NULL;
                }
                gfs2_log_unlock(sdp);
                if (bd)
                        kmem_cache_free(gfs2_bufdata_cachep, bd);

                bh = bh->b_this_page;
        } while (bh != head);

        return try_to_free_buffers(page);

not_possible: /* Should never happen */
        WARN_ON(buffer_dirty(bh));
        WARN_ON(buffer_pinned(bh));
cannot_release:
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
        return 0;
}

static const struct address_space_operations gfs2_writeback_aops = {
        .writepage = gfs2_writeback_writepage,
        .writepages = gfs2_writeback_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
        .writepage = gfs2_ordered_writepage,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepage = gfs2_jdata_writepage,
        .writepages = gfs2_jdata_writepages,
        .readpage = gfs2_readpage,
        .readpages = gfs2_readpages,
        .write_begin = gfs2_write_begin,
        .write_end = gfs2_write_end,
        .set_page_dirty = gfs2_set_page_dirty,
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_writeback(ip))
                inode->i_mapping->a_ops = &gfs2_writeback_aops;
        else if (gfs2_is_ordered(ip))
                inode->i_mapping->a_ops = &gfs2_ordered_aops;
        else if (gfs2_is_jdata(ip))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                BUG();
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
                             size_t from, size_t len)
{
        struct buffer_head *head = folio_buffers(folio);
        unsigned int bsize = head->b_size;
        struct buffer_head *bh;
        size_t to = from + len;
        size_t start, end;

        for (bh = head, start = 0; bh != head || !start;
             bh = bh->b_this_page, start = end) {
                end = start + bsize;
                if (end <= from)
                        continue;
                if (start >= to)
                        break;
                set_buffer_uptodate(bh);
                gfs2_trans_add_data(ip->i_gl, bh);
        }
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
                                  struct buffer_head *bh_result, int create)
{
        int error;

        error = gfs2_block_map(inode, lblock, bh_result, 0);
        if (error)
                return error;
        if (!buffer_mapped(bh_result))
                return -ENODATA;
        return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
                                  struct writeback_control *wbc)
{
        struct inode * const inode = folio->mapping->host;
        loff_t i_size = i_size_read(inode);

        /*
         * The folio straddles i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (folio_pos(folio) < i_size &&
            i_size < folio_pos(folio) + folio_size(folio))
                folio_zero_segment(folio, offset_in_folio(folio, i_size),
                                   folio_size(folio));

        return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
                                        wbc);
}
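
/*
 * Illustrative sketch (an addition, not part of the original file): a folio
 * that straddles i_size has its tail zeroed before each writeback, because
 * an mmap write may have dirtied bytes past EOF that must not reach disk.
 * Folio position and size are demo values; build with
 * -DGFS2_AOPS_STRADDLE_DEMO (a made-up guard).
 */
#ifdef GFS2_AOPS_STRADDLE_DEMO
#include <stdio.h>
#include <string.h>

int main(void)
{
        char folio[4096];
        long long folio_pos = 8192;     /* byte offset of the folio    */
        long long i_size = 9000;        /* EOF falls inside this folio */

        memset(folio, 'd', sizeof(folio));      /* pretend dirty data */
        if (folio_pos < i_size &&
            i_size < folio_pos + (long long)sizeof(folio)) {
                size_t off = i_size - folio_pos;

                memset(folio + off, 0, sizeof(folio) - off);
                printf("zeroed tail from offset %zu\n", off);
        }
        return 0;
}
#endif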

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
                                    struct writeback_control *wbc)
{
        struct inode *inode = folio->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (folio_test_checked(folio)) {
                folio_clear_checked(folio);
                if (!folio_buffers(folio)) {
                        create_empty_buffers(folio,
                                             inode->i_sb->s_blocksize,
                                             BIT(BH_Dirty)|BIT(BH_Uptodate));
                }
                gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
        }
        return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct iomap_writepage_ctx wpc = { };
        int ret;

        /*
         * Even if we didn't write enough pages here, we might still be holding
         * dirty pages in the ail. We forcibly flush the ail because we don't
         * want balance_dirty_pages() to loop indefinitely trying to write out
         * pages held in the ail that it can't find.
         */
        ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
        if (ret == 0 && wbc->nr_to_write > 0)
                set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
        return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
                                  struct writeback_control *wbc,
                                  struct folio_batch *fbatch,
                                  pgoff_t *done_index)
{
        struct inode *inode = mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        unsigned nrblocks;
        int i;
        int ret;
        size_t size = 0;
        int nr_folios = folio_batch_count(fbatch);

        for (i = 0; i < nr_folios; i++)
                size += folio_size(fbatch->folios[i]);
        nrblocks = size >> inode->i_blkbits;

        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
                return ret;

        for (i = 0; i < nr_folios; i++) {
                struct folio *folio = fbatch->folios[i];

                *done_index = folio->index;

                folio_lock(folio);

                if (unlikely(folio->mapping != mapping)) {
continue_unlock:
                        folio_unlock(folio);
                        continue;
                }

                if (!folio_test_dirty(folio)) {
                        /* someone wrote it for us */
                        goto continue_unlock;
                }

                if (folio_test_writeback(folio)) {
                        if (wbc->sync_mode != WB_SYNC_NONE)
                                folio_wait_writeback(folio);
                        else
                                goto continue_unlock;
                }

                BUG_ON(folio_test_writeback(folio));
                if (!folio_clear_dirty_for_io(folio))
                        goto continue_unlock;

                trace_wbc_writepage(wbc, inode_to_bdi(inode));

                ret = __gfs2_jdata_write_folio(folio, wbc);
                if (unlikely(ret)) {
                        if (ret == AOP_WRITEPAGE_ACTIVATE) {
                                folio_unlock(folio);
                                ret = 0;
                        } else {
                                /*
                                 * done_index is set past this page,
                                 * so media errors will not choke
                                 * background writeout for the entire
                                 * file. This has consequences for
                                 * range_cyclic semantics (ie. it may
                                 * not be suitable for data integrity
                                 * writeout).
                                 */
                                *done_index = folio_next_index(folio);
                                ret = 1;
                                break;
                        }
                }

                /*
                 * We stop writing back only if we are not doing
                 * integrity sync. In case of integrity sync we have to
                 * keep going until we have written all the pages
                 * we tagged for writeback prior to entering this loop.
                 */
                if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
                        ret = 1;
                        break;
                }
        }
        gfs2_trans_end(sdp);
        return ret;
}
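
/*
 * Illustrative sketch (an addition, not part of the original file): with
 * large folios the reservation is no longer a per-page constant, so the
 * batch's folio sizes are summed and converted to blocks using the inode's
 * block-size shift.  Sizes below are demo values; build with
 * -DGFS2_AOPS_BATCH_DEMO (a made-up guard).
 */
#ifdef GFS2_AOPS_BATCH_DEMO
#include <stdio.h>

int main(void)
{
        unsigned long folio_sizes[] = { 4096, 16384, 65536 };
        unsigned blkbits = 12;  /* 4k filesystem blocks */
        unsigned long size = 0;
        unsigned i;

        for (i = 0; i < sizeof(folio_sizes) / sizeof(folio_sizes[0]); i++)
                size += folio_sizes[i];
        printf("reserve %lu blocks\n", size >> blkbits);
        return 0;
}
#endif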

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
                                  struct writeback_control *wbc)
{
        int ret = 0;
        int done = 0;
        struct folio_batch fbatch;
        int nr_folios;
        pgoff_t writeback_index;
        pgoff_t index;
        pgoff_t end;
        pgoff_t done_index;
        int cycled;
        int range_whole = 0;
        xa_mark_t tag;

        folio_batch_init(&fbatch);
        if (wbc->range_cyclic) {
                writeback_index = mapping->writeback_index; /* prev offset */
                index = writeback_index;
                if (index == 0)
                        cycled = 1;
                else
                        cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_SHIFT;
                end = wbc->range_end >> PAGE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
                cycled = 1; /* ignore range_cyclic tests */
        }
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag = PAGECACHE_TAG_TOWRITE;
        else
                tag = PAGECACHE_TAG_DIRTY;

retry:
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);
        done_index = index;
        while (!done && (index <= end)) {
                nr_folios = filemap_get_folios_tag(mapping, &index, end,
                                                   tag, &fbatch);
                if (nr_folios == 0)
                        break;

                ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
                                             &done_index);
                if (ret)
                        done = 1;
                if (ret > 0)
                        ret = 0;
                folio_batch_release(&fbatch);
                cond_resched();
        }

        if (!cycled && !done) {
                /*
                 * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
                cycled = 1;
                index = 0;
                end = writeback_index - 1;
                goto retry;
        }

        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = done_index;

        return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
        int ret;

        ret = gfs2_write_cache_jdata(mapping, wbc);
        if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
                gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_JDATA_WPAGES);
                ret = gfs2_write_cache_jdata(mapping, wbc);
        }
        return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
        struct buffer_head *dibh = NULL;
        size_t dsize = i_size_read(&ip->i_inode);
        void *from = NULL;
        int error = 0;

        /*
         * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero folio in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(folio->index)) {
                dsize = 0;
        } else {
                error = gfs2_meta_inode_buffer(ip, &dibh);
                if (error)
                        goto out;
                from = dibh->b_data + sizeof(struct gfs2_dinode);
        }

        folio_fill_tail(folio, 0, from, dsize);
        brelse(dibh);
out:
        folio_end_read(folio, error == 0);

        return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
        struct inode *inode = folio->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        int error;

        if (!gfs2_is_jdata(ip) ||
            (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
                error = iomap_read_folio(folio, &gfs2_iomap_ops);
        } else if (gfs2_is_stuffed(ip)) {
                error = stuffed_read_folio(ip, folio);
        } else {
                error = mpage_read_folio(folio, gfs2_block_map);
        }

        if (gfs2_withdrawing_or_withdrawn(sdp))
                return -EIO;

        return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                           size_t size)
{
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos >> PAGE_SHIFT;
        size_t copied = 0;

        do {
                size_t offset, chunk;
                struct folio *folio;

                folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
                if (IS_ERR(folio)) {
                        if (PTR_ERR(folio) == -EINTR)
                                continue;
                        return PTR_ERR(folio);
                }
                offset = *pos + copied - folio_pos(folio);
                chunk = min(size - copied, folio_size(folio) - offset);
                memcpy_from_folio(buf + copied, folio, offset, chunk);
                index = folio_next_index(folio);
                folio_put(folio);
                copied += chunk;
        } while (copied < size);
        (*pos) += size;
        return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
        struct inode *inode = rac->mapping->host;
        struct gfs2_inode *ip = GFS2_I(inode);

        if (gfs2_is_stuffed(ip))
                ;
        else if (gfs2_is_jdata(ip))
                mpage_readahead(rac, gfs2_block_map);
        else
                iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh;
        u64 fs_total, new_free;

        if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
                return;

        /* Total up the file system space, according to the latest rindex. */
        fs_total = gfs2_ri_total(sdp);
        if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
                goto out;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (fs_total > (m_sc->sc_total + l_sc->sc_total))
                new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
        else
                new_free = 0;
        spin_unlock(&sdp->sd_statfs_spin);
        fs_warn(sdp, "File system extended by %llu blocks.\n",
                (unsigned long long)new_free);
        gfs2_statfs_change(sdp, new_free, new_free, 0);

        update_statfs(sdp, m_bh);
        brelse(m_bh);
out:
        sdp->sd_rindex_uptodate = 0;
        gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
                              struct folio *folio)
{
        if (current->journal_info)
                folio_set_checked(folio);
        return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_holder i_gh;
        sector_t dblock = 0;
        int error;

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return 0;

        if (!gfs2_is_stuffed(ip))
                dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

        gfs2_glock_dq_uninit(&i_gh);

        return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
        struct gfs2_bufdata *bd;

        lock_buffer(bh);
        gfs2_log_lock(sdp);
        clear_buffer_dirty(bh);
        bd = bh->b_private;
        if (bd) {
                if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
                        list_del_init(&bd->bd_list);
                else {
                        spin_lock(&sdp->sd_ail_lock);
                        gfs2_remove_from_journal(bh, REMOVE_JDATA);
                        spin_unlock(&sdp->sd_ail_lock);
                }
        }
        bh->b_bdev = NULL;
        clear_buffer_mapped(bh);
        clear_buffer_req(bh);
        clear_buffer_new(bh);
        gfs2_log_unlock(sdp);
        unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
                                  size_t length)
{
        struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
        size_t stop = offset + length;
        int partial_page = (offset || length < folio_size(folio));
        struct buffer_head *bh, *head;
        unsigned long pos = 0;

        BUG_ON(!folio_test_locked(folio));
        if (!partial_page)
                folio_clear_checked(folio);
        head = folio_buffers(folio);
        if (!head)
                goto out;

        bh = head;
        do {
                if (pos + bh->b_size > stop)
                        return;

                if (offset <= pos)
                        gfs2_discard(sdp, bh);
                pos += bh->b_size;
                bh = bh->b_this_page;
        } while (bh != head);
out:
        if (!partial_page)
                filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
        struct address_space *mapping = folio->mapping;
        struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
        struct buffer_head *bh, *head;
        struct gfs2_bufdata *bd;

        head = folio_buffers(folio);
        if (!head)
                return false;

        /*
         * mm accommodates an old ext3 case where clean folios might
         * not have had the dirty bit cleared. Thus, it can send actual
         * dirty folios to ->release_folio() via shrink_active_list().
         *
         * As a workaround, we skip folios that contain dirty buffers
         * below. Once ->release_folio isn't called on dirty folios
         * anymore, we can warn on dirty buffers like we used to here
         * again.
         */

        gfs2_log_lock(sdp);
        bh = head;
        do {
                if (atomic_read(&bh->b_count))
                        goto cannot_release;
                bd = bh->b_private;
                if (bd && bd->bd_tr)
                        goto cannot_release;
                if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
                        goto cannot_release;
                bh = bh->b_this_page;
        } while (bh != head);

        bh = head;
        do {
                bd = bh->b_private;
                if (bd) {
                        gfs2_assert_warn(sdp, bd->bd_bh == bh);
                        bd->bd_bh = NULL;
                        bh->b_private = NULL;
                        /*
                         * The bd may still be queued as a revoke, in which
                         * case we must not dequeue nor free it.
                         */
                        if (!bd->bd_blkno && !list_empty(&bd->bd_list))
                                list_del_init(&bd->bd_list);
                        if (list_empty(&bd->bd_list))
                                kmem_cache_free(gfs2_bufdata_cachep, bd);
                }

                bh = bh->b_this_page;
        } while (bh != head);
        gfs2_log_unlock(sdp);

        return try_to_free_buffers(folio);

cannot_release:
        gfs2_log_unlock(sdp);
        return false;
}

static const struct address_space_operations gfs2_aops = {
        .writepages = gfs2_writepages,
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = iomap_dirty_folio,
        .release_folio = iomap_release_folio,
        .invalidate_folio = iomap_invalidate_folio,
        .bmap = gfs2_bmap,
        .migrate_folio = filemap_migrate_folio,
        .is_partially_uptodate = iomap_is_partially_uptodate,
        .error_remove_folio = generic_error_remove_folio,
};

static const struct address_space_operations gfs2_jdata_aops = {
        .writepages = gfs2_jdata_writepages,
        .read_folio = gfs2_read_folio,
        .readahead = gfs2_readahead,
        .dirty_folio = jdata_dirty_folio,
        .bmap = gfs2_bmap,
        .migrate_folio = buffer_migrate_folio,
        .invalidate_folio = gfs2_invalidate_folio,
        .release_folio = gfs2_release_folio,
        .is_partially_uptodate = block_is_partially_uptodate,
        .error_remove_folio = generic_error_remove_folio,
};

void gfs2_set_aops(struct inode *inode)
{
        if (gfs2_is_jdata(GFS2_I(inode)))
                inode->i_mapping->a_ops = &gfs2_jdata_aops;
        else
                inode->i_mapping->a_ops = &gfs2_aops;
}