// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

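/*
 * Slow path of lock_metapage(): sleep on mp->wait until META_locked is
 * released.  The folio lock is dropped while sleeping so the I/O
 * completion path can make progress, then re-taken before retrying.
 */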
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			folio_unlock(mp->folio);
			io_schedule();
			folio_lock(mp->folio);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->folio locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

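/*
 * When PAGE_SIZE is larger than the 4K metapage size (PSIZE), several
 * metapages share one folio.  The meta_anchor hangs off folio->private
 * and tracks the per-slot metapages, the number of I/Os in flight for
 * the folio, and the first error status seen by any of them.
 */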
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	blk_status_t status;
	struct metapage *mp[MPS_PER_PAGE];
};

static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	struct meta_anchor *anchor = folio->private;

	if (!anchor)
		return NULL;
	return anchor->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	a = folio->private;
	if (!a) {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		folio_attach_private(folio, a);
		kmap(&folio->page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a = folio->private;
	int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		folio_detach_private(folio);
		kunmap(&folio->page);
	}
}

static inline void inc_io(struct folio *folio)
{
	struct meta_anchor *anchor = folio->private;

	atomic_inc(&anchor->io_count);
}

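/*
 * Drop one I/O reference on the folio.  The first non-OK status is
 * sticky, so the completion handler sees the earliest error even when
 * several bios complete against the same folio.
 */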
static inline void dec_io(struct folio *folio, blk_status_t status,
			  void (*handler)(struct folio *, blk_status_t))
{
	struct meta_anchor *anchor = folio->private;

	if (anchor->status == BLK_STS_OK)
		anchor->status = status;

	if (atomic_dec_and_test(&anchor->io_count))
		handler(folio, anchor->status);
}

#else
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	return folio->private;
}

static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp) {
		folio_attach_private(folio, mp);
		kmap(&folio->page);
	}
	return 0;
}

static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	folio_detach_private(folio);
	kunmap(&folio->page);
}

#define inc_io(folio) do {} while(0)
#define dec_io(folio, status, handler) handler(folio, status)

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

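/*
 * Free a metapage once nothing references it: no holders, no nohomeok
 * pins, not dirty, and no I/O in flight.  Called with the folio locked.
 */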
static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(folio, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

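/*
 * Map a file-relative block to an on-disk block.  *len is clamped to
 * the remaining file blocks and, for mapped inodes, to the length of
 * the extent xtLookup() returns.  If i_ino is zero (the direct
 * mapping) the lookup is skipped and lblock is returned as-is.
 */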
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct folio *folio, blk_status_t status)
{
	if (status)
		printk(KERN_ERR "Read error %d at %#llx\n", status,
		       folio_pos(folio));

	folio_end_read(folio, status == 0);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	dec_io(folio, bio->bi_status, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
/*
 * This can race.  Recheck that log hasn't been set to null, and after
 * acquiring logsync lock, recheck lsn
 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct folio *folio, blk_status_t status)
{
	struct metapage *mp;
	unsigned int offset;

	if (status) {
		int err = blk_status_to_errno(status);
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		mapping_set_error(folio->mapping, err);
	}

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	folio_end_writeback(folio);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	BUG_ON(!folio->private);

	dec_io(folio, bio->bi_status, last_write_complete);
	bio_put(bio);
}

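/*
 * Write back the dirty metapages in one folio.  Dirty metapages that
 * are contiguous both in the folio and on disk are coalesced into a
 * single bio; a discontiguity submits the current bio and starts a new
 * one.  Metapages pinned by nohomeok are redirtied instead (kicking the
 * journal if needed), and inc_io()/dec_io() keep the in-flight count
 * balanced even when the block lookup fails.
 */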
static int metapage_write_folio(struct folio *folio,
		struct writeback_control *wbc, void *unused)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = folio->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = folio_pos(folio) >> inode->i_blkbits;
	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(folio);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(folio);
		xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = folio;

		/* Don't add the folio to the bio yet; we may extend this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		folio_redirty_for_writepage(wbc, folio);

	folio_unlock(folio);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		folio_end_writeback(folio);

	return 0;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
	bio_put(bio);
	folio_unlock(folio);
	dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(folio, BLK_STS_OK, last_write_complete);
	return -EIO;
}

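/*
 * ->writepages: batch the per-folio writes under a block plug so that
 * contiguous bios can be merged before being sent to the device.
 */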
static int metapage_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
	blk_finish_plug(&plug);

	return err;
}

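/*
 * ->read_folio: walk the folio in filesystem blocks, issuing one bio
 * per contiguous on-disk extent.  Unmapped blocks are skipped; if
 * nothing is mapped at all, the folio is simply unlocked.
 */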
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_folio(inode, folio);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!folio_test_locked(folio));
	page_start = folio_pos(folio) >> inode->i_blkbits;

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!folio->private)
				insert_metapage(folio, NULL);
			inc_io(folio);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = folio;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			bio_add_folio_nofail(bio, folio, len, offset);
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		folio_unlock(folio);

	return 0;
}

static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(folio, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				      size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.read_folio	= metapage_read_folio,
	.writepages	= metapage_writepages,
	.release_folio	= metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio	= filemap_dirty_folio,
};

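/*
 * Look up (or create) the metapage covering @lblock.  With @new, the
 * contents are zeroed rather than read from disk (and when PSIZE ==
 * PAGE_SIZE the folio is grabbed without reading at all).  Callers
 * normally go through the read_metapage()/get_metapage() wrappers in
 * jfs_metapage.h rather than calling this directly; a minimal usage
 * sketch:
 *
 *	mp = read_metapage(ip, lblock, PSIZE, 0);
 *	if (mp) {
 *		... examine or modify mp->data, then ...
 *		mark_metapage_dirty(mp);
 *		release_metapage(mp);
 *	}
 */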
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct folio *folio;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		folio = filemap_grab_folio(mapping, page_index);
		if (IS_ERR(folio)) {
			jfs_err("filemap_grab_folio failed!");
			return NULL;
		}
		folio_mark_uptodate(folio);
	} else {
		folio = read_mapping_folio(mapping, page_index, NULL);
		if (IS_ERR(folio)) {
			jfs_err("read_mapping_folio failed!");
			return NULL;
		}
		folio_lock(folio);
	}

	mp = folio_to_mp(folio, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->folio = folio;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = folio_address(folio) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(folio, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	folio_unlock(folio);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	folio_unlock(folio);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	folio_get(mp->folio);
	folio_lock(mp->folio);
	mp->count++;
	lock_metapage(mp);
	folio_unlock(mp->folio);
}

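/*
 * Synchronously write one locked folio, an open-coded equivalent of
 * the old write_one_page(): clear the dirty bit, call
 * metapage_write_folio() directly, wait for writeback to finish, then
 * report any mapping error.
 */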
static int metapage_write_one(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = folio_nr_pages(folio),
	};
	int ret = 0;

	BUG_ON(!folio_test_locked(folio));

	folio_wait_writeback(folio);

	if (folio_clear_dirty_for_io(folio)) {
		folio_get(folio);
		ret = metapage_write_folio(folio, &wbc, NULL);
		if (ret == 0)
			folio_wait_writeback(folio);
		folio_put(folio);
	} else {
		folio_unlock(folio);
	}

	if (!ret)
		ret = filemap_check_errors(mapping);
	return ret;
}

void force_metapage(struct metapage *mp)
{
	struct folio *folio = mp->folio;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	folio_get(folio);
	folio_lock(folio);
	folio_mark_dirty(folio);
	if (metapage_write_one(folio))
		jfs_error(mp->sb, "metapage_write_one() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	folio_put(folio);
}

void hold_metapage(struct metapage *mp)
{
	folio_lock(mp->folio);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		folio_unlock(mp->folio);
		return;
	}
	folio_get(mp->folio);
	mp->count++;
	lock_metapage(mp);
	folio_unlock(mp->folio);
	release_metapage(mp);
}

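/*
 * Drop one reference on the metapage.  On the last put, a dirty
 * metapage redirties its folio (written synchronously if META_sync is
 * set), a clean one still on the logsync list is taken off it, and
 * drop_metapage() may free the metapage itself.
 */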
void release_metapage(struct metapage * mp)
{
	struct folio *folio = mp->folio;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	folio_lock(folio);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		folio_unlock(folio);
		folio_put(folio);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		folio_mark_dirty(folio);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (metapage_write_one(folio))
				jfs_error(mp->sb, "metapage_write_one() failed\n");
			folio_lock(folio);
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(folio, mp);

	folio_unlock(folio);
	folio_put(folio);
}

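/*
 * Mark every metapage backing the block range [addr, addr + len) as
 * META_discard: the folios will eventually be released but must not be
 * written back.  Only the block device's direct mapping is searched,
 * since that is where all callers keep these metapages.
 */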
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		struct folio *folio = filemap_lock_folio(mapping,
				lblock >> l2BlocksPerPage);
		if (IS_ERR(folio))
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = folio_to_mp(folio, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		folio_unlock(folio);
		folio_put(folio);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		   "JFS Metapage statistics\n"
		   "=======================\n"
		   "page allocations = %d\n"
		   "page frees = %d\n"
		   "lock waits = %d\n",
		   mpStat.pagealloc,
		   mpStat.pagefree,
		   mpStat.lockwait);
	return 0;
}
#endif