v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/frontswap.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include "swap.h"

static void end_swap_bio_write(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);

	if (bio->bi_status) {
		SetPageError(page);
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		set_page_dirty(page);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
	bio_put(bio);
}

static void end_swap_bio_read(struct bio *bio)
{
	struct page *page = bio_first_page_all(bio);
	struct task_struct *waiter = bio->bi_private;

	if (bio->bi_status) {
		SetPageError(page);
		ClearPageUptodate(page);
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}

	SetPageUptodate(page);
out:
	unlock_page(page);
	WRITE_ONCE(bio->bi_private, NULL);
	bio_put(bio);
	if (waiter) {
		blk_wake_io_task(waiter);
		put_task_struct(waiter);
	}
}

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

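/*
 * Note: generic_swapfile_activate() above walks the swapfile with bmap(),
 * accepting only PAGE_SIZE-aligned, physically contiguous runs of blocks;
 * each accepted run becomes one extent via add_swap_extent(). Any
 * unmapped block (a hole) aborts activation with -EINVAL, which is why
 * swapon fails on sparse files.
 */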
/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret = 0;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		goto out;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		goto out;
	}
	if (frontswap_store(&folio->page) == 0) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		goto out;
	}
	ret = __swap_writepage(&folio->page, wbc);
out:
	return ret;
}

static inline void count_swpout_vm_event(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(PageTransHuge(page)))
		count_vm_event(THP_SWPOUT);
#endif
	count_vm_events(PSWPOUT, thp_nr_pages(page));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct page *page)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = page_memcg(page);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, page)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

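/*
 * Note: sio_pool_init() above may be called concurrently, so the pool is
 * published with cmpxchg(): every caller allocates a candidate pool,
 * exactly one wins the NULL -> pool transition, and the losers destroy
 * their copy. That makes the one-time init safe without a dedicated lock.
 */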
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable(), and rate-limit the
		 * messages, but do not flag PageError as in the
		 * normal direct-to-bio case, because the failure
		 * may be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	} else {
		for (p = 0; p < sio->pages; p++)
			count_swpout_vm_event(sio->bvec[p].bv_page);
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

static int swap_writepage_fs(struct page *page, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = page_swap_info(page);
	struct file *swap_file = sis->swap_file;
	loff_t pos = page_file_offset(page);

	set_page_writeback(page);
	unlock_page(page);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;

	return 0;
}

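/*
 * Note: the swap_plug in struct writeback_control lets swap_writepage_fs()
 * above batch writes to a swapfile: pages that land at consecutive file
 * offsets of the same file are appended to one swap_iocb (up to
 * SWAP_CLUSTER_MAX pages) and only submitted when the run breaks, the
 * bvec array fills up, or the caller writes without a plug.
 */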
int __swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct bio *bio;
	int ret;
	struct swap_info_struct *sis = page_swap_info(page);

	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		return swap_writepage_fs(page, wbc);

	ret = bdev_write_page(sis->bdev, swap_page_sector(page), page, wbc);
	if (!ret) {
		count_swpout_vm_event(page);
		return 0;
	}

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_page(bio, page, thp_size(page), 0);

	bio_associate_blkg_from_page(bio, page);
	count_swpout_vm_event(page);
	set_page_writeback(page);
	unlock_page(page);
	submit_bio(bio);

	return 0;
}

void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageUptodate(page);
			unlock_page(page);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct page *page = sio->bvec[p].bv_page;

			SetPageError(page);
			ClearPageUptodate(page);
			unlock_page(page);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static void swap_readpage_fs(struct page *page,
			     struct swap_iocb **plug)
{
	struct swap_info_struct *sis = page_swap_info(page);
	struct swap_iocb *sio = NULL;
	loff_t pos = page_file_offset(page);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	sio->bvec[sio->pages].bv_page = page;
	sio->bvec[sio->pages].bv_len = thp_size(page);
	sio->bvec[sio->pages].bv_offset = 0;
	sio->len += thp_size(page);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

int swap_readpage(struct page *page, bool synchronous,
		  struct swap_iocb **plug)
{
	struct bio *bio;
	int ret = 0;
	struct swap_info_struct *sis = page_swap_info(page);
	bool workingset = PageWorkingset(page);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_PAGE(!PageSwapCache(page) && !synchronous, page);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageUptodate(page), page);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (frontswap_load(page) == 0) {
		SetPageUptodate(page);
		unlock_page(page);
		goto out;
	}

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_readpage_fs(page, plug);
		goto out;
	}

	if (sis->flags & SWP_SYNCHRONOUS_IO) {
		ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
		if (!ret) {
			count_vm_event(PSWPIN);
			goto out;
		}
	}

	ret = 0;
	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_page_sector(page);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_page(bio, page, thp_size(page), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	if (synchronous) {
		get_task_struct(current);
		bio->bi_private = current;
	}
	count_vm_event(PSWPIN);
	bio_get(bio);
	submit_bio(bio);
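	/*
	 * Note: in the synchronous case, bi_private carries a reference to
	 * the submitting task. end_swap_bio_read() clears it and wakes the
	 * task, so the loop below can sleep until completion; the task state
	 * is set before re-checking bi_private to avoid a lost wakeup.
	 */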
	while (synchronous) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!READ_ONCE(bio->bi_private))
			break;

		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	bio_put(bio);

out:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
	return ret;
}

void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
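The plugging scheme used by swap_writepage_fs()/swap_write_unplug() generalizes well beyond swap. As a rough userspace analogy (not kernel code; struct batch, batch_write(), batch_flush(), BATCH_MAX and PG are all invented for illustration), the sketch below buffers writes that land at consecutive offsets of one file and submits each run as a single vectored write, flushing when the batch fills or the run breaks:

#define _GNU_SOURCE		/* for pwritev() */
#include <fcntl.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#define BATCH_MAX 8		/* stands in for SWAP_CLUSTER_MAX */
#define PG 4096			/* stands in for PAGE_SIZE */

struct batch {
	int fd;
	off_t pos;		/* file offset of the first buffered page */
	struct iovec iov[BATCH_MAX];
	int pages;
	size_t len;
};

/* "Unplug": submit everything buffered so far as one vectored write. */
static void batch_flush(struct batch *b)
{
	if (b->pages) {
		pwritev(b->fd, b->iov, b->pages, b->pos);
		b->pages = 0;
		b->len = 0;
	}
}

/* Buffer one page; flush first if it does not extend the current run. */
static void batch_write(struct batch *b, void *page, off_t pos)
{
	if (b->pages && b->pos + (off_t)b->len != pos)
		batch_flush(b);		/* discontiguous: start a new run */
	if (!b->pages)
		b->pos = pos;
	b->iov[b->pages].iov_base = page;
	b->iov[b->pages].iov_len = PG;
	b->pages++;
	b->len += PG;
	if (b->pages == BATCH_MAX)
		batch_flush(b);		/* batch full: submit now */
}

int main(void)
{
	static char pages[3][PG];
	struct batch b = { .fd = open("demo.bin", O_CREAT | O_WRONLY, 0644) };

	memset(pages, 'x', sizeof(pages));
	batch_write(&b, pages[0], 0);		/* starts a run */
	batch_write(&b, pages[1], PG);		/* contiguous: merged */
	batch_write(&b, pages[2], 3 * PG);	/* gap: flushes, restarts */
	batch_flush(&b);			/* final "unplug" */
	close(b.fd);
	return 0;
}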
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the page out to swap-space.
		 * Re-dirty the page in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable()
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(&folio->page);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}
	if (zswap_store(folio)) {
		folio_start_writeback(folio);
		folio_unlock(folio);
		folio_end_writeback(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

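/*
 * Note: compared with the v6.2 swap_writepage() above, frontswap_store()
 * is gone; zswap_store() is called directly. If zswap declines the folio
 * and the owning memcg has zswap writeback disabled, the folio is
 * re-dirtied and returned with AOP_WRITEPAGE_ACTIVATE so it stays in
 * memory instead of being written to the backing device.
 */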
static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
#endif
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-nfs, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the page dirty and avoid
		 * folio_rotate_reclaimable(), and rate-limit the
		 * messages, but do not flag PageError as in the
		 * normal direct-to-bio case, because the failure
		 * may be temporary.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, page_file_offset(page));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = folio_file_pos(folio);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

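/*
 * Note: swap_writepage_bdev_sync() above can use an on-stack bio and
 * bio_vec via bio_init() because submit_bio_wait() does not return until
 * the I/O completes; no bi_end_io or bio_put() is needed, and the shared
 * completion work is done by calling __end_swap_bio_write() directly.
 */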
static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}

void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	else if (sis->flags & SWP_SYNCHRONOUS_IO)
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}

void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = folio_file_pos(folio);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_vm_event(PSWPIN);
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

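/*
 * Note: swap_read_folio_bdev_sync() above replaces the v6.2 hand-rolled
 * wait loop (bi_private plus blk_wake_io_task()) with submit_bio_wait()
 * on an on-stack bio. The get_task_struct()/put_task_struct() pair still
 * pins the task, since the oom killer may inspect it during the page
 * fault retry check.
 */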
static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
}

void swap_read_folio(struct folio *folio, bool synchronous,
		struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (zswap_load(folio)) {
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	} else if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
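Both unplug helpers end with the same convention: ->swap_rw() either returns -EIOCBQUEUED, meaning the completion callback will run later, or returns a byte count or error, in which case the caller must invoke the completion itself. A minimal userspace sketch of that contract (struct demo_req, demo_submit(), demo_complete() and DEMO_IOCB_QUEUED are all invented for illustration):

/* Illustrative analogy of the -EIOCBQUEUED convention; not kernel code. */
#include <errno.h>
#include <stdio.h>

#define DEMO_IOCB_QUEUED (-EINPROGRESS)	/* stands in for -EIOCBQUEUED */

struct demo_req {
	long len;
	void (*complete)(struct demo_req *req, long ret);
};

static void demo_complete(struct demo_req *req, long ret)
{
	if (ret == req->len)
		printf("completed: %ld bytes\n", ret);
	else
		printf("failed: %ld\n", ret);
}

/* A backend may finish inline (return a count) or queue the request. */
static long demo_submit(struct demo_req *req, int finish_inline)
{
	if (finish_inline)
		return req->len;	/* done right now */
	/* otherwise an async engine would call ->complete later */
	return DEMO_IOCB_QUEUED;
}

int main(void)
{
	struct demo_req req = { .len = 4096, .complete = demo_complete };
	long ret = demo_submit(&req, 1);

	/* The caller-side idiom mirrored from swap_write_unplug(): */
	if (ret != DEMO_IOCB_QUEUED)
		req.complete(&req, ret);
	return 0;
}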