1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Memory merging support.
4 *
5 * This code enables dynamic sharing of identical pages found in different
6 * memory areas, even if they are not shared by fork()
7 *
8 * Copyright (C) 2008-2009 Red Hat, Inc.
9 * Authors:
10 * Izik Eidus
11 * Andrea Arcangeli
12 * Chris Wright
13 * Hugh Dickins
14 */
15
16#include <linux/errno.h>
17#include <linux/mm.h>
18#include <linux/mm_inline.h>
19#include <linux/fs.h>
20#include <linux/mman.h>
21#include <linux/sched.h>
22#include <linux/sched/mm.h>
23#include <linux/sched/cputime.h>
24#include <linux/rwsem.h>
25#include <linux/pagemap.h>
26#include <linux/rmap.h>
27#include <linux/spinlock.h>
28#include <linux/xxhash.h>
29#include <linux/delay.h>
30#include <linux/kthread.h>
31#include <linux/wait.h>
32#include <linux/slab.h>
33#include <linux/rbtree.h>
34#include <linux/memory.h>
35#include <linux/mmu_notifier.h>
36#include <linux/swap.h>
37#include <linux/ksm.h>
38#include <linux/hashtable.h>
39#include <linux/freezer.h>
40#include <linux/oom.h>
41#include <linux/numa.h>
42#include <linux/pagewalk.h>
43
44#include <asm/tlbflush.h>
45#include "internal.h"
46#include "mm_slot.h"
47
48#define CREATE_TRACE_POINTS
49#include <trace/events/ksm.h>
50
51#ifdef CONFIG_NUMA
52#define NUMA(x) (x)
53#define DO_NUMA(x) do { (x); } while (0)
54#else
55#define NUMA(x) (0)
56#define DO_NUMA(x) do { } while (0)
57#endif
58
59typedef u8 rmap_age_t;
60
61/**
62 * DOC: Overview
63 *
64 * A few notes about the KSM scanning process,
65 * to make it easier to understand the data structures below:
66 *
67 * In order to reduce excessive scanning, KSM sorts the memory pages by their
68 * contents into a data structure that holds pointers to the pages' locations.
69 *
70 * Since the contents of the pages may change at any moment, KSM cannot just
71 * insert the pages into a normal sorted tree and expect it to find anything.
72 * Therefore KSM uses two data structures - the stable and the unstable tree.
73 *
74 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
75 * by their contents. Because each such page is write-protected, searching on
76 * this tree is fully assured to be working (except when pages are unmapped),
77 * and therefore this tree is called the stable tree.
78 *
79 * The stable tree node includes information required for reverse
80 * mapping from a KSM page to virtual addresses that map this page.
81 *
82 * In order to avoid large latencies of the rmap walks on KSM pages,
83 * KSM maintains two types of nodes in the stable tree:
84 *
85 * * the regular nodes that keep the reverse mapping structures in a
86 * linked list
87 * * the "chains" that link nodes ("dups") that represent the same
88 * write protected memory content, but each "dup" corresponds to a
89 * different KSM page copy of that content
90 *
91 * Internally, the regular nodes, "dups" and "chains" are represented
92 * using the same struct ksm_stable_node structure.
93 *
94 * In addition to the stable tree, KSM uses a second data structure called the
95 * unstable tree: this tree holds pointers to pages which have been found to
96 * be "unchanged for a period of time". The unstable tree sorts these pages
97 * by their contents, but since they are not write-protected, KSM cannot rely
98 * upon the unstable tree to work correctly - the unstable tree is liable to
99 * be corrupted as its contents are modified, and so it is called unstable.
100 *
101 * KSM solves this problem by several techniques:
102 *
103 * 1) The unstable tree is flushed every time KSM completes scanning all
104 * memory areas, and then the tree is rebuilt again from the beginning.
105 * 2) KSM will only insert into the unstable tree, pages whose hash value
106 * has not changed since the previous scan of all memory areas.
107 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
108 * colors of the nodes and not on their contents, assuring that even when
109 * the tree gets "corrupted" it won't get out of balance, so scanning time
110 * remains the same (also, searching and inserting nodes in an rbtree uses
111 * the same algorithm, so we have no overhead when we flush and rebuild).
112 * 4) KSM never flushes the stable tree, which means that even if it were to
113 * take 10 attempts to find a page in the unstable tree, once it is found,
114 * it is secured in the stable tree. (When we scan a new page, we first
115 * compare it against the stable tree, and then against the unstable tree.)
116 *
117 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
118 * stable trees and multiple unstable trees: one of each for each NUMA node.
119 */
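/*
 * A minimal userspace sketch (not part of this file, names and sizes assumed)
 * of how memory becomes a candidate for the scanning described above: an
 * application marks a range with madvise(MADV_MERGEABLE), which sets
 * VM_MERGEABLE on the vma so ksmd will pick it up once ksm is running
 * (echo 1 > /sys/kernel/mm/ksm/run). Error handling is omitted for brevity.
 *
 *	#include <sys/mman.h>
 *
 *	size_t len = 16 * 4096;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);	/. opt the range into KSM ./
 *	/. ... fill pages with identical content; ksmd may merge them ... ./
 *	madvise(buf, len, MADV_UNMERGEABLE);	/. opt out again (unmerges) ./
 */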
120
121/**
122 * struct ksm_mm_slot - ksm information per mm that is being scanned
123 * @slot: hash lookup from mm to mm_slot
124 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
125 */
126struct ksm_mm_slot {
127 struct mm_slot slot;
128 struct ksm_rmap_item *rmap_list;
129};
130
131/**
132 * struct ksm_scan - cursor for scanning
133 * @mm_slot: the current mm_slot we are scanning
134 * @address: the next address inside that to be scanned
135 * @rmap_list: link to the next rmap to be scanned in the rmap_list
136 * @seqnr: count of completed full scans (needed when removing unstable node)
137 *
138 * There is only the one ksm_scan instance of this cursor structure.
139 */
140struct ksm_scan {
141 struct ksm_mm_slot *mm_slot;
142 unsigned long address;
143 struct ksm_rmap_item **rmap_list;
144 unsigned long seqnr;
145};
146
147/**
148 * struct ksm_stable_node - node of the stable rbtree
149 * @node: rb node of this ksm page in the stable tree
150 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
151 * @hlist_dup: linked into the stable_node->hlist with a stable_node chain
152 * @list: linked into migrate_nodes, pending placement in the proper node tree
153 * @hlist: hlist head of rmap_items using this ksm page
154 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
155 * @chain_prune_time: time of the last full garbage collection
156 * @rmap_hlist_len: number of rmap_item entries in hlist or STABLE_NODE_CHAIN
157 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
158 */
159struct ksm_stable_node {
160 union {
161 struct rb_node node; /* when node of stable tree */
162 struct { /* when listed for migration */
163 struct list_head *head;
164 struct {
165 struct hlist_node hlist_dup;
166 struct list_head list;
167 };
168 };
169 };
170 struct hlist_head hlist;
171 union {
172 unsigned long kpfn;
173 unsigned long chain_prune_time;
174 };
175 /*
176 * STABLE_NODE_CHAIN can be any negative number in
177 * rmap_hlist_len negative range, but better not -1 to be able
178 * to reliably detect underflows.
179 */
180#define STABLE_NODE_CHAIN -1024
181 int rmap_hlist_len;
182#ifdef CONFIG_NUMA
183 int nid;
184#endif
185};
186
187/**
188 * struct ksm_rmap_item - reverse mapping item for virtual addresses
189 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
190 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
191 * @nid: NUMA node id of unstable tree in which linked (may not match page)
192 * @mm: the memory structure this rmap_item is pointing into
193 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
194 * @oldchecksum: previous checksum of the page at that virtual address
195 * @node: rb node of this rmap_item in the unstable tree
196 * @head: pointer to stable_node heading this list in the stable tree
197 * @hlist: link into hlist of rmap_items hanging off that stable_node
198 * @age: number of scan iterations since creation
199 * @remaining_skips: how many scans to skip
200 */
201struct ksm_rmap_item {
202 struct ksm_rmap_item *rmap_list;
203 union {
204 struct anon_vma *anon_vma; /* when stable */
205#ifdef CONFIG_NUMA
206 int nid; /* when node of unstable tree */
207#endif
208 };
209 struct mm_struct *mm;
210 unsigned long address; /* + low bits used for flags below */
211 unsigned int oldchecksum; /* when unstable */
212 rmap_age_t age;
213 rmap_age_t remaining_skips;
214 union {
215 struct rb_node node; /* when node of unstable tree */
216 struct { /* when listed from stable tree */
217 struct ksm_stable_node *head;
218 struct hlist_node hlist;
219 };
220 };
221};
222
223#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
224#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
225#define STABLE_FLAG 0x200 /* is listed from the stable tree */
226
227/* The stable and unstable tree heads */
228static struct rb_root one_stable_tree[1] = { RB_ROOT };
229static struct rb_root one_unstable_tree[1] = { RB_ROOT };
230static struct rb_root *root_stable_tree = one_stable_tree;
231static struct rb_root *root_unstable_tree = one_unstable_tree;
232
233/* Recently migrated nodes of stable tree, pending proper placement */
234static LIST_HEAD(migrate_nodes);
235#define STABLE_NODE_DUP_HEAD ((struct list_head *)&migrate_nodes.prev)
236
237#define MM_SLOTS_HASH_BITS 10
238static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
239
240static struct ksm_mm_slot ksm_mm_head = {
241 .slot.mm_node = LIST_HEAD_INIT(ksm_mm_head.slot.mm_node),
242};
243static struct ksm_scan ksm_scan = {
244 .mm_slot = &ksm_mm_head,
245};
246
247static struct kmem_cache *rmap_item_cache;
248static struct kmem_cache *stable_node_cache;
249static struct kmem_cache *mm_slot_cache;
250
251/* Default number of pages to scan per batch */
252#define DEFAULT_PAGES_TO_SCAN 100
253
254/* The number of pages scanned */
255static unsigned long ksm_pages_scanned;
256
257/* The number of nodes in the stable tree */
258static unsigned long ksm_pages_shared;
259
260/* The number of page slots additionally sharing those nodes */
261static unsigned long ksm_pages_sharing;
262
263/* The number of nodes in the unstable tree */
264static unsigned long ksm_pages_unshared;
265
266/* The number of rmap_items in use: to calculate pages_volatile */
267static unsigned long ksm_rmap_items;
268
269/* The number of stable_node chains */
270static unsigned long ksm_stable_node_chains;
271
272/* The number of stable_node dups linked to the stable_node chains */
273static unsigned long ksm_stable_node_dups;
274
275/* Delay in pruning stale stable_node_dups in the stable_node_chains */
276static unsigned int ksm_stable_node_chains_prune_millisecs = 2000;
277
278/* Maximum number of page slots sharing a stable node */
279static int ksm_max_page_sharing = 256;
280
281/* Number of pages ksmd should scan in one batch */
282static unsigned int ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
283
284/* Milliseconds ksmd should sleep between batches */
285static unsigned int ksm_thread_sleep_millisecs = 20;
286
287/* Checksum of an empty (zeroed) page */
288static unsigned int zero_checksum __read_mostly;
289
290/* Whether to merge empty (zeroed) pages with actual zero pages */
291static bool ksm_use_zero_pages __read_mostly;
292
293/* Skip pages that couldn't be de-duplicated previously */
294/* Default to true at least temporarily, for testing */
295static bool ksm_smart_scan = true;
296
297/* The number of zero pages which is placed by KSM */
298atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
299
300/* The number of pages that have been skipped due to "smart scanning" */
301static unsigned long ksm_pages_skipped;
302
303/* Don't scan more than max pages per batch. */
304static unsigned long ksm_advisor_max_pages_to_scan = 30000;
305
306/* Min CPU for scanning pages per scan */
307#define KSM_ADVISOR_MIN_CPU 10
308
309/* Max CPU for scanning pages per scan */
310static unsigned int ksm_advisor_max_cpu = 70;
311
312/* Target scan time in seconds to analyze all KSM candidate pages. */
313static unsigned long ksm_advisor_target_scan_time = 200;
314
315/* Exponentially weighted moving average. */
316#define EWMA_WEIGHT 30
317
318/**
319 * struct advisor_ctx - metadata for KSM advisor
320 * @start_scan: start time of the current scan
321 * @scan_time: scan time of previous scan
322 * @change: change in percent to pages_to_scan parameter
323 * @cpu_time: cpu time consumed by the ksmd thread in the previous scan
324 */
325struct advisor_ctx {
326 ktime_t start_scan;
327 unsigned long scan_time;
328 unsigned long change;
329 unsigned long long cpu_time;
330};
331static struct advisor_ctx advisor_ctx;
332
333/* Define the different advisor types */
334enum ksm_advisor_type {
335 KSM_ADVISOR_NONE,
336 KSM_ADVISOR_SCAN_TIME,
337};
338static enum ksm_advisor_type ksm_advisor;
339
340#ifdef CONFIG_SYSFS
341/*
342 * Only called through the sysfs control interface:
343 */
344
345/* At least scan this many pages per batch. */
346static unsigned long ksm_advisor_min_pages_to_scan = 500;
347
348static void set_advisor_defaults(void)
349{
350 if (ksm_advisor == KSM_ADVISOR_NONE) {
351 ksm_thread_pages_to_scan = DEFAULT_PAGES_TO_SCAN;
352 } else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME) {
353 advisor_ctx = (const struct advisor_ctx){ 0 };
354 ksm_thread_pages_to_scan = ksm_advisor_min_pages_to_scan;
355 }
356}
357#endif /* CONFIG_SYSFS */
358
359static inline void advisor_start_scan(void)
360{
361 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
362 advisor_ctx.start_scan = ktime_get();
363}
364
365/*
366 * Use previous scan time if available, otherwise use current scan time as an
367 * approximation for the previous scan time.
368 */
369static inline unsigned long prev_scan_time(struct advisor_ctx *ctx,
370 unsigned long scan_time)
371{
372 return ctx->scan_time ? ctx->scan_time : scan_time;
373}
374
375/* Calculate exponential weighted moving average */
376static unsigned long ewma(unsigned long prev, unsigned long curr)
377{
378 return ((100 - EWMA_WEIGHT) * prev + EWMA_WEIGHT * curr) / 100;
379}
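/*
 * For illustration (values assumed, not taken from the code): with
 * EWMA_WEIGHT 30, a previous value of 100 and a current value of 200 give
 * (70 * 100 + 30 * 200) / 100 = 130, i.e. the average moves 30% of the way
 * towards the new sample on each iteration.
 */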
380
381/*
382 * The scan time advisor is based on the current scan rate and the target
383 * scan rate.
384 *
385 * new_pages_to_scan = pages_to_scan * (scan_time / target_scan_time)
386 *
 387 * To avoid perturbations, it calculates a change factor from previous changes.
 388 * A new change factor is calculated for each iteration and smoothed with an
 389 * exponentially weighted moving average. The new pages_to_scan value is
 390 * multiplied by that change factor:
 391 *
 392 * new_pages_to_scan *= change factor
 393 *
 394 * The new_pages_to_scan value is limited by the cpu min and max values. It
 395 * calculates the cpu percentage used by the last scan and from it the
 396 * estimated cpu cost of the next scan; that estimate is capped by the
 397 * cpu min and max settings.
 398 *
 399 * In addition, the new pages_to_scan value is capped by the max and min
 400 * page limits.
401 */
402static void scan_time_advisor(void)
403{
404 unsigned int cpu_percent;
405 unsigned long cpu_time;
406 unsigned long cpu_time_diff;
407 unsigned long cpu_time_diff_ms;
408 unsigned long pages;
409 unsigned long per_page_cost;
410 unsigned long factor;
411 unsigned long change;
412 unsigned long last_scan_time;
413 unsigned long scan_time;
414
415 /* Convert scan time to seconds */
416 scan_time = div_s64(ktime_ms_delta(ktime_get(), advisor_ctx.start_scan),
417 MSEC_PER_SEC);
418 scan_time = scan_time ? scan_time : 1;
419
420 /* Calculate CPU consumption of ksmd background thread */
421 cpu_time = task_sched_runtime(current);
422 cpu_time_diff = cpu_time - advisor_ctx.cpu_time;
423 cpu_time_diff_ms = cpu_time_diff / 1000 / 1000;
424
425 cpu_percent = (cpu_time_diff_ms * 100) / (scan_time * 1000);
426 cpu_percent = cpu_percent ? cpu_percent : 1;
427 last_scan_time = prev_scan_time(&advisor_ctx, scan_time);
428
429 /* Calculate scan time as percentage of target scan time */
430 factor = ksm_advisor_target_scan_time * 100 / scan_time;
431 factor = factor ? factor : 1;
432
433 /*
434 * Calculate scan time as percentage of last scan time and use
435 * exponentially weighted average to smooth it
436 */
437 change = scan_time * 100 / last_scan_time;
438 change = change ? change : 1;
439 change = ewma(advisor_ctx.change, change);
440
441 /* Calculate new scan rate based on target scan rate. */
442 pages = ksm_thread_pages_to_scan * 100 / factor;
443 /* Update pages_to_scan by weighted change percentage. */
444 pages = pages * change / 100;
445
446 /* Cap new pages_to_scan value */
447 per_page_cost = ksm_thread_pages_to_scan / cpu_percent;
448 per_page_cost = per_page_cost ? per_page_cost : 1;
449
450 pages = min(pages, per_page_cost * ksm_advisor_max_cpu);
451 pages = max(pages, per_page_cost * KSM_ADVISOR_MIN_CPU);
452 pages = min(pages, ksm_advisor_max_pages_to_scan);
453
454 /* Update advisor context */
455 advisor_ctx.change = change;
456 advisor_ctx.scan_time = scan_time;
457 advisor_ctx.cpu_time = cpu_time;
458
459 ksm_thread_pages_to_scan = pages;
460 trace_ksm_advisor(scan_time, pages, cpu_percent);
461}
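/*
 * A worked example of the calculation above, with assumed numbers and the
 * CPU capping left out: suppose ksm_thread_pages_to_scan is 1000, the last
 * scan took scan_time = 400s and ksm_advisor_target_scan_time is 200s. Then
 * factor = 200 * 100 / 400 = 50, so pages = 1000 * 100 / 50 = 2000, i.e. the
 * scan rate doubles to try to hit the target scan time. The result is then
 * scaled by the EWMA-smoothed change percentage and clamped by the per-CPU
 * and absolute page limits before it is written back to
 * ksm_thread_pages_to_scan.
 */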
462
463static void advisor_stop_scan(void)
464{
465 if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
466 scan_time_advisor();
467}
468
469#ifdef CONFIG_NUMA
470/* Zeroed when merging across nodes is not allowed */
471static unsigned int ksm_merge_across_nodes = 1;
472static int ksm_nr_node_ids = 1;
473#else
474#define ksm_merge_across_nodes 1U
475#define ksm_nr_node_ids 1
476#endif
477
478#define KSM_RUN_STOP 0
479#define KSM_RUN_MERGE 1
480#define KSM_RUN_UNMERGE 2
481#define KSM_RUN_OFFLINE 4
482static unsigned long ksm_run = KSM_RUN_STOP;
483static void wait_while_offlining(void);
484
485static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
486static DECLARE_WAIT_QUEUE_HEAD(ksm_iter_wait);
487static DEFINE_MUTEX(ksm_thread_mutex);
488static DEFINE_SPINLOCK(ksm_mmlist_lock);
489
490static int __init ksm_slab_init(void)
491{
492 rmap_item_cache = KMEM_CACHE(ksm_rmap_item, 0);
493 if (!rmap_item_cache)
494 goto out;
495
496 stable_node_cache = KMEM_CACHE(ksm_stable_node, 0);
497 if (!stable_node_cache)
498 goto out_free1;
499
500 mm_slot_cache = KMEM_CACHE(ksm_mm_slot, 0);
501 if (!mm_slot_cache)
502 goto out_free2;
503
504 return 0;
505
506out_free2:
507 kmem_cache_destroy(stable_node_cache);
508out_free1:
509 kmem_cache_destroy(rmap_item_cache);
510out:
511 return -ENOMEM;
512}
513
514static void __init ksm_slab_free(void)
515{
516 kmem_cache_destroy(mm_slot_cache);
517 kmem_cache_destroy(stable_node_cache);
518 kmem_cache_destroy(rmap_item_cache);
519 mm_slot_cache = NULL;
520}
521
522static __always_inline bool is_stable_node_chain(struct ksm_stable_node *chain)
523{
524 return chain->rmap_hlist_len == STABLE_NODE_CHAIN;
525}
526
527static __always_inline bool is_stable_node_dup(struct ksm_stable_node *dup)
528{
529 return dup->head == STABLE_NODE_DUP_HEAD;
530}
531
532static inline void stable_node_chain_add_dup(struct ksm_stable_node *dup,
533 struct ksm_stable_node *chain)
534{
535 VM_BUG_ON(is_stable_node_dup(dup));
536 dup->head = STABLE_NODE_DUP_HEAD;
537 VM_BUG_ON(!is_stable_node_chain(chain));
538 hlist_add_head(&dup->hlist_dup, &chain->hlist);
539 ksm_stable_node_dups++;
540}
541
542static inline void __stable_node_dup_del(struct ksm_stable_node *dup)
543{
544 VM_BUG_ON(!is_stable_node_dup(dup));
545 hlist_del(&dup->hlist_dup);
546 ksm_stable_node_dups--;
547}
548
549static inline void stable_node_dup_del(struct ksm_stable_node *dup)
550{
551 VM_BUG_ON(is_stable_node_chain(dup));
552 if (is_stable_node_dup(dup))
553 __stable_node_dup_del(dup);
554 else
555 rb_erase(&dup->node, root_stable_tree + NUMA(dup->nid));
556#ifdef CONFIG_DEBUG_VM
557 dup->head = NULL;
558#endif
559}
560
561static inline struct ksm_rmap_item *alloc_rmap_item(void)
562{
563 struct ksm_rmap_item *rmap_item;
564
565 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
566 __GFP_NORETRY | __GFP_NOWARN);
567 if (rmap_item)
568 ksm_rmap_items++;
569 return rmap_item;
570}
571
572static inline void free_rmap_item(struct ksm_rmap_item *rmap_item)
573{
574 ksm_rmap_items--;
575 rmap_item->mm->ksm_rmap_items--;
576 rmap_item->mm = NULL; /* debug safety */
577 kmem_cache_free(rmap_item_cache, rmap_item);
578}
579
580static inline struct ksm_stable_node *alloc_stable_node(void)
581{
582 /*
583 * The allocation can take too long with GFP_KERNEL when memory is under
584 * pressure, which may lead to hung task warnings. Adding __GFP_HIGH
585 * grants access to memory reserves, helping to avoid this problem.
586 */
587 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL | __GFP_HIGH);
588}
589
590static inline void free_stable_node(struct ksm_stable_node *stable_node)
591{
592 VM_BUG_ON(stable_node->rmap_hlist_len &&
593 !is_stable_node_chain(stable_node));
594 kmem_cache_free(stable_node_cache, stable_node);
595}
596
597/*
598 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
599 * page tables after it has passed through ksm_exit() - which, if necessary,
600 * takes mmap_lock briefly to serialize against them. ksm_exit() does not set
601 * a special flag: they can just back out as soon as mm_users goes to zero.
602 * ksm_test_exit() is used throughout to make this test for exit: in some
603 * places for correctness, in some places just to avoid unnecessary work.
604 */
605static inline bool ksm_test_exit(struct mm_struct *mm)
606{
607 return atomic_read(&mm->mm_users) == 0;
608}
609
610/*
611 * We use break_ksm to break COW on a ksm page by triggering unsharing,
612 * such that the ksm page will get replaced by an exclusive anonymous page.
613 *
614 * We take great care only to touch a ksm page, in a VM_MERGEABLE vma,
615 * in case the application has unmapped and remapped mm,addr meanwhile.
616 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
617 * mmap of /dev/mem, where we would not want to touch it.
618 *
619 * FAULT_FLAG_REMOTE/FOLL_REMOTE are because we do this outside the context
620 * of the process that owns 'vma'. We also do not want to enforce
621 * protection keys here anyway.
622 */
623static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma)
624{
625 vm_fault_t ret = 0;
626
627 if (lock_vma)
628 vma_start_write(vma);
629
630 do {
631 bool ksm_page = false;
632 struct folio_walk fw;
633 struct folio *folio;
634
635 cond_resched();
636 folio = folio_walk_start(&fw, vma, addr,
637 FW_MIGRATION | FW_ZEROPAGE);
638 if (folio) {
639 /* Small folio implies FW_LEVEL_PTE. */
640 if (!folio_test_large(folio) &&
641 (folio_test_ksm(folio) || is_ksm_zero_pte(fw.pte)))
642 ksm_page = true;
643 folio_walk_end(&fw, vma);
644 }
645
646 if (!ksm_page)
647 return 0;
648 ret = handle_mm_fault(vma, addr,
649 FAULT_FLAG_UNSHARE | FAULT_FLAG_REMOTE,
650 NULL);
651 } while (!(ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
652 /*
653 * We must loop until we no longer find a KSM page because
654 * handle_mm_fault() may back out if there's any difficulty e.g. if
655 * pte accessed bit gets updated concurrently.
656 *
657 * VM_FAULT_SIGBUS could occur if we race with truncation of the
658 * backing file, which also invalidates anonymous pages: that's
659 * okay, that truncation will have unmapped the KSM page for us.
660 *
661 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
662 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
663 * current task has TIF_MEMDIE set, and will be OOM killed on return
664 * to user; and ksmd, having no mm, would never be chosen for that.
665 *
666 * But if the mm is in a limited mem_cgroup, then the fault may fail
667 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
668 * even ksmd can fail in this way - though it's usually breaking ksm
669 * just to undo a merge it made a moment before, so unlikely to oom.
670 *
671 * That's a pity: we might therefore have more kernel pages allocated
672 * than we're counting as nodes in the stable tree; but ksm_do_scan
673 * will retry to break_cow on each pass, so should recover the page
674 * in due course. The important thing is to not let VM_MERGEABLE
675 * be cleared while any such pages might remain in the area.
676 */
677 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
678}
679
680static bool vma_ksm_compatible(struct vm_area_struct *vma)
681{
682 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE | VM_PFNMAP |
683 VM_IO | VM_DONTEXPAND | VM_HUGETLB |
 684 VM_MIXEDMAP | VM_DROPPABLE))
685 return false; /* just ignore the advice */
686
687 if (vma_is_dax(vma))
688 return false;
689
690#ifdef VM_SAO
691 if (vma->vm_flags & VM_SAO)
692 return false;
693#endif
694#ifdef VM_SPARC_ADI
695 if (vma->vm_flags & VM_SPARC_ADI)
696 return false;
697#endif
698
699 return true;
700}
701
702static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
703 unsigned long addr)
704{
705 struct vm_area_struct *vma;
706 if (ksm_test_exit(mm))
707 return NULL;
708 vma = vma_lookup(mm, addr);
709 if (!vma || !(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
710 return NULL;
711 return vma;
712}
713
714static void break_cow(struct ksm_rmap_item *rmap_item)
715{
716 struct mm_struct *mm = rmap_item->mm;
717 unsigned long addr = rmap_item->address;
718 struct vm_area_struct *vma;
719
720 /*
721 * It is not an accident that whenever we want to break COW
722 * to undo, we also need to drop a reference to the anon_vma.
723 */
724 put_anon_vma(rmap_item->anon_vma);
725
726 mmap_read_lock(mm);
727 vma = find_mergeable_vma(mm, addr);
728 if (vma)
729 break_ksm(vma, addr, false);
730 mmap_read_unlock(mm);
731}
732
733static struct page *get_mergeable_page(struct ksm_rmap_item *rmap_item)
734{
735 struct mm_struct *mm = rmap_item->mm;
736 unsigned long addr = rmap_item->address;
737 struct vm_area_struct *vma;
738 struct page *page = NULL;
739 struct folio_walk fw;
740 struct folio *folio;
741
742 mmap_read_lock(mm);
743 vma = find_mergeable_vma(mm, addr);
744 if (!vma)
745 goto out;
746
747 folio = folio_walk_start(&fw, vma, addr, 0);
748 if (folio) {
749 if (!folio_is_zone_device(folio) &&
750 folio_test_anon(folio)) {
751 folio_get(folio);
752 page = fw.page;
753 }
754 folio_walk_end(&fw, vma);
755 }
756out:
757 if (page) {
758 flush_anon_page(vma, page, addr);
759 flush_dcache_page(page);
760 }
761 mmap_read_unlock(mm);
762 return page;
763}
764
765/*
766 * This helper is used for getting right index into array of tree roots.
767 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
768 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
769 * every node has its own stable and unstable tree.
770 */
771static inline int get_kpfn_nid(unsigned long kpfn)
772{
773 return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
774}
775
776static struct ksm_stable_node *alloc_stable_node_chain(struct ksm_stable_node *dup,
777 struct rb_root *root)
778{
779 struct ksm_stable_node *chain = alloc_stable_node();
780 VM_BUG_ON(is_stable_node_chain(dup));
781 if (likely(chain)) {
782 INIT_HLIST_HEAD(&chain->hlist);
783 chain->chain_prune_time = jiffies;
784 chain->rmap_hlist_len = STABLE_NODE_CHAIN;
785#if defined (CONFIG_DEBUG_VM) && defined(CONFIG_NUMA)
786 chain->nid = NUMA_NO_NODE; /* debug */
787#endif
788 ksm_stable_node_chains++;
789
790 /*
791 * Put the stable node chain in the first dimension of
792 * the stable tree and at the same time remove the old
793 * stable node.
794 */
795 rb_replace_node(&dup->node, &chain->node, root);
796
797 /*
798 * Move the old stable node to the second dimension
799 * queued in the hlist_dup. The invariant is that all
800 * dup stable_nodes in the chain->hlist point to pages
801 * that are write protected and have the exact same
802 * content.
803 */
804 stable_node_chain_add_dup(dup, chain);
805 }
806 return chain;
807}
808
809static inline void free_stable_node_chain(struct ksm_stable_node *chain,
810 struct rb_root *root)
811{
812 rb_erase(&chain->node, root);
813 free_stable_node(chain);
814 ksm_stable_node_chains--;
815}
816
817static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
818{
819 struct ksm_rmap_item *rmap_item;
820
821 /* check it's not STABLE_NODE_CHAIN or negative */
822 BUG_ON(stable_node->rmap_hlist_len < 0);
823
824 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
825 if (rmap_item->hlist.next) {
826 ksm_pages_sharing--;
827 trace_ksm_remove_rmap_item(stable_node->kpfn, rmap_item, rmap_item->mm);
828 } else {
829 ksm_pages_shared--;
830 }
831
832 rmap_item->mm->ksm_merging_pages--;
833
834 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
835 stable_node->rmap_hlist_len--;
836 put_anon_vma(rmap_item->anon_vma);
837 rmap_item->address &= PAGE_MASK;
838 cond_resched();
839 }
840
841 /*
842 * We need the second aligned pointer of the migrate_nodes
843 * list_head to stay clear from the rb_parent_color union
844 * (aligned and different than any node) and also different
845 * from &migrate_nodes. This will verify that future list.h changes
846 * don't break STABLE_NODE_DUP_HEAD. Only recent gcc can handle it.
847 */
848 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD <= &migrate_nodes);
849 BUILD_BUG_ON(STABLE_NODE_DUP_HEAD >= &migrate_nodes + 1);
850
851 trace_ksm_remove_ksm_page(stable_node->kpfn);
852 if (stable_node->head == &migrate_nodes)
853 list_del(&stable_node->list);
854 else
855 stable_node_dup_del(stable_node);
856 free_stable_node(stable_node);
857}
858
859enum ksm_get_folio_flags {
860 KSM_GET_FOLIO_NOLOCK,
861 KSM_GET_FOLIO_LOCK,
862 KSM_GET_FOLIO_TRYLOCK
863};
864
865/*
866 * ksm_get_folio: checks if the page indicated by the stable node
867 * is still its ksm page, despite having held no reference to it.
868 * In which case we can trust the content of the page, and it
869 * returns the gotten page; but if the page has now been zapped,
870 * remove the stale node from the stable tree and return NULL.
871 * But beware, the stable node's page might be being migrated.
872 *
873 * You would expect the stable_node to hold a reference to the ksm page.
874 * But if it increments the page's count, swapping out has to wait for
875 * ksmd to come around again before it can free the page, which may take
876 * seconds or even minutes: much too unresponsive. So instead we use a
877 * "keyhole reference": access to the ksm page from the stable node peeps
878 * out through its keyhole to see if that page still holds the right key,
879 * pointing back to this stable node. This relies on freeing a PageAnon
880 * page to reset its page->mapping to NULL, and relies on no other use of
881 * a page to put something that might look like our key in page->mapping.
882 * is on its way to being freed; but it is an anomaly to bear in mind.
883 */
884static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
885 enum ksm_get_folio_flags flags)
886{
887 struct folio *folio;
888 void *expected_mapping;
889 unsigned long kpfn;
890
891 expected_mapping = (void *)((unsigned long)stable_node |
892 PAGE_MAPPING_KSM);
893again:
894 kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
895 folio = pfn_folio(kpfn);
896 if (READ_ONCE(folio->mapping) != expected_mapping)
897 goto stale;
898
899 /*
900 * We cannot do anything with the page while its refcount is 0.
901 * Usually 0 means free, or tail of a higher-order page: in which
902 * case this node is no longer referenced, and should be freed;
903 * however, it might mean that the page is under page_ref_freeze().
 904 * The __remove_mapping() case is easy: the node is again stale; the
 905 * same applies in the reuse_ksm_page() case. But if the page is in the
 906 * swapcache in folio_migrate_mapping(), it might still be our page,
 907 * in which case it's essential to keep the node.
908 */
909 while (!folio_try_get(folio)) {
910 /*
911 * Another check for folio->mapping != expected_mapping
912 * would work here too. We have chosen to test the
913 * swapcache flag to optimize the common case, when the
914 * folio is or is about to be freed: the swapcache flag
915 * is cleared (under spin_lock_irq) in the ref_freeze
916 * section of __remove_mapping(); but anon folio->mapping
917 * is reset to NULL later, in free_pages_prepare().
918 */
919 if (!folio_test_swapcache(folio))
920 goto stale;
921 cpu_relax();
922 }
923
924 if (READ_ONCE(folio->mapping) != expected_mapping) {
925 folio_put(folio);
926 goto stale;
927 }
928
929 if (flags == KSM_GET_FOLIO_TRYLOCK) {
930 if (!folio_trylock(folio)) {
931 folio_put(folio);
932 return ERR_PTR(-EBUSY);
933 }
934 } else if (flags == KSM_GET_FOLIO_LOCK)
935 folio_lock(folio);
936
937 if (flags != KSM_GET_FOLIO_NOLOCK) {
938 if (READ_ONCE(folio->mapping) != expected_mapping) {
939 folio_unlock(folio);
940 folio_put(folio);
941 goto stale;
942 }
943 }
944 return folio;
945
946stale:
947 /*
948 * We come here from above when folio->mapping or the swapcache flag
949 * suggests that the node is stale; but it might be under migration.
950 * We need smp_rmb(), matching the smp_wmb() in folio_migrate_ksm(),
951 * before checking whether node->kpfn has been changed.
952 */
953 smp_rmb();
954 if (READ_ONCE(stable_node->kpfn) != kpfn)
955 goto again;
956 remove_node_from_stable_tree(stable_node);
957 return NULL;
958}
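/*
 * To make the "keyhole" checks above concrete, two situations they handle
 * (described from the code, no extra assumptions):
 *
 * - Stale node: ksmd reads stable_node->kpfn, but the folio has since been
 *   freed and reused, so folio->mapping no longer equals
 *   (stable_node | PAGE_MAPPING_KSM); we fall through to "stale" and the
 *   node is removed from the stable tree.
 *
 * - Migration: the folio is being migrated, so kpfn itself may change under
 *   us. The smp_rmb() pairs with the smp_wmb() in folio_migrate_ksm(); if
 *   kpfn changed we retry the lookup with the new pfn instead of discarding
 *   the node.
 */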
959
960/*
961 * Removing rmap_item from stable or unstable tree.
962 * This function will clean the information from the stable/unstable tree.
963 */
964static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
965{
966 if (rmap_item->address & STABLE_FLAG) {
967 struct ksm_stable_node *stable_node;
968 struct folio *folio;
969
970 stable_node = rmap_item->head;
971 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
972 if (!folio)
973 goto out;
974
975 hlist_del(&rmap_item->hlist);
976 folio_unlock(folio);
977 folio_put(folio);
978
979 if (!hlist_empty(&stable_node->hlist))
980 ksm_pages_sharing--;
981 else
982 ksm_pages_shared--;
983
984 rmap_item->mm->ksm_merging_pages--;
985
986 VM_BUG_ON(stable_node->rmap_hlist_len <= 0);
987 stable_node->rmap_hlist_len--;
988
989 put_anon_vma(rmap_item->anon_vma);
990 rmap_item->head = NULL;
991 rmap_item->address &= PAGE_MASK;
992
993 } else if (rmap_item->address & UNSTABLE_FLAG) {
994 unsigned char age;
995 /*
996 * Usually ksmd can and must skip the rb_erase, because
997 * root_unstable_tree was already reset to RB_ROOT.
998 * But be careful when an mm is exiting: do the rb_erase
999 * if this rmap_item was inserted by this scan, rather
1000 * than left over from before.
1001 */
1002 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
1003 BUG_ON(age > 1);
1004 if (!age)
1005 rb_erase(&rmap_item->node,
1006 root_unstable_tree + NUMA(rmap_item->nid));
1007 ksm_pages_unshared--;
1008 rmap_item->address &= PAGE_MASK;
1009 }
1010out:
1011 cond_resched(); /* we're called from many long loops */
1012}
1013
1014static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list)
1015{
1016 while (*rmap_list) {
1017 struct ksm_rmap_item *rmap_item = *rmap_list;
1018 *rmap_list = rmap_item->rmap_list;
1019 remove_rmap_item_from_tree(rmap_item);
1020 free_rmap_item(rmap_item);
1021 }
1022}
1023
1024/*
1025 * Though it's very tempting to unmerge rmap_items from stable tree rather
1026 * than check every pte of a given vma, the locking doesn't quite work for
 1027 * that - an rmap_item is assigned to the stable tree after inserting the ksm
 1028 * page and releasing mmap_lock. Nor does it fit with the way we skip dup'ing
1029 * rmap_items from parent to child at fork time (so as not to waste time
1030 * if exit comes before the next scan reaches it).
1031 *
1032 * Similarly, although we'd like to remove rmap_items (so updating counts
1033 * and freeing memory) when unmerging an area, it's easier to leave that
1034 * to the next pass of ksmd - consider, for example, how ksmd might be
1035 * in cmp_and_merge_page on one of the rmap_items we would be removing.
1036 */
1037static int unmerge_ksm_pages(struct vm_area_struct *vma,
1038 unsigned long start, unsigned long end, bool lock_vma)
1039{
1040 unsigned long addr;
1041 int err = 0;
1042
1043 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
1044 if (ksm_test_exit(vma->vm_mm))
1045 break;
1046 if (signal_pending(current))
1047 err = -ERESTARTSYS;
1048 else
1049 err = break_ksm(vma, addr, lock_vma);
1050 }
1051 return err;
1052}
1053
1054static inline
1055struct ksm_stable_node *folio_stable_node(const struct folio *folio)
1056{
1057 return folio_test_ksm(folio) ? folio_raw_mapping(folio) : NULL;
1058}
1059
1060static inline struct ksm_stable_node *page_stable_node(struct page *page)
1061{
1062 return folio_stable_node(page_folio(page));
1063}
1064
1065static inline void folio_set_stable_node(struct folio *folio,
1066 struct ksm_stable_node *stable_node)
1067{
1068 VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
1069 folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
1070}
1071
1072#ifdef CONFIG_SYSFS
1073/*
1074 * Only called through the sysfs control interface:
1075 */
1076static int remove_stable_node(struct ksm_stable_node *stable_node)
1077{
1078 struct folio *folio;
1079 int err;
1080
1081 folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
1082 if (!folio) {
1083 /*
1084 * ksm_get_folio did remove_node_from_stable_tree itself.
1085 */
1086 return 0;
1087 }
1088
1089 /*
1090 * Page could be still mapped if this races with __mmput() running in
1091 * between ksm_exit() and exit_mmap(). Just refuse to let
1092 * merge_across_nodes/max_page_sharing be switched.
1093 */
1094 err = -EBUSY;
1095 if (!folio_mapped(folio)) {
1096 /*
1097 * The stable node did not yet appear stale to ksm_get_folio(),
1098 * since that allows for an unmapped ksm folio to be recognized
1099 * right up until it is freed; but the node is safe to remove.
1100 * This folio might be in an LRU cache waiting to be freed,
1101 * or it might be in the swapcache (perhaps under writeback),
1102 * or it might have been removed from swapcache a moment ago.
1103 */
1104 folio_set_stable_node(folio, NULL);
1105 remove_node_from_stable_tree(stable_node);
1106 err = 0;
1107 }
1108
1109 folio_unlock(folio);
1110 folio_put(folio);
1111 return err;
1112}
1113
1114static int remove_stable_node_chain(struct ksm_stable_node *stable_node,
1115 struct rb_root *root)
1116{
1117 struct ksm_stable_node *dup;
1118 struct hlist_node *hlist_safe;
1119
1120 if (!is_stable_node_chain(stable_node)) {
1121 VM_BUG_ON(is_stable_node_dup(stable_node));
1122 if (remove_stable_node(stable_node))
1123 return true;
1124 else
1125 return false;
1126 }
1127
1128 hlist_for_each_entry_safe(dup, hlist_safe,
1129 &stable_node->hlist, hlist_dup) {
1130 VM_BUG_ON(!is_stable_node_dup(dup));
1131 if (remove_stable_node(dup))
1132 return true;
1133 }
1134 BUG_ON(!hlist_empty(&stable_node->hlist));
1135 free_stable_node_chain(stable_node, root);
1136 return false;
1137}
1138
1139static int remove_all_stable_nodes(void)
1140{
1141 struct ksm_stable_node *stable_node, *next;
1142 int nid;
1143 int err = 0;
1144
1145 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
1146 while (root_stable_tree[nid].rb_node) {
1147 stable_node = rb_entry(root_stable_tree[nid].rb_node,
1148 struct ksm_stable_node, node);
1149 if (remove_stable_node_chain(stable_node,
1150 root_stable_tree + nid)) {
1151 err = -EBUSY;
1152 break; /* proceed to next nid */
1153 }
1154 cond_resched();
1155 }
1156 }
1157 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
1158 if (remove_stable_node(stable_node))
1159 err = -EBUSY;
1160 cond_resched();
1161 }
1162 return err;
1163}
1164
1165static int unmerge_and_remove_all_rmap_items(void)
1166{
1167 struct ksm_mm_slot *mm_slot;
1168 struct mm_slot *slot;
1169 struct mm_struct *mm;
1170 struct vm_area_struct *vma;
1171 int err = 0;
1172
1173 spin_lock(&ksm_mmlist_lock);
1174 slot = list_entry(ksm_mm_head.slot.mm_node.next,
1175 struct mm_slot, mm_node);
1176 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
1177 spin_unlock(&ksm_mmlist_lock);
1178
1179 for (mm_slot = ksm_scan.mm_slot; mm_slot != &ksm_mm_head;
1180 mm_slot = ksm_scan.mm_slot) {
1181 VMA_ITERATOR(vmi, mm_slot->slot.mm, 0);
1182
1183 mm = mm_slot->slot.mm;
1184 mmap_read_lock(mm);
1185
1186 /*
1187 * Exit right away if mm is exiting to avoid lockdep issue in
1188 * the maple tree
1189 */
1190 if (ksm_test_exit(mm))
1191 goto mm_exiting;
1192
1193 for_each_vma(vmi, vma) {
1194 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
1195 continue;
1196 err = unmerge_ksm_pages(vma,
1197 vma->vm_start, vma->vm_end, false);
1198 if (err)
1199 goto error;
1200 }
1201
1202mm_exiting:
1203 remove_trailing_rmap_items(&mm_slot->rmap_list);
1204 mmap_read_unlock(mm);
1205
1206 spin_lock(&ksm_mmlist_lock);
1207 slot = list_entry(mm_slot->slot.mm_node.next,
1208 struct mm_slot, mm_node);
1209 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
1210 if (ksm_test_exit(mm)) {
1211 hash_del(&mm_slot->slot.hash);
1212 list_del(&mm_slot->slot.mm_node);
1213 spin_unlock(&ksm_mmlist_lock);
1214
1215 mm_slot_free(mm_slot_cache, mm_slot);
1216 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1217 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
1218 mmdrop(mm);
1219 } else
1220 spin_unlock(&ksm_mmlist_lock);
1221 }
1222
1223 /* Clean up stable nodes, but don't worry if some are still busy */
1224 remove_all_stable_nodes();
1225 ksm_scan.seqnr = 0;
1226 return 0;
1227
1228error:
1229 mmap_read_unlock(mm);
1230 spin_lock(&ksm_mmlist_lock);
1231 ksm_scan.mm_slot = &ksm_mm_head;
1232 spin_unlock(&ksm_mmlist_lock);
1233 return err;
1234}
1235#endif /* CONFIG_SYSFS */
1236
1237static u32 calc_checksum(struct page *page)
1238{
1239 u32 checksum;
1240 void *addr = kmap_local_page(page);
1241 checksum = xxhash(addr, PAGE_SIZE, 0);
1242 kunmap_local(addr);
1243 return checksum;
1244}
1245
1246static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
1247 pte_t *orig_pte)
1248{
1249 struct mm_struct *mm = vma->vm_mm;
1250 DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
1251 int swapped;
1252 int err = -EFAULT;
1253 struct mmu_notifier_range range;
1254 bool anon_exclusive;
1255 pte_t entry;
1256
1257 if (WARN_ON_ONCE(folio_test_large(folio)))
1258 return err;
1259
1260 pvmw.address = page_address_in_vma(folio, folio_page(folio, 0), vma);
1261 if (pvmw.address == -EFAULT)
1262 goto out;
1263
1264 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
1265 pvmw.address + PAGE_SIZE);
1266 mmu_notifier_invalidate_range_start(&range);
1267
1268 if (!page_vma_mapped_walk(&pvmw))
1269 goto out_mn;
1270 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
1271 goto out_unlock;
1272
1273 anon_exclusive = PageAnonExclusive(&folio->page);
1274 entry = ptep_get(pvmw.pte);
1275 if (pte_write(entry) || pte_dirty(entry) ||
1276 anon_exclusive || mm_tlb_flush_pending(mm)) {
1277 swapped = folio_test_swapcache(folio);
1278 flush_cache_page(vma, pvmw.address, folio_pfn(folio));
1279 /*
 1280 * Ok this is tricky: when get_user_pages_fast() runs it doesn't
 1281 * take any lock, therefore the check that we are going to make
 1282 * with the pagecount against the mapcount is racy and
 1283 * O_DIRECT can happen right after the check.
 1284 * So we clear the pte and flush the tlb before the check;
 1285 * this assures us that no O_DIRECT can happen after the check
 1286 * or in the middle of the check.
1287 *
1288 * No need to notify as we are downgrading page table to read
1289 * only not changing it to point to a new page.
1290 *
1291 * See Documentation/mm/mmu_notifier.rst
1292 */
1293 entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
1294 /*
1295 * Check that no O_DIRECT or similar I/O is in progress on the
1296 * page
1297 */
1298 if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
1299 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1300 goto out_unlock;
1301 }
1302
1303 /* See folio_try_share_anon_rmap_pte(): clear PTE first. */
1304 if (anon_exclusive &&
1305 folio_try_share_anon_rmap_pte(folio, &folio->page)) {
1306 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1307 goto out_unlock;
1308 }
1309
1310 if (pte_dirty(entry))
1311 folio_mark_dirty(folio);
1312 entry = pte_mkclean(entry);
1313
1314 if (pte_write(entry))
1315 entry = pte_wrprotect(entry);
1316
1317 set_pte_at(mm, pvmw.address, pvmw.pte, entry);
1318 }
1319 *orig_pte = entry;
1320 err = 0;
1321
1322out_unlock:
1323 page_vma_mapped_walk_done(&pvmw);
1324out_mn:
1325 mmu_notifier_invalidate_range_end(&range);
1326out:
1327 return err;
1328}
1329
1330/**
1331 * replace_page - replace page in vma by new ksm page
1332 * @vma: vma that holds the pte pointing to page
1333 * @page: the page we are replacing by kpage
1334 * @kpage: the ksm page we replace page by
1335 * @orig_pte: the original value of the pte
1336 *
1337 * Returns 0 on success, -EFAULT on failure.
1338 */
1339static int replace_page(struct vm_area_struct *vma, struct page *page,
1340 struct page *kpage, pte_t orig_pte)
1341{
1342 struct folio *kfolio = page_folio(kpage);
1343 struct mm_struct *mm = vma->vm_mm;
1344 struct folio *folio = page_folio(page);
1345 pmd_t *pmd;
1346 pmd_t pmde;
1347 pte_t *ptep;
1348 pte_t newpte;
1349 spinlock_t *ptl;
1350 unsigned long addr;
1351 int err = -EFAULT;
1352 struct mmu_notifier_range range;
1353
1354 addr = page_address_in_vma(folio, page, vma);
1355 if (addr == -EFAULT)
1356 goto out;
1357
1358 pmd = mm_find_pmd(mm, addr);
1359 if (!pmd)
1360 goto out;
1361 /*
1362 * Some THP functions use the sequence pmdp_huge_clear_flush(), set_pmd_at()
1363 * without holding anon_vma lock for write. So when looking for a
1364 * genuine pmde (in which to find pte), test present and !THP together.
1365 */
1366 pmde = pmdp_get_lockless(pmd);
1367 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
1368 goto out;
1369
1370 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
1371 addr + PAGE_SIZE);
1372 mmu_notifier_invalidate_range_start(&range);
1373
1374 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
1375 if (!ptep)
1376 goto out_mn;
1377 if (!pte_same(ptep_get(ptep), orig_pte)) {
1378 pte_unmap_unlock(ptep, ptl);
1379 goto out_mn;
1380 }
1381 VM_BUG_ON_PAGE(PageAnonExclusive(page), page);
1382 VM_BUG_ON_FOLIO(folio_test_anon(kfolio) && PageAnonExclusive(kpage),
1383 kfolio);
1384
1385 /*
1386 * No need to check ksm_use_zero_pages here: we can only have a
1387 * zero_page here if ksm_use_zero_pages was enabled already.
1388 */
1389 if (!is_zero_pfn(page_to_pfn(kpage))) {
1390 folio_get(kfolio);
1391 folio_add_anon_rmap_pte(kfolio, kpage, vma, addr, RMAP_NONE);
1392 newpte = mk_pte(kpage, vma->vm_page_prot);
1393 } else {
1394 /*
1395 * Use pte_mkdirty to mark the zero page mapped by KSM, and then
1396 * we can easily track all KSM-placed zero pages by checking if
1397 * the dirty bit in zero page's PTE is set.
1398 */
1399 newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
1400 ksm_map_zero_page(mm);
1401 /*
1402 * We're replacing an anonymous page with a zero page, which is
1403 * not anonymous. We need to do proper accounting otherwise we
1404 * will get wrong values in /proc, and a BUG message in dmesg
1405 * when tearing down the mm.
1406 */
1407 dec_mm_counter(mm, MM_ANONPAGES);
1408 }
1409
1410 flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep)));
1411 /*
1412 * No need to notify as we are replacing a read only page with another
1413 * read only page with the same content.
1414 *
1415 * See Documentation/mm/mmu_notifier.rst
1416 */
1417 ptep_clear_flush(vma, addr, ptep);
1418 set_pte_at(mm, addr, ptep, newpte);
1419
1420 folio_remove_rmap_pte(folio, page, vma);
1421 if (!folio_mapped(folio))
1422 folio_free_swap(folio);
1423 folio_put(folio);
1424
1425 pte_unmap_unlock(ptep, ptl);
1426 err = 0;
1427out_mn:
1428 mmu_notifier_invalidate_range_end(&range);
1429out:
1430 return err;
1431}
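/*
 * Related note: the dirty bit set on the special zero-page PTE above is what
 * the rest of KSM uses to recognise KSM-placed zero pages, e.g. via
 * is_ksm_zero_pte() in break_ksm(); roughly, a pte counts as KSM-placed when
 * it maps the zero pfn and has the dirty bit set (see include/linux/ksm.h
 * for the authoritative definition).
 */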
1432
1433/*
1434 * try_to_merge_one_page - take two pages and merge them into one
1435 * @vma: the vma that holds the pte pointing to page
1436 * @page: the PageAnon page that we want to replace with kpage
1437 * @kpage: the KSM page that we want to map instead of page,
1438 * or NULL the first time when we want to use page as kpage.
1439 *
1440 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1441 */
1442static int try_to_merge_one_page(struct vm_area_struct *vma,
1443 struct page *page, struct page *kpage)
1444{
1445 struct folio *folio = page_folio(page);
1446 pte_t orig_pte = __pte(0);
1447 int err = -EFAULT;
1448
1449 if (page == kpage) /* ksm page forked */
1450 return 0;
1451
1452 if (!folio_test_anon(folio))
1453 goto out;
1454
1455 /*
1456 * We need the folio lock to read a stable swapcache flag in
1457 * write_protect_page(). We trylock because we don't want to wait
1458 * here - we prefer to continue scanning and merging different
1459 * pages, then come back to this page when it is unlocked.
1460 */
1461 if (!folio_trylock(folio))
1462 goto out;
1463
1464 if (folio_test_large(folio)) {
1465 if (split_huge_page(page))
1466 goto out_unlock;
1467 folio = page_folio(page);
1468 }
1469
1470 /*
1471 * If this anonymous page is mapped only here, its pte may need
1472 * to be write-protected. If it's mapped elsewhere, all of its
1473 * ptes are necessarily already write-protected. But in either
1474 * case, we need to lock and check page_count is not raised.
1475 */
1476 if (write_protect_page(vma, folio, &orig_pte) == 0) {
1477 if (!kpage) {
1478 /*
1479 * While we hold folio lock, upgrade folio from
1480 * anon to a NULL stable_node with the KSM flag set:
1481 * stable_tree_insert() will update stable_node.
1482 */
1483 folio_set_stable_node(folio, NULL);
1484 folio_mark_accessed(folio);
1485 /*
1486 * Page reclaim just frees a clean folio with no dirty
1487 * ptes: make sure that the ksm page would be swapped.
1488 */
1489 if (!folio_test_dirty(folio))
1490 folio_mark_dirty(folio);
1491 err = 0;
1492 } else if (pages_identical(page, kpage))
1493 err = replace_page(vma, page, kpage, orig_pte);
1494 }
1495
1496out_unlock:
1497 folio_unlock(folio);
1498out:
1499 return err;
1500}
1501
1502/*
1503 * This function returns 0 if the pages were merged or if they are
1504 * no longer merging candidates (e.g., VMA stale), -EFAULT otherwise.
1505 */
1506static int try_to_merge_with_zero_page(struct ksm_rmap_item *rmap_item,
1507 struct page *page)
1508{
1509 struct mm_struct *mm = rmap_item->mm;
1510 int err = -EFAULT;
1511
1512 /*
1513 * Same checksum as an empty page. We attempt to merge it with the
1514 * appropriate zero page if the user enabled this via sysfs.
1515 */
1516 if (ksm_use_zero_pages && (rmap_item->oldchecksum == zero_checksum)) {
1517 struct vm_area_struct *vma;
1518
1519 mmap_read_lock(mm);
1520 vma = find_mergeable_vma(mm, rmap_item->address);
1521 if (vma) {
1522 err = try_to_merge_one_page(vma, page,
1523 ZERO_PAGE(rmap_item->address));
1524 trace_ksm_merge_one_page(
1525 page_to_pfn(ZERO_PAGE(rmap_item->address)),
1526 rmap_item, mm, err);
1527 } else {
1528 /*
1529 * If the vma is out of date, we do not need to
1530 * continue.
1531 */
1532 err = 0;
1533 }
1534 mmap_read_unlock(mm);
1535 }
1536
1537 return err;
1538}
1539
1540/*
1541 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1542 * but no new kernel page is allocated: kpage must already be a ksm page.
1543 *
1544 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1545 */
1546static int try_to_merge_with_ksm_page(struct ksm_rmap_item *rmap_item,
1547 struct page *page, struct page *kpage)
1548{
1549 struct mm_struct *mm = rmap_item->mm;
1550 struct vm_area_struct *vma;
1551 int err = -EFAULT;
1552
1553 mmap_read_lock(mm);
1554 vma = find_mergeable_vma(mm, rmap_item->address);
1555 if (!vma)
1556 goto out;
1557
1558 err = try_to_merge_one_page(vma, page, kpage);
1559 if (err)
1560 goto out;
1561
1562 /* Unstable nid is in union with stable anon_vma: remove first */
1563 remove_rmap_item_from_tree(rmap_item);
1564
1565 /* Must get reference to anon_vma while still holding mmap_lock */
1566 rmap_item->anon_vma = vma->anon_vma;
1567 get_anon_vma(vma->anon_vma);
1568out:
1569 mmap_read_unlock(mm);
1570 trace_ksm_merge_with_ksm_page(kpage, page_to_pfn(kpage ? kpage : page),
1571 rmap_item, mm, err);
1572 return err;
1573}
1574
1575/*
1576 * try_to_merge_two_pages - take two identical pages and prepare them
1577 * to be merged into one page.
1578 *
1579 * This function returns the kpage if we successfully merged two identical
1580 * pages into one ksm page, NULL otherwise.
1581 *
1582 * Note that this function upgrades page to ksm page: if one of the pages
1583 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1584 */
1585static struct folio *try_to_merge_two_pages(struct ksm_rmap_item *rmap_item,
1586 struct page *page,
1587 struct ksm_rmap_item *tree_rmap_item,
1588 struct page *tree_page)
1589{
1590 int err;
1591
1592 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1593 if (!err) {
1594 err = try_to_merge_with_ksm_page(tree_rmap_item,
1595 tree_page, page);
1596 /*
1597 * If that fails, we have a ksm page with only one pte
1598 * pointing to it: so break it.
1599 */
1600 if (err)
1601 break_cow(rmap_item);
1602 }
1603 return err ? NULL : page_folio(page);
1604}
1605
1606static __always_inline
1607bool __is_page_sharing_candidate(struct ksm_stable_node *stable_node, int offset)
1608{
1609 VM_BUG_ON(stable_node->rmap_hlist_len < 0);
1610 /*
 1611 * Check that at least one mapping still exists, otherwise
 1612 * there's not much point in merging and sharing with this
 1613 * stable_node, as the underlying tree_page of the other
 1614 * sharer is going to be freed soon.
1615 */
1616 return stable_node->rmap_hlist_len &&
1617 stable_node->rmap_hlist_len + offset < ksm_max_page_sharing;
1618}
1619
1620static __always_inline
1621bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
1622{
1623 return __is_page_sharing_candidate(stable_node, 0);
1624}
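/*
 * Example with the default ksm_max_page_sharing of 256 (numbers assumed):
 * a dup whose rmap_hlist_len is 255 still qualifies at offset 0
 * (255 && 255 + 0 < 256) but not at offset 1, which is how callers reserve
 * room for a merge that is already underway; a dup with rmap_hlist_len 0 is
 * never a candidate because its last sharer is about to go away.
 */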
1625
1626static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
1627 struct ksm_stable_node **_stable_node,
1628 struct rb_root *root,
1629 bool prune_stale_stable_nodes)
1630{
1631 struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
1632 struct hlist_node *hlist_safe;
1633 struct folio *folio, *tree_folio = NULL;
1634 int found_rmap_hlist_len;
1635
1636 if (!prune_stale_stable_nodes ||
1637 time_before(jiffies, stable_node->chain_prune_time +
1638 msecs_to_jiffies(
1639 ksm_stable_node_chains_prune_millisecs)))
1640 prune_stale_stable_nodes = false;
1641 else
1642 stable_node->chain_prune_time = jiffies;
1643
1644 hlist_for_each_entry_safe(dup, hlist_safe,
1645 &stable_node->hlist, hlist_dup) {
1646 cond_resched();
1647 /*
1648 * We must walk all stable_node_dup to prune the stale
1649 * stable nodes during lookup.
1650 *
1651 * ksm_get_folio can drop the nodes from the
1652 * stable_node->hlist if they point to freed pages
1653 * (that's why we do a _safe walk). The "dup"
1654 * stable_node parameter itself will be freed from
1655 * under us if it returns NULL.
1656 */
1657 folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
1658 if (!folio)
1659 continue;
1660 /* Pick the best candidate if possible. */
1661 if (!found || (is_page_sharing_candidate(dup) &&
1662 (!is_page_sharing_candidate(found) ||
1663 dup->rmap_hlist_len > found_rmap_hlist_len))) {
1664 if (found)
1665 folio_put(tree_folio);
1666 found = dup;
1667 found_rmap_hlist_len = found->rmap_hlist_len;
1668 tree_folio = folio;
1669 /* skip put_page for found candidate */
1670 if (!prune_stale_stable_nodes &&
1671 is_page_sharing_candidate(found))
1672 break;
1673 continue;
1674 }
1675 folio_put(folio);
1676 }
1677
1678 if (found) {
1679 if (hlist_is_singular_node(&found->hlist_dup, &stable_node->hlist)) {
1680 /*
 1681 * If there were more than one entry here we
 1682 * would corrupt memory, so better BUG_ON. In the
 1683 * ksmd context, with no lock held, the BUG_ON is
 1684 * not even fatal to the system.
1685 */
1686 BUG_ON(stable_node->hlist.first->next);
1687
1688 /*
1689 * There's just one entry and it is below the
1690 * deduplication limit so drop the chain.
1691 */
1692 rb_replace_node(&stable_node->node, &found->node,
1693 root);
1694 free_stable_node(stable_node);
1695 ksm_stable_node_chains--;
1696 ksm_stable_node_dups--;
1697 /*
1698 * NOTE: the caller depends on the stable_node
1699 * to be equal to stable_node_dup if the chain
1700 * was collapsed.
1701 */
1702 *_stable_node = found;
1703 /*
1704 * Just for robustness, as stable_node is
1705 * otherwise left as a stable pointer, the
1706 * compiler shall optimize it away at build
1707 * time.
1708 */
1709 stable_node = NULL;
1710 } else if (stable_node->hlist.first != &found->hlist_dup &&
1711 __is_page_sharing_candidate(found, 1)) {
1712 /*
1713 * If the found stable_node dup can accept one
1714 * more future merge (in addition to the one
1715 * that is underway) and is not at the head of
1716 * the chain, put it there so next search will
1717 * be quicker in the !prune_stale_stable_nodes
1718 * case.
1719 *
			 * NOTE: checking the hlist.first pointer directly
			 * is required here; counting the dups walked so
			 * far would be inaccurate, because in the
			 * prune_stale_stable_nodes case the walk doesn't
			 * stop at the found dup, so such a count would be
			 * the total number of dups in the chain rather
			 * than the position of the found dup.
1726 */
1727 hlist_del(&found->hlist_dup);
1728 hlist_add_head(&found->hlist_dup,
1729 &stable_node->hlist);
1730 }
1731 } else {
		/* Its hlist must be empty if nothing was found. */
1733 free_stable_node_chain(stable_node, root);
1734 }
1735
1736 *_stable_node_dup = found;
1737 return tree_folio;
1738}
1739
1740/*
 * Like for ksm_get_folio, this function can free the *_stable_node and
 * *_stable_node_dup if the returned folio is NULL.
 *
 * It can also free and overwrite *_stable_node with the found
 * stable_node_dup if the chain is collapsed (in which case
 * *_stable_node will be equal to *_stable_node_dup, as if the chain
 * never existed). It's up to the caller to verify the returned folio
 * is not NULL before dereferencing *_stable_node or *_stable_node_dup.
 *
 * *_stable_node_dup is really a second output parameter of this
 * function and will be overwritten in all cases; the caller doesn't
 * need to initialize it.
1753 */
1754static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
1755 struct ksm_stable_node **_stable_node,
1756 struct rb_root *root,
1757 bool prune_stale_stable_nodes)
1758{
1759 struct ksm_stable_node *stable_node = *_stable_node;
1760
1761 if (!is_stable_node_chain(stable_node)) {
1762 *_stable_node_dup = stable_node;
1763 return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
1764 }
1765 return stable_node_dup(_stable_node_dup, _stable_node, root,
1766 prune_stale_stable_nodes);
1767}
1768
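/*
 * chain_prune() may walk the whole chain so that stale dups get dropped
 * (subject to the rate limit in stable_node_dup()); chain() stops at the
 * first suitable dup. Both are thin wrappers around __stable_node_chain().
 */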
1769static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
1770 struct ksm_stable_node **s_n,
1771 struct rb_root *root)
1772{
1773 return __stable_node_chain(s_n_d, s_n, root, true);
1774}
1775
1776static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
1777 struct ksm_stable_node **s_n,
1778 struct rb_root *root)
1779{
1780 return __stable_node_chain(s_n_d, s_n, root, false);
1781}
1782
1783/*
1784 * stable_tree_search - search for page inside the stable tree
1785 *
1786 * This function checks if there is a page inside the stable tree
1787 * with identical content to the page that we are scanning right now.
1788 *
 * This function returns the folio of identical content if found,
 * ERR_PTR(-EBUSY) if the stable node's folio is being migrated, NULL otherwise.
1791 */
1792static struct folio *stable_tree_search(struct page *page)
1793{
1794 int nid;
1795 struct rb_root *root;
1796 struct rb_node **new;
1797 struct rb_node *parent;
1798 struct ksm_stable_node *stable_node, *stable_node_dup;
1799 struct ksm_stable_node *page_node;
1800 struct folio *folio;
1801
1802 folio = page_folio(page);
1803 page_node = folio_stable_node(folio);
1804 if (page_node && page_node->head != &migrate_nodes) {
1805 /* ksm page forked */
1806 folio_get(folio);
1807 return folio;
1808 }
1809
1810 nid = get_kpfn_nid(folio_pfn(folio));
1811 root = root_stable_tree + nid;
1812again:
1813 new = &root->rb_node;
1814 parent = NULL;
1815
1816 while (*new) {
1817 struct folio *tree_folio;
1818 int ret;
1819
1820 cond_resched();
1821 stable_node = rb_entry(*new, struct ksm_stable_node, node);
1822 tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
1823 if (!tree_folio) {
1824 /*
1825 * If we walked over a stale stable_node,
1826 * ksm_get_folio() will call rb_erase() and it
1827 * may rebalance the tree from under us. So
1828 * restart the search from scratch. Returning
1829 * NULL would be safe too, but we'd generate
1830 * false negative insertions just because some
1831 * stable_node was stale.
1832 */
1833 goto again;
1834 }
1835
1836 ret = memcmp_pages(page, &tree_folio->page);
1837 folio_put(tree_folio);
1838
1839 parent = *new;
1840 if (ret < 0)
1841 new = &parent->rb_left;
1842 else if (ret > 0)
1843 new = &parent->rb_right;
1844 else {
1845 if (page_node) {
1846 VM_BUG_ON(page_node->head != &migrate_nodes);
1847 /*
1848 * If the mapcount of our migrated KSM folio is
1849 * at most 1, we can merge it with another
1850 * KSM folio where we know that we have space
1851 * for one more mapping without exceeding the
1852 * ksm_max_page_sharing limit: see
1853 * chain_prune(). This way, we can avoid adding
1854 * this stable node to the chain.
1855 */
1856 if (folio_mapcount(folio) > 1)
1857 goto chain_append;
1858 }
1859
1860 if (!is_page_sharing_candidate(stable_node_dup)) {
1861 /*
				 * If the stable_node is a chain and
				 * we got a payload match in memcmp
				 * but we cannot merge the scanned
				 * page into any of the existing
				 * stable_node dups because they're
				 * all full, we need to wait for the
				 * scanned page to find itself a match
				 * in the unstable tree to create a
				 * brand new KSM page to add later to
				 * the dups of this stable_node.
1872 */
1873 return NULL;
1874 }
1875
1876 /*
1877 * Lock and unlock the stable_node's page (which
1878 * might already have been migrated) so that page
1879 * migration is sure to notice its raised count.
1880 * It would be more elegant to return stable_node
1881 * than kpage, but that involves more changes.
1882 */
1883 tree_folio = ksm_get_folio(stable_node_dup,
1884 KSM_GET_FOLIO_TRYLOCK);
1885
1886 if (PTR_ERR(tree_folio) == -EBUSY)
1887 return ERR_PTR(-EBUSY);
1888
1889 if (unlikely(!tree_folio))
1890 /*
1891 * The tree may have been rebalanced,
1892 * so re-evaluate parent and new.
1893 */
1894 goto again;
1895 folio_unlock(tree_folio);
1896
1897 if (get_kpfn_nid(stable_node_dup->kpfn) !=
1898 NUMA(stable_node_dup->nid)) {
1899 folio_put(tree_folio);
1900 goto replace;
1901 }
1902 return tree_folio;
1903 }
1904 }
1905
1906 if (!page_node)
1907 return NULL;
1908
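	/*
	 * No match anywhere in the stable tree: take the migrated stable
	 * node off the migrate_nodes list and link it in at the insertion
	 * point found above.
	 */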
1909 list_del(&page_node->list);
1910 DO_NUMA(page_node->nid = nid);
1911 rb_link_node(&page_node->node, parent, new);
1912 rb_insert_color(&page_node->node, root);
1913out:
1914 if (is_page_sharing_candidate(page_node)) {
1915 folio_get(folio);
1916 return folio;
1917 } else
1918 return NULL;
1919
1920replace:
1921 /*
1922 * If stable_node was a chain and chain_prune collapsed it,
1923 * stable_node has been updated to be the new regular
1924 * stable_node. A collapse of the chain is indistinguishable
1925 * from the case there was no chain in the stable
1926 * rbtree. Otherwise stable_node is the chain and
1927 * stable_node_dup is the dup to replace.
1928 */
1929 if (stable_node_dup == stable_node) {
1930 VM_BUG_ON(is_stable_node_chain(stable_node_dup));
1931 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1932 /* there is no chain */
1933 if (page_node) {
1934 VM_BUG_ON(page_node->head != &migrate_nodes);
1935 list_del(&page_node->list);
1936 DO_NUMA(page_node->nid = nid);
1937 rb_replace_node(&stable_node_dup->node,
1938 &page_node->node,
1939 root);
1940 if (is_page_sharing_candidate(page_node))
1941 folio_get(folio);
1942 else
1943 folio = NULL;
1944 } else {
1945 rb_erase(&stable_node_dup->node, root);
1946 folio = NULL;
1947 }
1948 } else {
1949 VM_BUG_ON(!is_stable_node_chain(stable_node));
1950 __stable_node_dup_del(stable_node_dup);
1951 if (page_node) {
1952 VM_BUG_ON(page_node->head != &migrate_nodes);
1953 list_del(&page_node->list);
1954 DO_NUMA(page_node->nid = nid);
1955 stable_node_chain_add_dup(page_node, stable_node);
1956 if (is_page_sharing_candidate(page_node))
1957 folio_get(folio);
1958 else
1959 folio = NULL;
1960 } else {
1961 folio = NULL;
1962 }
1963 }
1964 stable_node_dup->head = &migrate_nodes;
1965 list_add(&stable_node_dup->list, stable_node_dup->head);
1966 return folio;
1967
1968chain_append:
1969 /*
1970 * If stable_node was a chain and chain_prune collapsed it,
1971 * stable_node has been updated to be the new regular
1972 * stable_node. A collapse of the chain is indistinguishable
1973 * from the case there was no chain in the stable
1974 * rbtree. Otherwise stable_node is the chain and
1975 * stable_node_dup is the dup to replace.
1976 */
1977 if (stable_node_dup == stable_node) {
1978 VM_BUG_ON(is_stable_node_dup(stable_node_dup));
1979 /* chain is missing so create it */
1980 stable_node = alloc_stable_node_chain(stable_node_dup,
1981 root);
1982 if (!stable_node)
1983 return NULL;
1984 }
1985 /*
1986 * Add this stable_node dup that was
1987 * migrated to the stable_node chain
1988 * of the current nid for this page
1989 * content.
1990 */
1991 VM_BUG_ON(!is_stable_node_dup(stable_node_dup));
1992 VM_BUG_ON(page_node->head != &migrate_nodes);
1993 list_del(&page_node->list);
1994 DO_NUMA(page_node->nid = nid);
1995 stable_node_chain_add_dup(page_node, stable_node);
1996 goto out;
1997}
1998
1999/*
2000 * stable_tree_insert - insert stable tree node pointing to new ksm page
2001 * into the stable tree.
2002 *
2003 * This function returns the stable tree node just allocated on success,
2004 * NULL otherwise.
2005 */
2006static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
2007{
2008 int nid;
2009 unsigned long kpfn;
2010 struct rb_root *root;
2011 struct rb_node **new;
2012 struct rb_node *parent;
2013 struct ksm_stable_node *stable_node, *stable_node_dup;
2014 bool need_chain = false;
2015
2016 kpfn = folio_pfn(kfolio);
2017 nid = get_kpfn_nid(kpfn);
2018 root = root_stable_tree + nid;
2019again:
2020 parent = NULL;
2021 new = &root->rb_node;
2022
2023 while (*new) {
2024 struct folio *tree_folio;
2025 int ret;
2026
2027 cond_resched();
2028 stable_node = rb_entry(*new, struct ksm_stable_node, node);
2029 tree_folio = chain(&stable_node_dup, &stable_node, root);
2030 if (!tree_folio) {
2031 /*
2032 * If we walked over a stale stable_node,
2033 * ksm_get_folio() will call rb_erase() and it
2034 * may rebalance the tree from under us. So
2035 * restart the search from scratch. Returning
2036 * NULL would be safe too, but we'd generate
2037 * false negative insertions just because some
2038 * stable_node was stale.
2039 */
2040 goto again;
2041 }
2042
2043 ret = memcmp_pages(&kfolio->page, &tree_folio->page);
2044 folio_put(tree_folio);
2045
2046 parent = *new;
2047 if (ret < 0)
2048 new = &parent->rb_left;
2049 else if (ret > 0)
2050 new = &parent->rb_right;
2051 else {
2052 need_chain = true;
2053 break;
2054 }
2055 }
2056
2057 stable_node_dup = alloc_stable_node();
2058 if (!stable_node_dup)
2059 return NULL;
2060
2061 INIT_HLIST_HEAD(&stable_node_dup->hlist);
2062 stable_node_dup->kpfn = kpfn;
2063 stable_node_dup->rmap_hlist_len = 0;
2064 DO_NUMA(stable_node_dup->nid = nid);
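	/*
	 * If no node with identical content was found, link the new node
	 * directly into the rbtree. Otherwise hang it as a dup off the
	 * matching node's chain, creating the chain first if the match is
	 * still a regular node.
	 */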
2065 if (!need_chain) {
2066 rb_link_node(&stable_node_dup->node, parent, new);
2067 rb_insert_color(&stable_node_dup->node, root);
2068 } else {
2069 if (!is_stable_node_chain(stable_node)) {
2070 struct ksm_stable_node *orig = stable_node;
2071 /* chain is missing so create it */
2072 stable_node = alloc_stable_node_chain(orig, root);
2073 if (!stable_node) {
2074 free_stable_node(stable_node_dup);
2075 return NULL;
2076 }
2077 }
2078 stable_node_chain_add_dup(stable_node_dup, stable_node);
2079 }
2080
2081 folio_set_stable_node(kfolio, stable_node_dup);
2082
2083 return stable_node_dup;
2084}
2085
2086/*
2087 * unstable_tree_search_insert - search for identical page,
2088 * else insert rmap_item into the unstable tree.
2089 *
2090 * This function searches for a page in the unstable tree identical to the
2091 * page currently being scanned; and if no identical page is found in the
2092 * tree, we insert rmap_item as a new object into the unstable tree.
2093 *
 * This function returns a pointer to the rmap_item found to be identical
2095 * to the currently scanned page, NULL otherwise.
2096 *
2097 * This function does both searching and inserting, because they share
2098 * the same walking algorithm in an rbtree.
2099 */
2100static
2101struct ksm_rmap_item *unstable_tree_search_insert(struct ksm_rmap_item *rmap_item,
2102 struct page *page,
2103 struct page **tree_pagep)
2104{
2105 struct rb_node **new;
2106 struct rb_root *root;
2107 struct rb_node *parent = NULL;
2108 int nid;
2109
2110 nid = get_kpfn_nid(page_to_pfn(page));
2111 root = root_unstable_tree + nid;
2112 new = &root->rb_node;
2113
2114 while (*new) {
2115 struct ksm_rmap_item *tree_rmap_item;
2116 struct page *tree_page;
2117 int ret;
2118
2119 cond_resched();
2120 tree_rmap_item = rb_entry(*new, struct ksm_rmap_item, node);
2121 tree_page = get_mergeable_page(tree_rmap_item);
2122 if (!tree_page)
2123 return NULL;
2124
2125 /*
2126 * Don't substitute a ksm page for a forked page.
2127 */
2128 if (page == tree_page) {
2129 put_page(tree_page);
2130 return NULL;
2131 }
2132
2133 ret = memcmp_pages(page, tree_page);
2134
2135 parent = *new;
2136 if (ret < 0) {
2137 put_page(tree_page);
2138 new = &parent->rb_left;
2139 } else if (ret > 0) {
2140 put_page(tree_page);
2141 new = &parent->rb_right;
2142 } else if (!ksm_merge_across_nodes &&
2143 page_to_nid(tree_page) != nid) {
2144 /*
			 * If tree_page has been migrated to another NUMA node,
			 * it will be flushed out and put into the right unstable
			 * tree next time: only merge with it if merging across
			 * nodes is allowed.
2148 */
2149 put_page(tree_page);
2150 return NULL;
2151 } else {
2152 *tree_pagep = tree_page;
2153 return tree_rmap_item;
2154 }
2155 }
2156
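	/*
	 * No identical page was found: insert this rmap_item into the
	 * unstable tree, recording the current scan's sequence number in
	 * its address flags.
	 */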
2157 rmap_item->address |= UNSTABLE_FLAG;
2158 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
2159 DO_NUMA(rmap_item->nid = nid);
2160 rb_link_node(&rmap_item->node, parent, new);
2161 rb_insert_color(&rmap_item->node, root);
2162
2163 ksm_pages_unshared++;
2164 return NULL;
2165}
2166
2167/*
2168 * stable_tree_append - add another rmap_item to the linked list of
2169 * rmap_items hanging off a given node of the stable tree, all sharing
2170 * the same ksm page.
2171 */
2172static void stable_tree_append(struct ksm_rmap_item *rmap_item,
2173 struct ksm_stable_node *stable_node,
2174 bool max_page_sharing_bypass)
2175{
2176 /*
	 * rmap won't find this mapping if we don't insert the
	 * rmap_item in the right stable_node
	 * duplicate. page_migration could break later if rmap breaks,
	 * so we can as well crash here. We really need to check for
	 * rmap_hlist_len == STABLE_NODE_CHAIN, but we can as well check
	 * for other negative values as an underflow, which if detected
	 * here for the first time (and not when decreasing rmap_hlist_len)
	 * would be a sign of memory corruption in the stable_node.
2185 */
2186 BUG_ON(stable_node->rmap_hlist_len < 0);
2187
2188 stable_node->rmap_hlist_len++;
2189 if (!max_page_sharing_bypass)
2190 /* possibly non fatal but unexpected overflow, only warn */
2191 WARN_ON_ONCE(stable_node->rmap_hlist_len >
2192 ksm_max_page_sharing);
2193
2194 rmap_item->head = stable_node;
2195 rmap_item->address |= STABLE_FLAG;
2196 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
2197
2198 if (rmap_item->hlist.next)
2199 ksm_pages_sharing++;
2200 else
2201 ksm_pages_shared++;
2202
2203 rmap_item->mm->ksm_merging_pages++;
2204}
2205
2206/*
2207 * cmp_and_merge_page - first see if page can be merged into the stable tree;
2208 * if not, compare checksum to previous and if it's the same, see if page can
2209 * be inserted into the unstable tree, or merged with a page already there and
2210 * both transferred to the stable tree.
2211 *
 * @page: the page that we are searching an identical page for.
2213 * @rmap_item: the reverse mapping into the virtual address of this page
2214 */
2215static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_item)
2216{
2217 struct ksm_rmap_item *tree_rmap_item;
2218 struct page *tree_page = NULL;
2219 struct ksm_stable_node *stable_node;
2220 struct folio *kfolio;
2221 unsigned int checksum;
2222 int err;
2223 bool max_page_sharing_bypass = false;
2224
2225 stable_node = page_stable_node(page);
2226 if (stable_node) {
2227 if (stable_node->head != &migrate_nodes &&
2228 get_kpfn_nid(READ_ONCE(stable_node->kpfn)) !=
2229 NUMA(stable_node->nid)) {
2230 stable_node_dup_del(stable_node);
2231 stable_node->head = &migrate_nodes;
2232 list_add(&stable_node->list, stable_node->head);
2233 }
2234 if (stable_node->head != &migrate_nodes &&
2235 rmap_item->head == stable_node)
2236 return;
2237 /*
2238 * If it's a KSM fork, allow it to go over the sharing limit
2239 * without warnings.
2240 */
2241 if (!is_page_sharing_candidate(stable_node))
2242 max_page_sharing_bypass = true;
2243 } else {
2244 remove_rmap_item_from_tree(rmap_item);
2245
2246 /*
2247 * If the hash value of the page has changed from the last time
2248 * we calculated it, this page is changing frequently: therefore we
2249 * don't want to insert it in the unstable tree, and we don't want
2250 * to waste our time searching for something identical to it there.
2251 */
2252 checksum = calc_checksum(page);
2253 if (rmap_item->oldchecksum != checksum) {
2254 rmap_item->oldchecksum = checksum;
2255 return;
2256 }
2257
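		/*
		 * A zero return means the page was merged with the kernel
		 * zero page; nothing more to do for it on this pass.
		 */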
2258 if (!try_to_merge_with_zero_page(rmap_item, page))
2259 return;
2260 }
2261
2262 /* Start by searching for the folio in the stable tree */
2263 kfolio = stable_tree_search(page);
2264 if (&kfolio->page == page && rmap_item->head == stable_node) {
2265 folio_put(kfolio);
2266 return;
2267 }
2268
2269 remove_rmap_item_from_tree(rmap_item);
2270
2271 if (kfolio) {
2272 if (kfolio == ERR_PTR(-EBUSY))
2273 return;
2274
2275 err = try_to_merge_with_ksm_page(rmap_item, page, &kfolio->page);
2276 if (!err) {
2277 /*
2278 * The page was successfully merged:
2279 * add its rmap_item to the stable tree.
2280 */
2281 folio_lock(kfolio);
2282 stable_tree_append(rmap_item, folio_stable_node(kfolio),
2283 max_page_sharing_bypass);
2284 folio_unlock(kfolio);
2285 }
2286 folio_put(kfolio);
2287 return;
2288 }
2289
2290 tree_rmap_item =
2291 unstable_tree_search_insert(rmap_item, page, &tree_page);
2292 if (tree_rmap_item) {
2293 bool split;
2294
2295 kfolio = try_to_merge_two_pages(rmap_item, page,
2296 tree_rmap_item, tree_page);
2297 /*
2298 * If both pages we tried to merge belong to the same compound
2299 * page, then we actually ended up increasing the reference
2300 * count of the same compound page twice, and split_huge_page
2301 * failed.
2302 * Here we set a flag if that happened, and we use it later to
2303 * try split_huge_page again. Since we call put_page right
2304 * afterwards, the reference count will be correct and
2305 * split_huge_page should succeed.
2306 */
2307 split = PageTransCompound(page)
2308 && compound_head(page) == compound_head(tree_page);
2309 put_page(tree_page);
2310 if (kfolio) {
2311 /*
2312 * The pages were successfully merged: insert new
2313 * node in the stable tree and add both rmap_items.
2314 */
2315 folio_lock(kfolio);
2316 stable_node = stable_tree_insert(kfolio);
2317 if (stable_node) {
2318 stable_tree_append(tree_rmap_item, stable_node,
2319 false);
2320 stable_tree_append(rmap_item, stable_node,
2321 false);
2322 }
2323 folio_unlock(kfolio);
2324
2325 /*
2326 * If we fail to insert the page into the stable tree,
2327 * we will have 2 virtual addresses that are pointing
2328 * to a ksm page left outside the stable tree,
2329 * in which case we need to break_cow on both.
2330 */
2331 if (!stable_node) {
2332 break_cow(tree_rmap_item);
2333 break_cow(rmap_item);
2334 }
2335 } else if (split) {
2336 /*
2337 * We are here if we tried to merge two pages and
2338 * failed because they both belonged to the same
2339 * compound page. We will split the page now, but no
2340 * merging will take place.
2341 * We do not want to add the cost of a full lock; if
2342 * the page is locked, it is better to skip it and
2343 * perhaps try again later.
2344 */
2345 if (!trylock_page(page))
2346 return;
2347 split_huge_page(page);
2348 unlock_page(page);
2349 }
2350 }
2351}
2352
2353static struct ksm_rmap_item *get_next_rmap_item(struct ksm_mm_slot *mm_slot,
2354 struct ksm_rmap_item **rmap_list,
2355 unsigned long addr)
2356{
2357 struct ksm_rmap_item *rmap_item;
2358
2359 while (*rmap_list) {
2360 rmap_item = *rmap_list;
2361 if ((rmap_item->address & PAGE_MASK) == addr)
2362 return rmap_item;
2363 if (rmap_item->address > addr)
2364 break;
2365 *rmap_list = rmap_item->rmap_list;
2366 remove_rmap_item_from_tree(rmap_item);
2367 free_rmap_item(rmap_item);
2368 }
2369
2370 rmap_item = alloc_rmap_item();
2371 if (rmap_item) {
2372 /* It has already been zeroed */
2373 rmap_item->mm = mm_slot->slot.mm;
2374 rmap_item->mm->ksm_rmap_items++;
2375 rmap_item->address = addr;
2376 rmap_item->rmap_list = *rmap_list;
2377 *rmap_list = rmap_item;
2378 }
2379 return rmap_item;
2380}
2381
2382/*
 * Calculate the number of scans to skip for a given rmap_item age. The age
 * reflects how many times de-duplication has already been tried
 * unsuccessfully; the smaller the age, the fewer scans are skipped.
2386 *
2387 * @age: rmap_item age of page
2388 */
2389static unsigned int skip_age(rmap_age_t age)
2390{
2391 if (age <= 3)
2392 return 1;
2393 if (age <= 5)
2394 return 2;
2395 if (age <= 8)
2396 return 4;
2397
2398 return 8;
2399}
2400
2401/*
2402 * Determines if a page should be skipped for the current scan.
2403 *
2404 * @folio: folio containing the page to check
2405 * @rmap_item: associated rmap_item of page
2406 */
2407static bool should_skip_rmap_item(struct folio *folio,
2408 struct ksm_rmap_item *rmap_item)
2409{
2410 rmap_age_t age;
2411
2412 if (!ksm_smart_scan)
2413 return false;
2414
2415 /*
	 * Never skip pages that are already KSM: cmp_and_merge_page() will
	 * essentially ignore them, but we still have to process them
	 * properly.
2419 */
2420 if (folio_test_ksm(folio))
2421 return false;
2422
2423 age = rmap_item->age;
2424 if (age != U8_MAX)
2425 rmap_item->age++;
2426
2427 /*
2428 * Smaller ages are not skipped, they need to get a chance to go
2429 * through the different phases of the KSM merging.
2430 */
2431 if (age < 3)
2432 return false;
2433
2434 /*
2435 * Are we still allowed to skip? If not, then don't skip it
2436 * and determine how much more often we are allowed to skip next.
2437 */
2438 if (!rmap_item->remaining_skips) {
2439 rmap_item->remaining_skips = skip_age(age);
2440 return false;
2441 }
2442
2443 /* Skip this page */
2444 ksm_pages_skipped++;
2445 rmap_item->remaining_skips--;
2446 remove_rmap_item_from_tree(rmap_item);
2447 return true;
2448}
2449
2450static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
2451{
2452 struct mm_struct *mm;
2453 struct ksm_mm_slot *mm_slot;
2454 struct mm_slot *slot;
2455 struct vm_area_struct *vma;
2456 struct ksm_rmap_item *rmap_item;
2457 struct vma_iterator vmi;
2458 int nid;
2459
2460 if (list_empty(&ksm_mm_head.slot.mm_node))
2461 return NULL;
2462
2463 mm_slot = ksm_scan.mm_slot;
2464 if (mm_slot == &ksm_mm_head) {
2465 advisor_start_scan();
2466 trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items);
2467
2468 /*
2469 * A number of pages can hang around indefinitely in per-cpu
2470 * LRU cache, raised page count preventing write_protect_page
2471 * from merging them. Though it doesn't really matter much,
2472 * it is puzzling to see some stuck in pages_volatile until
2473 * other activity jostles them out, and they also prevented
2474 * LTP's KSM test from succeeding deterministically; so drain
2475 * them here (here rather than on entry to ksm_do_scan(),
2476 * so we don't IPI too often when pages_to_scan is set low).
2477 */
2478 lru_add_drain_all();
2479
2480 /*
2481 * Whereas stale stable_nodes on the stable_tree itself
2482 * get pruned in the regular course of stable_tree_search(),
2483 * those moved out to the migrate_nodes list can accumulate:
2484 * so prune them once before each full scan.
2485 */
2486 if (!ksm_merge_across_nodes) {
2487 struct ksm_stable_node *stable_node, *next;
2488 struct folio *folio;
2489
2490 list_for_each_entry_safe(stable_node, next,
2491 &migrate_nodes, list) {
2492 folio = ksm_get_folio(stable_node,
2493 KSM_GET_FOLIO_NOLOCK);
2494 if (folio)
2495 folio_put(folio);
2496 cond_resched();
2497 }
2498 }
2499
2500 for (nid = 0; nid < ksm_nr_node_ids; nid++)
2501 root_unstable_tree[nid] = RB_ROOT;
2502
2503 spin_lock(&ksm_mmlist_lock);
2504 slot = list_entry(mm_slot->slot.mm_node.next,
2505 struct mm_slot, mm_node);
2506 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2507 ksm_scan.mm_slot = mm_slot;
2508 spin_unlock(&ksm_mmlist_lock);
2509 /*
2510 * Although we tested list_empty() above, a racing __ksm_exit
2511 * of the last mm on the list may have removed it since then.
2512 */
2513 if (mm_slot == &ksm_mm_head)
2514 return NULL;
2515next_mm:
2516 ksm_scan.address = 0;
2517 ksm_scan.rmap_list = &mm_slot->rmap_list;
2518 }
2519
2520 slot = &mm_slot->slot;
2521 mm = slot->mm;
2522 vma_iter_init(&vmi, mm, ksm_scan.address);
2523
2524 mmap_read_lock(mm);
2525 if (ksm_test_exit(mm))
2526 goto no_vmas;
2527
2528 for_each_vma(vmi, vma) {
2529 if (!(vma->vm_flags & VM_MERGEABLE))
2530 continue;
2531 if (ksm_scan.address < vma->vm_start)
2532 ksm_scan.address = vma->vm_start;
2533 if (!vma->anon_vma)
2534 ksm_scan.address = vma->vm_end;
2535
2536 while (ksm_scan.address < vma->vm_end) {
2537 struct page *tmp_page = NULL;
2538 struct folio_walk fw;
2539 struct folio *folio;
2540
2541 if (ksm_test_exit(mm))
2542 break;
2543
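			/*
			 * Only anonymous folios that are not device memory
			 * are merge candidates; take a folio reference
			 * before the walk ends so it can't be freed under us.
			 */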
2544 folio = folio_walk_start(&fw, vma, ksm_scan.address, 0);
2545 if (folio) {
2546 if (!folio_is_zone_device(folio) &&
2547 folio_test_anon(folio)) {
2548 folio_get(folio);
2549 tmp_page = fw.page;
2550 }
2551 folio_walk_end(&fw, vma);
2552 }
2553
2554 if (tmp_page) {
2555 flush_anon_page(vma, tmp_page, ksm_scan.address);
2556 flush_dcache_page(tmp_page);
2557 rmap_item = get_next_rmap_item(mm_slot,
2558 ksm_scan.rmap_list, ksm_scan.address);
2559 if (rmap_item) {
2560 ksm_scan.rmap_list =
2561 &rmap_item->rmap_list;
2562
2563 if (should_skip_rmap_item(folio, rmap_item)) {
2564 folio_put(folio);
2565 goto next_page;
2566 }
2567
2568 ksm_scan.address += PAGE_SIZE;
2569 *page = tmp_page;
2570 } else {
2571 folio_put(folio);
2572 }
2573 mmap_read_unlock(mm);
2574 return rmap_item;
2575 }
2576next_page:
2577 ksm_scan.address += PAGE_SIZE;
2578 cond_resched();
2579 }
2580 }
2581
2582 if (ksm_test_exit(mm)) {
2583no_vmas:
2584 ksm_scan.address = 0;
2585 ksm_scan.rmap_list = &mm_slot->rmap_list;
2586 }
2587 /*
2588 * Nuke all the rmap_items that are above this current rmap:
2589 * because there were no VM_MERGEABLE vmas with such addresses.
2590 */
2591 remove_trailing_rmap_items(ksm_scan.rmap_list);
2592
2593 spin_lock(&ksm_mmlist_lock);
2594 slot = list_entry(mm_slot->slot.mm_node.next,
2595 struct mm_slot, mm_node);
2596 ksm_scan.mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2597 if (ksm_scan.address == 0) {
2598 /*
2599 * We've completed a full scan of all vmas, holding mmap_lock
2600 * throughout, and found no VM_MERGEABLE: so do the same as
2601 * __ksm_exit does to remove this mm from all our lists now.
2602 * This applies either when cleaning up after __ksm_exit
2603 * (but beware: we can reach here even before __ksm_exit),
2604 * or when all VM_MERGEABLE areas have been unmapped (and
2605 * mmap_lock then protects against race with MADV_MERGEABLE).
2606 */
2607 hash_del(&mm_slot->slot.hash);
2608 list_del(&mm_slot->slot.mm_node);
2609 spin_unlock(&ksm_mmlist_lock);
2610
2611 mm_slot_free(mm_slot_cache, mm_slot);
2612 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2613 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2614 mmap_read_unlock(mm);
2615 mmdrop(mm);
2616 } else {
2617 mmap_read_unlock(mm);
2618 /*
		 * mmap_read_unlock(mm) first because after
		 * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may
2621 * already have been freed under us by __ksm_exit()
2622 * because the "mm_slot" is still hashed and
2623 * ksm_scan.mm_slot doesn't point to it anymore.
2624 */
2625 spin_unlock(&ksm_mmlist_lock);
2626 }
2627
2628 /* Repeat until we've completed scanning the whole list */
2629 mm_slot = ksm_scan.mm_slot;
2630 if (mm_slot != &ksm_mm_head)
2631 goto next_mm;
2632
2633 advisor_stop_scan();
2634
2635 trace_ksm_stop_scan(ksm_scan.seqnr, ksm_rmap_items);
2636 ksm_scan.seqnr++;
2637 return NULL;
2638}
2639
2640/**
2641 * ksm_do_scan - the ksm scanner main worker function.
2642 * @scan_npages: number of pages we want to scan before we return.
2643 */
2644static void ksm_do_scan(unsigned int scan_npages)
2645{
2646 struct ksm_rmap_item *rmap_item;
2647 struct page *page;
2648
2649 while (scan_npages-- && likely(!freezing(current))) {
2650 cond_resched();
2651 rmap_item = scan_get_next_rmap_item(&page);
2652 if (!rmap_item)
2653 return;
2654 cmp_and_merge_page(page, rmap_item);
2655 put_page(page);
2656 ksm_pages_scanned++;
2657 }
2658}
2659
2660static int ksmd_should_run(void)
2661{
2662 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.slot.mm_node);
2663}
2664
2665static int ksm_scan_thread(void *nothing)
2666{
2667 unsigned int sleep_ms;
2668
2669 set_freezable();
2670 set_user_nice(current, 5);
2671
2672 while (!kthread_should_stop()) {
2673 mutex_lock(&ksm_thread_mutex);
2674 wait_while_offlining();
2675 if (ksmd_should_run())
2676 ksm_do_scan(ksm_thread_pages_to_scan);
2677 mutex_unlock(&ksm_thread_mutex);
2678
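		/*
		 * Sleep between scan batches; a write to the sleep_millisecs
		 * sysfs knob wakes us early so the new value takes effect.
		 */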
2679 if (ksmd_should_run()) {
2680 sleep_ms = READ_ONCE(ksm_thread_sleep_millisecs);
2681 wait_event_freezable_timeout(ksm_iter_wait,
2682 sleep_ms != READ_ONCE(ksm_thread_sleep_millisecs),
2683 msecs_to_jiffies(sleep_ms));
2684 } else {
2685 wait_event_freezable(ksm_thread_wait,
2686 ksmd_should_run() || kthread_should_stop());
2687 }
2688 }
2689 return 0;
2690}
2691
2692static void __ksm_add_vma(struct vm_area_struct *vma)
2693{
2694 unsigned long vm_flags = vma->vm_flags;
2695
2696 if (vm_flags & VM_MERGEABLE)
2697 return;
2698
2699 if (vma_ksm_compatible(vma))
2700 vm_flags_set(vma, VM_MERGEABLE);
2701}
2702
2703static int __ksm_del_vma(struct vm_area_struct *vma)
2704{
2705 int err;
2706
2707 if (!(vma->vm_flags & VM_MERGEABLE))
2708 return 0;
2709
2710 if (vma->anon_vma) {
2711 err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end, true);
2712 if (err)
2713 return err;
2714 }
2715
2716 vm_flags_clear(vma, VM_MERGEABLE);
2717 return 0;
2718}
2719/**
2720 * ksm_add_vma - Mark vma as mergeable if compatible
2721 *
2722 * @vma: Pointer to vma
2723 */
2724void ksm_add_vma(struct vm_area_struct *vma)
2725{
2726 struct mm_struct *mm = vma->vm_mm;
2727
2728 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2729 __ksm_add_vma(vma);
2730}
2731
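/* Mark every KSM-compatible VMA of the mm as mergeable. */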
2732static void ksm_add_vmas(struct mm_struct *mm)
2733{
2734 struct vm_area_struct *vma;
2735
2736 VMA_ITERATOR(vmi, mm, 0);
2737 for_each_vma(vmi, vma)
2738 __ksm_add_vma(vma);
2739}
2740
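/* Unmerge and clear VM_MERGEABLE on all mergeable VMAs; stop at the first error. */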
2741static int ksm_del_vmas(struct mm_struct *mm)
2742{
2743 struct vm_area_struct *vma;
2744 int err;
2745
2746 VMA_ITERATOR(vmi, mm, 0);
2747 for_each_vma(vmi, vma) {
2748 err = __ksm_del_vma(vma);
2749 if (err)
2750 return err;
2751 }
2752 return 0;
2753}
2754
2755/**
 * ksm_enable_merge_any - Add mm to the KSM mm list and enable merging on all
 * compatible VMAs
2758 *
2759 * @mm: Pointer to mm
2760 *
2761 * Returns 0 on success, otherwise error code
2762 */
2763int ksm_enable_merge_any(struct mm_struct *mm)
2764{
2765 int err;
2766
2767 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2768 return 0;
2769
2770 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2771 err = __ksm_enter(mm);
2772 if (err)
2773 return err;
2774 }
2775
2776 set_bit(MMF_VM_MERGE_ANY, &mm->flags);
2777 ksm_add_vmas(mm);
2778
2779 return 0;
2780}
2781
2782/**
 * ksm_disable_merge_any - Disable merging on all compatible VMAs of the mm,
 * previously enabled via ksm_enable_merge_any().
 *
 * Disabling merging implies unmerging any merged pages, like setting
 * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
 * merging on all compatible VMAs remains enabled.
2789 *
2790 * @mm: Pointer to mm
2791 *
2792 * Returns 0 on success, otherwise error code
2793 */
2794int ksm_disable_merge_any(struct mm_struct *mm)
2795{
2796 int err;
2797
2798 if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2799 return 0;
2800
2801 err = ksm_del_vmas(mm);
2802 if (err) {
2803 ksm_add_vmas(mm);
2804 return err;
2805 }
2806
2807 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2808 return 0;
2809}
2810
2811int ksm_disable(struct mm_struct *mm)
2812{
2813 mmap_assert_write_locked(mm);
2814
2815 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
2816 return 0;
2817 if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
2818 return ksm_disable_merge_any(mm);
2819 return ksm_del_vmas(mm);
2820}
2821
2822int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
2823 unsigned long end, int advice, unsigned long *vm_flags)
2824{
2825 struct mm_struct *mm = vma->vm_mm;
2826 int err;
2827
2828 switch (advice) {
2829 case MADV_MERGEABLE:
2830 if (vma->vm_flags & VM_MERGEABLE)
2831 return 0;
2832 if (!vma_ksm_compatible(vma))
2833 return 0;
2834
2835 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
2836 err = __ksm_enter(mm);
2837 if (err)
2838 return err;
2839 }
2840
2841 *vm_flags |= VM_MERGEABLE;
2842 break;
2843
2844 case MADV_UNMERGEABLE:
2845 if (!(*vm_flags & VM_MERGEABLE))
2846 return 0; /* just ignore the advice */
2847
2848 if (vma->anon_vma) {
2849 err = unmerge_ksm_pages(vma, start, end, true);
2850 if (err)
2851 return err;
2852 }
2853
2854 *vm_flags &= ~VM_MERGEABLE;
2855 break;
2856 }
2857
2858 return 0;
2859}
2860EXPORT_SYMBOL_GPL(ksm_madvise);
2861
2862int __ksm_enter(struct mm_struct *mm)
2863{
2864 struct ksm_mm_slot *mm_slot;
2865 struct mm_slot *slot;
2866 int needs_wakeup;
2867
2868 mm_slot = mm_slot_alloc(mm_slot_cache);
2869 if (!mm_slot)
2870 return -ENOMEM;
2871
2872 slot = &mm_slot->slot;
2873
2874 /* Check ksm_run too? Would need tighter locking */
2875 needs_wakeup = list_empty(&ksm_mm_head.slot.mm_node);
2876
2877 spin_lock(&ksm_mmlist_lock);
2878 mm_slot_insert(mm_slots_hash, mm, slot);
2879 /*
2880 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
2881 * insert just behind the scanning cursor, to let the area settle
2882 * down a little; when fork is followed by immediate exec, we don't
2883 * want ksmd to waste time setting up and tearing down an rmap_list.
2884 *
2885 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
2886 * scanning cursor, otherwise KSM pages in newly forked mms will be
2887 * missed: then we might as well insert at the end of the list.
2888 */
2889 if (ksm_run & KSM_RUN_UNMERGE)
2890 list_add_tail(&slot->mm_node, &ksm_mm_head.slot.mm_node);
2891 else
2892 list_add_tail(&slot->mm_node, &ksm_scan.mm_slot->slot.mm_node);
2893 spin_unlock(&ksm_mmlist_lock);
2894
2895 set_bit(MMF_VM_MERGEABLE, &mm->flags);
2896 mmgrab(mm);
2897
2898 if (needs_wakeup)
2899 wake_up_interruptible(&ksm_thread_wait);
2900
2901 trace_ksm_enter(mm);
2902 return 0;
2903}
2904
2905void __ksm_exit(struct mm_struct *mm)
2906{
2907 struct ksm_mm_slot *mm_slot;
2908 struct mm_slot *slot;
2909 int easy_to_free = 0;
2910
2911 /*
2912 * This process is exiting: if it's straightforward (as is the
2913 * case when ksmd was never running), free mm_slot immediately.
2914 * But if it's at the cursor or has rmap_items linked to it, use
2915 * mmap_lock to synchronize with any break_cows before pagetables
2916 * are freed, and leave the mm_slot on the list for ksmd to free.
2917 * Beware: ksm may already have noticed it exiting and freed the slot.
2918 */
2919
2920 spin_lock(&ksm_mmlist_lock);
2921 slot = mm_slot_lookup(mm_slots_hash, mm);
2922 mm_slot = mm_slot_entry(slot, struct ksm_mm_slot, slot);
2923 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
2924 if (!mm_slot->rmap_list) {
2925 hash_del(&slot->hash);
2926 list_del(&slot->mm_node);
2927 easy_to_free = 1;
2928 } else {
2929 list_move(&slot->mm_node,
2930 &ksm_scan.mm_slot->slot.mm_node);
2931 }
2932 }
2933 spin_unlock(&ksm_mmlist_lock);
2934
2935 if (easy_to_free) {
2936 mm_slot_free(mm_slot_cache, mm_slot);
2937 clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
2938 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
2939 mmdrop(mm);
2940 } else if (mm_slot) {
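		/*
		 * Synchronize with any break_cow() still using this mm;
		 * ksmd will free the mm_slot later (see comment above).
		 */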
2941 mmap_write_lock(mm);
2942 mmap_write_unlock(mm);
2943 }
2944
2945 trace_ksm_exit(mm);
2946}
2947
2948struct folio *ksm_might_need_to_copy(struct folio *folio,
2949 struct vm_area_struct *vma, unsigned long addr)
2950{
2951 struct page *page = folio_page(folio, 0);
2952 struct anon_vma *anon_vma = folio_anon_vma(folio);
2953 struct folio *new_folio;
2954
2955 if (folio_test_large(folio))
2956 return folio;
2957
2958 if (folio_test_ksm(folio)) {
2959 if (folio_stable_node(folio) &&
2960 !(ksm_run & KSM_RUN_UNMERGE))
2961 return folio; /* no need to copy it */
2962 } else if (!anon_vma) {
2963 return folio; /* no need to copy it */
2964 } else if (folio->index == linear_page_index(vma, addr) &&
2965 anon_vma->root == vma->anon_vma->root) {
2966 return folio; /* still no need to copy it */
2967 }
2968 if (PageHWPoison(page))
2969 return ERR_PTR(-EHWPOISON);
2970 if (!folio_test_uptodate(folio))
2971 return folio; /* let do_swap_page report the error */
2972
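	/*
	 * Otherwise the contents must be copied into a freshly allocated
	 * folio before they can be mapped at this address.
	 */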
2973 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr);
2974 if (new_folio &&
2975 mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
2976 folio_put(new_folio);
2977 new_folio = NULL;
2978 }
2979 if (new_folio) {
2980 if (copy_mc_user_highpage(folio_page(new_folio, 0), page,
2981 addr, vma)) {
2982 folio_put(new_folio);
2983 return ERR_PTR(-EHWPOISON);
2984 }
2985 folio_set_dirty(new_folio);
2986 __folio_mark_uptodate(new_folio);
2987 __folio_set_locked(new_folio);
2988#ifdef CONFIG_SWAP
2989 count_vm_event(KSM_SWPIN_COPY);
2990#endif
2991 }
2992
2993 return new_folio;
2994}
2995
2996void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
2997{
2998 struct ksm_stable_node *stable_node;
2999 struct ksm_rmap_item *rmap_item;
3000 int search_new_forks = 0;
3001
3002 VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
3003
3004 /*
3005 * Rely on the page lock to protect against concurrent modifications
3006 * to that page's node of the stable tree.
3007 */
3008 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3009
3010 stable_node = folio_stable_node(folio);
3011 if (!stable_node)
3012 return;
3013again:
3014 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3015 struct anon_vma *anon_vma = rmap_item->anon_vma;
3016 struct anon_vma_chain *vmac;
3017 struct vm_area_struct *vma;
3018
3019 cond_resched();
3020 if (!anon_vma_trylock_read(anon_vma)) {
3021 if (rwc->try_lock) {
3022 rwc->contended = true;
3023 return;
3024 }
3025 anon_vma_lock_read(anon_vma);
3026 }
3027 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
3028 0, ULONG_MAX) {
3029 unsigned long addr;
3030
3031 cond_resched();
3032 vma = vmac->vma;
3033
3034 /* Ignore the stable/unstable/sqnr flags */
3035 addr = rmap_item->address & PAGE_MASK;
3036
3037 if (addr < vma->vm_start || addr >= vma->vm_end)
3038 continue;
3039 /*
3040 * Initially we examine only the vma which covers this
3041 * rmap_item; but later, if there is still work to do,
3042 * we examine covering vmas in other mms: in case they
3043 * were forked from the original since ksmd passed.
3044 */
3045 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
3046 continue;
3047
3048 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
3049 continue;
3050
3051 if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
3052 anon_vma_unlock_read(anon_vma);
3053 return;
3054 }
3055 if (rwc->done && rwc->done(folio)) {
3056 anon_vma_unlock_read(anon_vma);
3057 return;
3058 }
3059 }
3060 anon_vma_unlock_read(anon_vma);
3061 }
3062 if (!search_new_forks++)
3063 goto again;
3064}
3065
3066#ifdef CONFIG_MEMORY_FAILURE
3067/*
 * Collect processes when a memory error hits a ksm page.
3069 */
3070void collect_procs_ksm(const struct folio *folio, const struct page *page,
3071 struct list_head *to_kill, int force_early)
3072{
3073 struct ksm_stable_node *stable_node;
3074 struct ksm_rmap_item *rmap_item;
3075 struct vm_area_struct *vma;
3076 struct task_struct *tsk;
3077
3078 stable_node = folio_stable_node(folio);
3079 if (!stable_node)
3080 return;
3081 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
3082 struct anon_vma *av = rmap_item->anon_vma;
3083
3084 anon_vma_lock_read(av);
3085 rcu_read_lock();
3086 for_each_process(tsk) {
3087 struct anon_vma_chain *vmac;
3088 unsigned long addr;
3089 struct task_struct *t =
3090 task_early_kill(tsk, force_early);
3091 if (!t)
3092 continue;
3093 anon_vma_interval_tree_foreach(vmac, &av->rb_root, 0,
3094 ULONG_MAX)
3095 {
3096 vma = vmac->vma;
3097 if (vma->vm_mm == t->mm) {
3098 addr = rmap_item->address & PAGE_MASK;
3099 add_to_kill_ksm(t, page, vma, to_kill,
3100 addr);
3101 }
3102 }
3103 }
3104 rcu_read_unlock();
3105 anon_vma_unlock_read(av);
3106 }
3107}
3108#endif
3109
3110#ifdef CONFIG_MIGRATION
3111void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
3112{
3113 struct ksm_stable_node *stable_node;
3114
3115 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
3116 VM_BUG_ON_FOLIO(!folio_test_locked(newfolio), newfolio);
3117 VM_BUG_ON_FOLIO(newfolio->mapping != folio->mapping, newfolio);
3118
3119 stable_node = folio_stable_node(folio);
3120 if (stable_node) {
3121 VM_BUG_ON_FOLIO(stable_node->kpfn != folio_pfn(folio), folio);
3122 stable_node->kpfn = folio_pfn(newfolio);
3123 /*
3124 * newfolio->mapping was set in advance; now we need smp_wmb()
3125 * to make sure that the new stable_node->kpfn is visible
3126 * to ksm_get_folio() before it can see that folio->mapping
3127 * has gone stale (or that the swapcache flag has been cleared).
3128 */
3129 smp_wmb();
3130 folio_set_stable_node(folio, NULL);
3131 }
3132}
3133#endif /* CONFIG_MIGRATION */
3134
3135#ifdef CONFIG_MEMORY_HOTREMOVE
3136static void wait_while_offlining(void)
3137{
3138 while (ksm_run & KSM_RUN_OFFLINE) {
3139 mutex_unlock(&ksm_thread_mutex);
3140 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
3141 TASK_UNINTERRUPTIBLE);
3142 mutex_lock(&ksm_thread_mutex);
3143 }
3144}
3145
3146static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
3147 unsigned long start_pfn,
3148 unsigned long end_pfn)
3149{
3150 if (stable_node->kpfn >= start_pfn &&
3151 stable_node->kpfn < end_pfn) {
3152 /*
3153 * Don't ksm_get_folio, page has already gone:
3154 * which is why we keep kpfn instead of page*
3155 */
3156 remove_node_from_stable_tree(stable_node);
3157 return true;
3158 }
3159 return false;
3160}
3161
3162static bool stable_node_chain_remove_range(struct ksm_stable_node *stable_node,
3163 unsigned long start_pfn,
3164 unsigned long end_pfn,
3165 struct rb_root *root)
3166{
3167 struct ksm_stable_node *dup;
3168 struct hlist_node *hlist_safe;
3169
3170 if (!is_stable_node_chain(stable_node)) {
3171 VM_BUG_ON(is_stable_node_dup(stable_node));
3172 return stable_node_dup_remove_range(stable_node, start_pfn,
3173 end_pfn);
3174 }
3175
3176 hlist_for_each_entry_safe(dup, hlist_safe,
3177 &stable_node->hlist, hlist_dup) {
3178 VM_BUG_ON(!is_stable_node_dup(dup));
3179 stable_node_dup_remove_range(dup, start_pfn, end_pfn);
3180 }
3181 if (hlist_empty(&stable_node->hlist)) {
3182 free_stable_node_chain(stable_node, root);
3183 return true; /* notify caller that tree was rebalanced */
3184 } else
3185 return false;
3186}
3187
3188static void ksm_check_stable_tree(unsigned long start_pfn,
3189 unsigned long end_pfn)
3190{
3191 struct ksm_stable_node *stable_node, *next;
3192 struct rb_node *node;
3193 int nid;
3194
3195 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
3196 node = rb_first(root_stable_tree + nid);
3197 while (node) {
3198 stable_node = rb_entry(node, struct ksm_stable_node, node);
3199 if (stable_node_chain_remove_range(stable_node,
3200 start_pfn, end_pfn,
3201 root_stable_tree +
3202 nid))
3203 node = rb_first(root_stable_tree + nid);
3204 else
3205 node = rb_next(node);
3206 cond_resched();
3207 }
3208 }
3209 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
3210 if (stable_node->kpfn >= start_pfn &&
3211 stable_node->kpfn < end_pfn)
3212 remove_node_from_stable_tree(stable_node);
3213 cond_resched();
3214 }
3215}
3216
3217static int ksm_memory_callback(struct notifier_block *self,
3218 unsigned long action, void *arg)
3219{
3220 struct memory_notify *mn = arg;
3221
3222 switch (action) {
3223 case MEM_GOING_OFFLINE:
3224 /*
3225 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
3226 * and remove_all_stable_nodes() while memory is going offline:
3227 * it is unsafe for them to touch the stable tree at this time.
3228 * But unmerge_ksm_pages(), rmap lookups and other entry points
3229 * which do not need the ksm_thread_mutex are all safe.
3230 */
3231 mutex_lock(&ksm_thread_mutex);
3232 ksm_run |= KSM_RUN_OFFLINE;
3233 mutex_unlock(&ksm_thread_mutex);
3234 break;
3235
3236 case MEM_OFFLINE:
3237 /*
3238 * Most of the work is done by page migration; but there might
3239 * be a few stable_nodes left over, still pointing to struct
3240 * pages which have been offlined: prune those from the tree,
3241 * otherwise ksm_get_folio() might later try to access a
3242 * non-existent struct page.
3243 */
3244 ksm_check_stable_tree(mn->start_pfn,
3245 mn->start_pfn + mn->nr_pages);
3246 fallthrough;
3247 case MEM_CANCEL_OFFLINE:
3248 mutex_lock(&ksm_thread_mutex);
3249 ksm_run &= ~KSM_RUN_OFFLINE;
3250 mutex_unlock(&ksm_thread_mutex);
3251
3252 smp_mb(); /* wake_up_bit advises this */
3253 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
3254 break;
3255 }
3256 return NOTIFY_OK;
3257}
3258#else
3259static void wait_while_offlining(void)
3260{
3261}
3262#endif /* CONFIG_MEMORY_HOTREMOVE */
3263
3264#ifdef CONFIG_PROC_FS
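/*
 * Per-mm profit estimate: bytes saved by this mm's merged and zero-backed
 * pages, minus the memory consumed by its rmap_items.
 */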
3265long ksm_process_profit(struct mm_struct *mm)
3266{
3267 return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
3268 mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
3269}
3270#endif /* CONFIG_PROC_FS */
3271
3272#ifdef CONFIG_SYSFS
3273/*
3274 * This all compiles without CONFIG_SYSFS, but is a waste of space.
3275 */
3276
3277#define KSM_ATTR_RO(_name) \
3278 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3279#define KSM_ATTR(_name) \
3280 static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3281
3282static ssize_t sleep_millisecs_show(struct kobject *kobj,
3283 struct kobj_attribute *attr, char *buf)
3284{
3285 return sysfs_emit(buf, "%u\n", ksm_thread_sleep_millisecs);
3286}
3287
3288static ssize_t sleep_millisecs_store(struct kobject *kobj,
3289 struct kobj_attribute *attr,
3290 const char *buf, size_t count)
3291{
3292 unsigned int msecs;
3293 int err;
3294
3295 err = kstrtouint(buf, 10, &msecs);
3296 if (err)
3297 return -EINVAL;
3298
3299 ksm_thread_sleep_millisecs = msecs;
3300 wake_up_interruptible(&ksm_iter_wait);
3301
3302 return count;
3303}
3304KSM_ATTR(sleep_millisecs);
3305
3306static ssize_t pages_to_scan_show(struct kobject *kobj,
3307 struct kobj_attribute *attr, char *buf)
3308{
3309 return sysfs_emit(buf, "%u\n", ksm_thread_pages_to_scan);
3310}
3311
3312static ssize_t pages_to_scan_store(struct kobject *kobj,
3313 struct kobj_attribute *attr,
3314 const char *buf, size_t count)
3315{
3316 unsigned int nr_pages;
3317 int err;
3318
3319 if (ksm_advisor != KSM_ADVISOR_NONE)
3320 return -EINVAL;
3321
3322 err = kstrtouint(buf, 10, &nr_pages);
3323 if (err)
3324 return -EINVAL;
3325
3326 ksm_thread_pages_to_scan = nr_pages;
3327
3328 return count;
3329}
3330KSM_ATTR(pages_to_scan);
3331
3332static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
3333 char *buf)
3334{
3335 return sysfs_emit(buf, "%lu\n", ksm_run);
3336}
3337
3338static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
3339 const char *buf, size_t count)
3340{
3341 unsigned int flags;
3342 int err;
3343
3344 err = kstrtouint(buf, 10, &flags);
3345 if (err)
3346 return -EINVAL;
3347 if (flags > KSM_RUN_UNMERGE)
3348 return -EINVAL;
3349
3350 /*
3351 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
3352 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
3353 * breaking COW to free the pages_shared (but leaves mm_slots
3354 * on the list for when ksmd may be set running again).
3355 */
3356
3357 mutex_lock(&ksm_thread_mutex);
3358 wait_while_offlining();
3359 if (ksm_run != flags) {
3360 ksm_run = flags;
3361 if (flags & KSM_RUN_UNMERGE) {
3362 set_current_oom_origin();
3363 err = unmerge_and_remove_all_rmap_items();
3364 clear_current_oom_origin();
3365 if (err) {
3366 ksm_run = KSM_RUN_STOP;
3367 count = err;
3368 }
3369 }
3370 }
3371 mutex_unlock(&ksm_thread_mutex);
3372
3373 if (flags & KSM_RUN_MERGE)
3374 wake_up_interruptible(&ksm_thread_wait);
3375
3376 return count;
3377}
3378KSM_ATTR(run);
3379
3380#ifdef CONFIG_NUMA
3381static ssize_t merge_across_nodes_show(struct kobject *kobj,
3382 struct kobj_attribute *attr, char *buf)
3383{
3384 return sysfs_emit(buf, "%u\n", ksm_merge_across_nodes);
3385}
3386
3387static ssize_t merge_across_nodes_store(struct kobject *kobj,
3388 struct kobj_attribute *attr,
3389 const char *buf, size_t count)
3390{
3391 int err;
3392 unsigned long knob;
3393
3394 err = kstrtoul(buf, 10, &knob);
3395 if (err)
3396 return err;
3397 if (knob > 1)
3398 return -EINVAL;
3399
3400 mutex_lock(&ksm_thread_mutex);
3401 wait_while_offlining();
3402 if (ksm_merge_across_nodes != knob) {
3403 if (ksm_pages_shared || remove_all_stable_nodes())
3404 err = -EBUSY;
3405 else if (root_stable_tree == one_stable_tree) {
3406 struct rb_root *buf;
3407 /*
3408 * This is the first time that we switch away from the
3409 * default of merging across nodes: must now allocate
3410 * a buffer to hold as many roots as may be needed.
3411 * Allocate stable and unstable together:
3412 * MAXSMP NODES_SHIFT 10 will use 16kB.
3413 */
3414 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
3415 GFP_KERNEL);
			/* An empty RB_ROOT is all zeroes, so the kcalloc'd buffer needs no further init */
3417 if (!buf)
3418 err = -ENOMEM;
3419 else {
3420 root_stable_tree = buf;
3421 root_unstable_tree = buf + nr_node_ids;
3422 /* Stable tree is empty but not the unstable */
3423 root_unstable_tree[0] = one_unstable_tree[0];
3424 }
3425 }
3426 if (!err) {
3427 ksm_merge_across_nodes = knob;
3428 ksm_nr_node_ids = knob ? 1 : nr_node_ids;
3429 }
3430 }
3431 mutex_unlock(&ksm_thread_mutex);
3432
3433 return err ? err : count;
3434}
3435KSM_ATTR(merge_across_nodes);
3436#endif
3437
3438static ssize_t use_zero_pages_show(struct kobject *kobj,
3439 struct kobj_attribute *attr, char *buf)
3440{
3441 return sysfs_emit(buf, "%u\n", ksm_use_zero_pages);
3442}
3443static ssize_t use_zero_pages_store(struct kobject *kobj,
3444 struct kobj_attribute *attr,
3445 const char *buf, size_t count)
3446{
3447 int err;
3448 bool value;
3449
3450 err = kstrtobool(buf, &value);
3451 if (err)
3452 return -EINVAL;
3453
3454 ksm_use_zero_pages = value;
3455
3456 return count;
3457}
3458KSM_ATTR(use_zero_pages);
3459
3460static ssize_t max_page_sharing_show(struct kobject *kobj,
3461 struct kobj_attribute *attr, char *buf)
3462{
3463 return sysfs_emit(buf, "%u\n", ksm_max_page_sharing);
3464}
3465
3466static ssize_t max_page_sharing_store(struct kobject *kobj,
3467 struct kobj_attribute *attr,
3468 const char *buf, size_t count)
3469{
3470 int err;
3471 int knob;
3472
3473 err = kstrtoint(buf, 10, &knob);
3474 if (err)
3475 return err;
3476 /*
3477 * When a KSM page is created it is shared by 2 mappings. This
3478 * being a signed comparison, it implicitly verifies it's not
3479 * negative.
3480 */
3481 if (knob < 2)
3482 return -EINVAL;
3483
3484 if (READ_ONCE(ksm_max_page_sharing) == knob)
3485 return count;
3486
3487 mutex_lock(&ksm_thread_mutex);
3488 wait_while_offlining();
3489 if (ksm_max_page_sharing != knob) {
3490 if (ksm_pages_shared || remove_all_stable_nodes())
3491 err = -EBUSY;
3492 else
3493 ksm_max_page_sharing = knob;
3494 }
3495 mutex_unlock(&ksm_thread_mutex);
3496
3497 return err ? err : count;
3498}
3499KSM_ATTR(max_page_sharing);
3500
3501static ssize_t pages_scanned_show(struct kobject *kobj,
3502 struct kobj_attribute *attr, char *buf)
3503{
3504 return sysfs_emit(buf, "%lu\n", ksm_pages_scanned);
3505}
3506KSM_ATTR_RO(pages_scanned);
3507
3508static ssize_t pages_shared_show(struct kobject *kobj,
3509 struct kobj_attribute *attr, char *buf)
3510{
3511 return sysfs_emit(buf, "%lu\n", ksm_pages_shared);
3512}
3513KSM_ATTR_RO(pages_shared);
3514
3515static ssize_t pages_sharing_show(struct kobject *kobj,
3516 struct kobj_attribute *attr, char *buf)
3517{
3518 return sysfs_emit(buf, "%lu\n", ksm_pages_sharing);
3519}
3520KSM_ATTR_RO(pages_sharing);
3521
3522static ssize_t pages_unshared_show(struct kobject *kobj,
3523 struct kobj_attribute *attr, char *buf)
3524{
3525 return sysfs_emit(buf, "%lu\n", ksm_pages_unshared);
3526}
3527KSM_ATTR_RO(pages_unshared);
3528
3529static ssize_t pages_volatile_show(struct kobject *kobj,
3530 struct kobj_attribute *attr, char *buf)
3531{
3532 long ksm_pages_volatile;
3533
3534 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
3535 - ksm_pages_sharing - ksm_pages_unshared;
3536 /*
3537 * It was not worth any locking to calculate that statistic,
3538 * but it might therefore sometimes be negative: conceal that.
3539 */
3540 if (ksm_pages_volatile < 0)
3541 ksm_pages_volatile = 0;
3542 return sysfs_emit(buf, "%ld\n", ksm_pages_volatile);
3543}
3544KSM_ATTR_RO(pages_volatile);
3545
3546static ssize_t pages_skipped_show(struct kobject *kobj,
3547 struct kobj_attribute *attr, char *buf)
3548{
3549 return sysfs_emit(buf, "%lu\n", ksm_pages_skipped);
3550}
3551KSM_ATTR_RO(pages_skipped);
3552
3553static ssize_t ksm_zero_pages_show(struct kobject *kobj,
3554 struct kobj_attribute *attr, char *buf)
3555{
3556 return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
3557}
3558KSM_ATTR_RO(ksm_zero_pages);
3559
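/*
 * System-wide profit estimate: bytes saved by pages sharing KSM pages and by
 * KSM-placed zero pages, minus the memory consumed by all rmap_items.
 */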
3560static ssize_t general_profit_show(struct kobject *kobj,
3561 struct kobj_attribute *attr, char *buf)
3562{
3563 long general_profit;
3564
3565 general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
3566 ksm_rmap_items * sizeof(struct ksm_rmap_item);
3567
3568 return sysfs_emit(buf, "%ld\n", general_profit);
3569}
3570KSM_ATTR_RO(general_profit);
3571
3572static ssize_t stable_node_dups_show(struct kobject *kobj,
3573 struct kobj_attribute *attr, char *buf)
3574{
3575 return sysfs_emit(buf, "%lu\n", ksm_stable_node_dups);
3576}
3577KSM_ATTR_RO(stable_node_dups);
3578
3579static ssize_t stable_node_chains_show(struct kobject *kobj,
3580 struct kobj_attribute *attr, char *buf)
3581{
3582 return sysfs_emit(buf, "%lu\n", ksm_stable_node_chains);
3583}
3584KSM_ATTR_RO(stable_node_chains);
3585
3586static ssize_t
3587stable_node_chains_prune_millisecs_show(struct kobject *kobj,
3588 struct kobj_attribute *attr,
3589 char *buf)
3590{
3591 return sysfs_emit(buf, "%u\n", ksm_stable_node_chains_prune_millisecs);
3592}
3593
3594static ssize_t
3595stable_node_chains_prune_millisecs_store(struct kobject *kobj,
3596 struct kobj_attribute *attr,
3597 const char *buf, size_t count)
3598{
3599 unsigned int msecs;
3600 int err;
3601
3602 err = kstrtouint(buf, 10, &msecs);
3603 if (err)
3604 return -EINVAL;
3605
3606 ksm_stable_node_chains_prune_millisecs = msecs;
3607
3608 return count;
3609}
3610KSM_ATTR(stable_node_chains_prune_millisecs);
3611
3612static ssize_t full_scans_show(struct kobject *kobj,
3613 struct kobj_attribute *attr, char *buf)
3614{
3615 return sysfs_emit(buf, "%lu\n", ksm_scan.seqnr);
3616}
3617KSM_ATTR_RO(full_scans);
3618
3619static ssize_t smart_scan_show(struct kobject *kobj,
3620 struct kobj_attribute *attr, char *buf)
3621{
3622 return sysfs_emit(buf, "%u\n", ksm_smart_scan);
3623}
3624
3625static ssize_t smart_scan_store(struct kobject *kobj,
3626 struct kobj_attribute *attr,
3627 const char *buf, size_t count)
3628{
3629 int err;
3630 bool value;
3631
3632 err = kstrtobool(buf, &value);
3633 if (err)
3634 return -EINVAL;
3635
3636 ksm_smart_scan = value;
3637 return count;
3638}
3639KSM_ATTR(smart_scan);
3640
3641static ssize_t advisor_mode_show(struct kobject *kobj,
3642 struct kobj_attribute *attr, char *buf)
3643{
3644 const char *output;
3645
3646 if (ksm_advisor == KSM_ADVISOR_NONE)
3647 output = "[none] scan-time";
3648 else if (ksm_advisor == KSM_ADVISOR_SCAN_TIME)
3649 output = "none [scan-time]";
3650
3651 return sysfs_emit(buf, "%s\n", output);
3652}
3653
static ssize_t advisor_mode_store(struct kobject *kobj,
				  struct kobj_attribute *attr, const char *buf,
				  size_t count)
{
	enum ksm_advisor_type curr_advisor = ksm_advisor;

	if (sysfs_streq("scan-time", buf))
		ksm_advisor = KSM_ADVISOR_SCAN_TIME;
	else if (sysfs_streq("none", buf))
		ksm_advisor = KSM_ADVISOR_NONE;
	else
		return -EINVAL;

	/* Set advisor default values */
	if (curr_advisor != ksm_advisor)
		set_advisor_defaults();

	return count;
}
KSM_ATTR(advisor_mode);

static ssize_t advisor_max_cpu_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%u\n", ksm_advisor_max_cpu);
}

static ssize_t advisor_max_cpu_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_cpu = value;
	return count;
}
KSM_ATTR(advisor_max_cpu);

static ssize_t advisor_min_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_min_pages_to_scan);
}

static ssize_t advisor_min_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_min_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_min_pages_to_scan);

static ssize_t advisor_max_pages_to_scan_show(struct kobject *kobj,
					      struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_max_pages_to_scan);
}

static ssize_t advisor_max_pages_to_scan_store(struct kobject *kobj,
					       struct kobj_attribute *attr,
					       const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;

	ksm_advisor_max_pages_to_scan = value;
	return count;
}
KSM_ATTR(advisor_max_pages_to_scan);

static ssize_t advisor_target_scan_time_show(struct kobject *kobj,
					     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", ksm_advisor_target_scan_time);
}

static ssize_t advisor_target_scan_time_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long value;

	err = kstrtoul(buf, 10, &value);
	if (err)
		return -EINVAL;
	if (value < 1)
		return -EINVAL;

	ksm_advisor_target_scan_time = value;
	return count;
}
KSM_ATTR(advisor_target_scan_time);

static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&pages_scanned_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
	&pages_volatile_attr.attr,
	&pages_skipped_attr.attr,
	&ksm_zero_pages_attr.attr,
	&full_scans_attr.attr,
#ifdef CONFIG_NUMA
	&merge_across_nodes_attr.attr,
#endif
	&max_page_sharing_attr.attr,
	&stable_node_chains_attr.attr,
	&stable_node_dups_attr.attr,
	&stable_node_chains_prune_millisecs_attr.attr,
	&use_zero_pages_attr.attr,
	&general_profit_attr.attr,
	&smart_scan_attr.attr,
	&advisor_mode_attr.attr,
	&advisor_max_cpu_attr.attr,
	&advisor_min_pages_to_scan_attr.attr,
	&advisor_max_pages_to_scan_attr.attr,
	&advisor_target_scan_time_attr.attr,
	NULL,
};

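/*
 * The group is registered on mm_kobj in ksm_init(), so with sysfs mounted in
 * the usual place these files appear under /sys/kernel/mm/ksm/, e.g.:
 *
 *	echo 1 > /sys/kernel/mm/ksm/run
 *	cat /sys/kernel/mm/ksm/general_profit
 */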
static const struct attribute_group ksm_attr_group = {
	.attrs = ksm_attrs,
	.name = "ksm",
};
#endif /* CONFIG_SYSFS */

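/*
 * Boot-time setup (subsys_initcall): cache the zero page checksum, create
 * the slab caches, start the ksmd kthread, and (with CONFIG_SYSFS) expose
 * the tunables above.
 */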
static int __init ksm_init(void)
{
	struct task_struct *ksm_thread;
	int err;

	/* The correct value depends on page size and endianness */
	zero_checksum = calc_checksum(ZERO_PAGE(0));
	/* Default to false for backwards compatibility */
	ksm_use_zero_pages = false;

	err = ksm_slab_init();
	if (err)
		goto out;

	ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
	if (IS_ERR(ksm_thread)) {
		pr_err("ksm: creating kthread failed\n");
		err = PTR_ERR(ksm_thread);
		goto out_free;
	}

#ifdef CONFIG_SYSFS
	err = sysfs_create_group(mm_kobj, &ksm_attr_group);
	if (err) {
		pr_err("ksm: register sysfs failed\n");
		kthread_stop(ksm_thread);
		goto out_free;
	}
#else
	ksm_run = KSM_RUN_MERGE;	/* no way for user to start it */

#endif /* CONFIG_SYSFS */

#ifdef CONFIG_MEMORY_HOTREMOVE
	/* There is no significance to the priority value KSM_CALLBACK_PRI */
	hotplug_memory_notifier(ksm_memory_callback, KSM_CALLBACK_PRI);
#endif
	return 0;

out_free:
	ksm_slab_free();
out:
	return err;
}
subsys_initcall(ksm_init);
1/*
2 * Memory merging support.
3 *
4 * This code enables dynamic sharing of identical pages found in different
5 * memory areas, even if they are not shared by fork()
6 *
7 * Copyright (C) 2008-2009 Red Hat, Inc.
8 * Authors:
9 * Izik Eidus
10 * Andrea Arcangeli
11 * Chris Wright
12 * Hugh Dickins
13 *
14 * This work is licensed under the terms of the GNU GPL, version 2.
15 */
16
17#include <linux/errno.h>
18#include <linux/mm.h>
19#include <linux/fs.h>
20#include <linux/mman.h>
21#include <linux/sched.h>
22#include <linux/rwsem.h>
23#include <linux/pagemap.h>
24#include <linux/rmap.h>
25#include <linux/spinlock.h>
26#include <linux/jhash.h>
27#include <linux/delay.h>
28#include <linux/kthread.h>
29#include <linux/wait.h>
30#include <linux/slab.h>
31#include <linux/rbtree.h>
32#include <linux/memory.h>
33#include <linux/mmu_notifier.h>
34#include <linux/swap.h>
35#include <linux/ksm.h>
36#include <linux/hashtable.h>
37#include <linux/freezer.h>
38#include <linux/oom.h>
39#include <linux/numa.h>
40
41#include <asm/tlbflush.h>
42#include "internal.h"
43
44#ifdef CONFIG_NUMA
45#define NUMA(x) (x)
46#define DO_NUMA(x) do { (x); } while (0)
47#else
48#define NUMA(x) (0)
49#define DO_NUMA(x) do { } while (0)
50#endif
51
52/*
53 * A few notes about the KSM scanning process,
54 * to make it easier to understand the data structures below:
55 *
56 * In order to reduce excessive scanning, KSM sorts the memory pages by their
57 * contents into a data structure that holds pointers to the pages' locations.
58 *
59 * Since the contents of the pages may change at any moment, KSM cannot just
60 * insert the pages into a normal sorted tree and expect it to find anything.
61 * Therefore KSM uses two data structures - the stable and the unstable tree.
62 *
63 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
64 * by their contents. Because each such page is write-protected, searching on
65 * this tree is fully assured to be working (except when pages are unmapped),
66 * and therefore this tree is called the stable tree.
67 *
68 * In addition to the stable tree, KSM uses a second data structure called the
69 * unstable tree: this tree holds pointers to pages which have been found to
70 * be "unchanged for a period of time". The unstable tree sorts these pages
71 * by their contents, but since they are not write-protected, KSM cannot rely
72 * upon the unstable tree to work correctly - the unstable tree is liable to
73 * be corrupted as its contents are modified, and so it is called unstable.
74 *
75 * KSM solves this problem by several techniques:
76 *
77 * 1) The unstable tree is flushed every time KSM completes scanning all
78 * memory areas, and then the tree is rebuilt again from the beginning.
79 * 2) KSM will only insert into the unstable tree, pages whose hash value
80 * has not changed since the previous scan of all memory areas.
81 * 3) The unstable tree is a RedBlack Tree - so its balancing is based on the
82 * colors of the nodes and not on their contents, assuring that even when
83 * the tree gets "corrupted" it won't get out of balance, so scanning time
84 * remains the same (also, searching and inserting nodes in an rbtree uses
85 * the same algorithm, so we have no overhead when we flush and rebuild).
86 * 4) KSM never flushes the stable tree, which means that even if it were to
87 * take 10 attempts to find a page in the unstable tree, once it is found,
88 * it is secured in the stable tree. (When we scan a new page, we first
89 * compare it against the stable tree, and then against the unstable tree.)
90 *
91 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
92 * stable trees and multiple unstable trees: one of each for each NUMA node.
93 */
94
95/**
96 * struct mm_slot - ksm information per mm that is being scanned
97 * @link: link to the mm_slots hash list
98 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
99 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
100 * @mm: the mm that this information is valid for
101 */
102struct mm_slot {
103 struct hlist_node link;
104 struct list_head mm_list;
105 struct rmap_item *rmap_list;
106 struct mm_struct *mm;
107};
108
109/**
110 * struct ksm_scan - cursor for scanning
111 * @mm_slot: the current mm_slot we are scanning
112 * @address: the next address inside that to be scanned
113 * @rmap_list: link to the next rmap to be scanned in the rmap_list
114 * @seqnr: count of completed full scans (needed when removing unstable node)
115 *
116 * There is only the one ksm_scan instance of this cursor structure.
117 */
118struct ksm_scan {
119 struct mm_slot *mm_slot;
120 unsigned long address;
121 struct rmap_item **rmap_list;
122 unsigned long seqnr;
123};
124
125/**
126 * struct stable_node - node of the stable rbtree
127 * @node: rb node of this ksm page in the stable tree
128 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
129 * @list: linked into migrate_nodes, pending placement in the proper node tree
130 * @hlist: hlist head of rmap_items using this ksm page
131 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
132 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
133 */
134struct stable_node {
135 union {
136 struct rb_node node; /* when node of stable tree */
137 struct { /* when listed for migration */
138 struct list_head *head;
139 struct list_head list;
140 };
141 };
142 struct hlist_head hlist;
143 unsigned long kpfn;
144#ifdef CONFIG_NUMA
145 int nid;
146#endif
147};
148
149/**
150 * struct rmap_item - reverse mapping item for virtual addresses
151 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
152 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
153 * @nid: NUMA node id of unstable tree in which linked (may not match page)
154 * @mm: the memory structure this rmap_item is pointing into
155 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
156 * @oldchecksum: previous checksum of the page at that virtual address
157 * @node: rb node of this rmap_item in the unstable tree
158 * @head: pointer to stable_node heading this list in the stable tree
159 * @hlist: link into hlist of rmap_items hanging off that stable_node
160 */
161struct rmap_item {
162 struct rmap_item *rmap_list;
163 union {
164 struct anon_vma *anon_vma; /* when stable */
165#ifdef CONFIG_NUMA
166 int nid; /* when node of unstable tree */
167#endif
168 };
169 struct mm_struct *mm;
170 unsigned long address; /* + low bits used for flags below */
171 unsigned int oldchecksum; /* when unstable */
172 union {
173 struct rb_node node; /* when node of unstable tree */
174 struct { /* when listed from stable tree */
175 struct stable_node *head;
176 struct hlist_node hlist;
177 };
178 };
179};
180
181#define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
182#define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
183#define STABLE_FLAG 0x200 /* is listed from the stable tree */
184
185/* The stable and unstable tree heads */
186static struct rb_root one_stable_tree[1] = { RB_ROOT };
187static struct rb_root one_unstable_tree[1] = { RB_ROOT };
188static struct rb_root *root_stable_tree = one_stable_tree;
189static struct rb_root *root_unstable_tree = one_unstable_tree;
190
191/* Recently migrated nodes of stable tree, pending proper placement */
192static LIST_HEAD(migrate_nodes);
193
194#define MM_SLOTS_HASH_BITS 10
195static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
196
197static struct mm_slot ksm_mm_head = {
198 .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
199};
200static struct ksm_scan ksm_scan = {
201 .mm_slot = &ksm_mm_head,
202};
203
204static struct kmem_cache *rmap_item_cache;
205static struct kmem_cache *stable_node_cache;
206static struct kmem_cache *mm_slot_cache;
207
208/* The number of nodes in the stable tree */
209static unsigned long ksm_pages_shared;
210
211/* The number of page slots additionally sharing those nodes */
212static unsigned long ksm_pages_sharing;
213
214/* The number of nodes in the unstable tree */
215static unsigned long ksm_pages_unshared;
216
217/* The number of rmap_items in use: to calculate pages_volatile */
218static unsigned long ksm_rmap_items;
219
220/* Number of pages ksmd should scan in one batch */
221static unsigned int ksm_thread_pages_to_scan = 100;
222
223/* Milliseconds ksmd should sleep between batches */
224static unsigned int ksm_thread_sleep_millisecs = 20;
225
226#ifdef CONFIG_NUMA
227/* Zeroed when merging across nodes is not allowed */
228static unsigned int ksm_merge_across_nodes = 1;
229static int ksm_nr_node_ids = 1;
230#else
231#define ksm_merge_across_nodes 1U
232#define ksm_nr_node_ids 1
233#endif
234
235#define KSM_RUN_STOP 0
236#define KSM_RUN_MERGE 1
237#define KSM_RUN_UNMERGE 2
238#define KSM_RUN_OFFLINE 4
239static unsigned long ksm_run = KSM_RUN_STOP;
240static void wait_while_offlining(void);
241
242static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
243static DEFINE_MUTEX(ksm_thread_mutex);
244static DEFINE_SPINLOCK(ksm_mmlist_lock);
245
246#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
247 sizeof(struct __struct), __alignof__(struct __struct),\
248 (__flags), NULL)
249
250static int __init ksm_slab_init(void)
251{
252 rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
253 if (!rmap_item_cache)
254 goto out;
255
256 stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
257 if (!stable_node_cache)
258 goto out_free1;
259
260 mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
261 if (!mm_slot_cache)
262 goto out_free2;
263
264 return 0;
265
266out_free2:
267 kmem_cache_destroy(stable_node_cache);
268out_free1:
269 kmem_cache_destroy(rmap_item_cache);
270out:
271 return -ENOMEM;
272}
273
274static void __init ksm_slab_free(void)
275{
276 kmem_cache_destroy(mm_slot_cache);
277 kmem_cache_destroy(stable_node_cache);
278 kmem_cache_destroy(rmap_item_cache);
279 mm_slot_cache = NULL;
280}
281
282static inline struct rmap_item *alloc_rmap_item(void)
283{
284 struct rmap_item *rmap_item;
285
286 rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
287 if (rmap_item)
288 ksm_rmap_items++;
289 return rmap_item;
290}
291
292static inline void free_rmap_item(struct rmap_item *rmap_item)
293{
294 ksm_rmap_items--;
295 rmap_item->mm = NULL; /* debug safety */
296 kmem_cache_free(rmap_item_cache, rmap_item);
297}
298
299static inline struct stable_node *alloc_stable_node(void)
300{
301 return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
302}
303
304static inline void free_stable_node(struct stable_node *stable_node)
305{
306 kmem_cache_free(stable_node_cache, stable_node);
307}
308
309static inline struct mm_slot *alloc_mm_slot(void)
310{
311 if (!mm_slot_cache) /* initialization failed */
312 return NULL;
313 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
314}
315
316static inline void free_mm_slot(struct mm_slot *mm_slot)
317{
318 kmem_cache_free(mm_slot_cache, mm_slot);
319}
320
321static struct mm_slot *get_mm_slot(struct mm_struct *mm)
322{
323 struct mm_slot *slot;
324
325 hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
326 if (slot->mm == mm)
327 return slot;
328
329 return NULL;
330}
331
332static void insert_to_mm_slots_hash(struct mm_struct *mm,
333 struct mm_slot *mm_slot)
334{
335 mm_slot->mm = mm;
336 hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
337}
338
339/*
340 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
341 * page tables after it has passed through ksm_exit() - which, if necessary,
342 * takes mmap_sem briefly to serialize against them. ksm_exit() does not set
343 * a special flag: they can just back out as soon as mm_users goes to zero.
344 * ksm_test_exit() is used throughout to make this test for exit: in some
345 * places for correctness, in some places just to avoid unnecessary work.
346 */
347static inline bool ksm_test_exit(struct mm_struct *mm)
348{
349 return atomic_read(&mm->mm_users) == 0;
350}
351
352/*
353 * We use break_ksm to break COW on a ksm page: it's a stripped down
354 *
355 * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1)
356 * put_page(page);
357 *
358 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
359 * in case the application has unmapped and remapped mm,addr meanwhile.
360 * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP
361 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
362 *
363 * FAULT_FLAG/FOLL_REMOTE are because we do this outside the context
364 * of the process that owns 'vma'. We also do not want to enforce
365 * protection keys here anyway.
366 */
367static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
368{
369 struct page *page;
370 int ret = 0;
371
372 do {
373 cond_resched();
374 page = follow_page(vma, addr,
375 FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE);
376 if (IS_ERR_OR_NULL(page))
377 break;
378 if (PageKsm(page))
379 ret = handle_mm_fault(vma->vm_mm, vma, addr,
380 FAULT_FLAG_WRITE |
381 FAULT_FLAG_REMOTE);
382 else
383 ret = VM_FAULT_WRITE;
384 put_page(page);
385 } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
386 /*
387 * We must loop because handle_mm_fault() may back out if there's
388 * any difficulty e.g. if pte accessed bit gets updated concurrently.
389 *
390 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
391 * COW has been broken, even if the vma does not permit VM_WRITE;
392 * but note that a concurrent fault might break PageKsm for us.
393 *
394 * VM_FAULT_SIGBUS could occur if we race with truncation of the
395 * backing file, which also invalidates anonymous pages: that's
396 * okay, that truncation will have unmapped the PageKsm for us.
397 *
398 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
399 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
400 * current task has TIF_MEMDIE set, and will be OOM killed on return
401 * to user; and ksmd, having no mm, would never be chosen for that.
402 *
403 * But if the mm is in a limited mem_cgroup, then the fault may fail
404 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
405 * even ksmd can fail in this way - though it's usually breaking ksm
406 * just to undo a merge it made a moment before, so unlikely to oom.
407 *
408 * That's a pity: we might therefore have more kernel pages allocated
409 * than we're counting as nodes in the stable tree; but ksm_do_scan
410 * will retry to break_cow on each pass, so should recover the page
411 * in due course. The important thing is to not let VM_MERGEABLE
412 * be cleared while any such pages might remain in the area.
413 */
414 return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
415}
416
417static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
418 unsigned long addr)
419{
420 struct vm_area_struct *vma;
421 if (ksm_test_exit(mm))
422 return NULL;
423 vma = find_vma(mm, addr);
424 if (!vma || vma->vm_start > addr)
425 return NULL;
426 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
427 return NULL;
428 return vma;
429}
430
431static void break_cow(struct rmap_item *rmap_item)
432{
433 struct mm_struct *mm = rmap_item->mm;
434 unsigned long addr = rmap_item->address;
435 struct vm_area_struct *vma;
436
437 /*
438 * It is not an accident that whenever we want to break COW
439 * to undo, we also need to drop a reference to the anon_vma.
440 */
441 put_anon_vma(rmap_item->anon_vma);
442
443 down_read(&mm->mmap_sem);
444 vma = find_mergeable_vma(mm, addr);
445 if (vma)
446 break_ksm(vma, addr);
447 up_read(&mm->mmap_sem);
448}
449
450static struct page *get_mergeable_page(struct rmap_item *rmap_item)
451{
452 struct mm_struct *mm = rmap_item->mm;
453 unsigned long addr = rmap_item->address;
454 struct vm_area_struct *vma;
455 struct page *page;
456
457 down_read(&mm->mmap_sem);
458 vma = find_mergeable_vma(mm, addr);
459 if (!vma)
460 goto out;
461
462 page = follow_page(vma, addr, FOLL_GET);
463 if (IS_ERR_OR_NULL(page))
464 goto out;
465 if (PageAnon(page)) {
466 flush_anon_page(vma, page, addr);
467 flush_dcache_page(page);
468 } else {
469 put_page(page);
470out:
471 page = NULL;
472 }
473 up_read(&mm->mmap_sem);
474 return page;
475}
476
477/*
478 * This helper is used for getting right index into array of tree roots.
479 * When merge_across_nodes knob is set to 1, there are only two rb-trees for
480 * stable and unstable pages from all nodes with roots in index 0. Otherwise,
481 * every node has its own stable and unstable tree.
482 */
483static inline int get_kpfn_nid(unsigned long kpfn)
484{
485 return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
486}
487
488static void remove_node_from_stable_tree(struct stable_node *stable_node)
489{
490 struct rmap_item *rmap_item;
491
492 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
493 if (rmap_item->hlist.next)
494 ksm_pages_sharing--;
495 else
496 ksm_pages_shared--;
497 put_anon_vma(rmap_item->anon_vma);
498 rmap_item->address &= PAGE_MASK;
499 cond_resched();
500 }
501
502 if (stable_node->head == &migrate_nodes)
503 list_del(&stable_node->list);
504 else
505 rb_erase(&stable_node->node,
506 root_stable_tree + NUMA(stable_node->nid));
507 free_stable_node(stable_node);
508}
509
510/*
511 * get_ksm_page: checks if the page indicated by the stable node
512 * is still its ksm page, despite having held no reference to it.
513 * In which case we can trust the content of the page, and it
514 * returns the gotten page; but if the page has now been zapped,
515 * remove the stale node from the stable tree and return NULL.
516 * But beware, the stable node's page might be being migrated.
517 *
518 * You would expect the stable_node to hold a reference to the ksm page.
519 * But if it increments the page's count, swapping out has to wait for
520 * ksmd to come around again before it can free the page, which may take
521 * seconds or even minutes: much too unresponsive. So instead we use a
522 * "keyhole reference": access to the ksm page from the stable node peeps
523 * out through its keyhole to see if that page still holds the right key,
524 * pointing back to this stable node. This relies on freeing a PageAnon
525 * page to reset its page->mapping to NULL, and relies on no other use of
526 * a page to put something that might look like our key in page->mapping.
527 * is on its way to being freed; but it is an anomaly to bear in mind.
528 */
529static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
530{
531 struct page *page;
532 void *expected_mapping;
533 unsigned long kpfn;
534
535 expected_mapping = (void *)stable_node +
536 (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
537again:
538 kpfn = READ_ONCE(stable_node->kpfn);
539 page = pfn_to_page(kpfn);
540
541 /*
542 * page is computed from kpfn, so on most architectures reading
543 * page->mapping is naturally ordered after reading node->kpfn,
544 * but on Alpha we need to be more careful.
545 */
546 smp_read_barrier_depends();
547 if (READ_ONCE(page->mapping) != expected_mapping)
548 goto stale;
549
550 /*
551 * We cannot do anything with the page while its refcount is 0.
552 * Usually 0 means free, or tail of a higher-order page: in which
553 * case this node is no longer referenced, and should be freed;
554 * however, it might mean that the page is under page_freeze_refs().
555 * The __remove_mapping() case is easy, again the node is now stale;
556 * but if page is swapcache in migrate_page_move_mapping(), it might
557 * still be our page, in which case it's essential to keep the node.
558 */
559 while (!get_page_unless_zero(page)) {
560 /*
561 * Another check for page->mapping != expected_mapping would
562 * work here too. We have chosen the !PageSwapCache test to
563 * optimize the common case, when the page is or is about to
564 * be freed: PageSwapCache is cleared (under spin_lock_irq)
565 * in the freeze_refs section of __remove_mapping(); but Anon
566 * page->mapping reset to NULL later, in free_pages_prepare().
567 */
568 if (!PageSwapCache(page))
569 goto stale;
570 cpu_relax();
571 }
572
573 if (READ_ONCE(page->mapping) != expected_mapping) {
574 put_page(page);
575 goto stale;
576 }
577
578 if (lock_it) {
579 lock_page(page);
580 if (READ_ONCE(page->mapping) != expected_mapping) {
581 unlock_page(page);
582 put_page(page);
583 goto stale;
584 }
585 }
586 return page;
587
588stale:
589 /*
590 * We come here from above when page->mapping or !PageSwapCache
591 * suggests that the node is stale; but it might be under migration.
592 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
593 * before checking whether node->kpfn has been changed.
594 */
595 smp_rmb();
596 if (READ_ONCE(stable_node->kpfn) != kpfn)
597 goto again;
598 remove_node_from_stable_tree(stable_node);
599 return NULL;
600}
601
602/*
603 * Removing rmap_item from stable or unstable tree.
604 * This function will clean the information from the stable/unstable tree.
605 */
606static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
607{
608 if (rmap_item->address & STABLE_FLAG) {
609 struct stable_node *stable_node;
610 struct page *page;
611
612 stable_node = rmap_item->head;
613 page = get_ksm_page(stable_node, true);
614 if (!page)
615 goto out;
616
617 hlist_del(&rmap_item->hlist);
618 unlock_page(page);
619 put_page(page);
620
621 if (!hlist_empty(&stable_node->hlist))
622 ksm_pages_sharing--;
623 else
624 ksm_pages_shared--;
625
626 put_anon_vma(rmap_item->anon_vma);
627 rmap_item->address &= PAGE_MASK;
628
629 } else if (rmap_item->address & UNSTABLE_FLAG) {
630 unsigned char age;
631 /*
632 * Usually ksmd can and must skip the rb_erase, because
633 * root_unstable_tree was already reset to RB_ROOT.
634 * But be careful when an mm is exiting: do the rb_erase
635 * if this rmap_item was inserted by this scan, rather
636 * than left over from before.
637 */
638 age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
639 BUG_ON(age > 1);
640 if (!age)
641 rb_erase(&rmap_item->node,
642 root_unstable_tree + NUMA(rmap_item->nid));
643 ksm_pages_unshared--;
644 rmap_item->address &= PAGE_MASK;
645 }
646out:
647 cond_resched(); /* we're called from many long loops */
648}
649
650static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
651 struct rmap_item **rmap_list)
652{
653 while (*rmap_list) {
654 struct rmap_item *rmap_item = *rmap_list;
655 *rmap_list = rmap_item->rmap_list;
656 remove_rmap_item_from_tree(rmap_item);
657 free_rmap_item(rmap_item);
658 }
659}
660
661/*
662 * Though it's very tempting to unmerge rmap_items from stable tree rather
663 * than check every pte of a given vma, the locking doesn't quite work for
664 * that - an rmap_item is assigned to the stable tree after inserting ksm
665 * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing
666 * rmap_items from parent to child at fork time (so as not to waste time
667 * if exit comes before the next scan reaches it).
668 *
669 * Similarly, although we'd like to remove rmap_items (so updating counts
670 * and freeing memory) when unmerging an area, it's easier to leave that
671 * to the next pass of ksmd - consider, for example, how ksmd might be
672 * in cmp_and_merge_page on one of the rmap_items we would be removing.
673 */
674static int unmerge_ksm_pages(struct vm_area_struct *vma,
675 unsigned long start, unsigned long end)
676{
677 unsigned long addr;
678 int err = 0;
679
680 for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
681 if (ksm_test_exit(vma->vm_mm))
682 break;
683 if (signal_pending(current))
684 err = -ERESTARTSYS;
685 else
686 err = break_ksm(vma, addr);
687 }
688 return err;
689}
690
691#ifdef CONFIG_SYSFS
692/*
693 * Only called through the sysfs control interface:
694 */
695static int remove_stable_node(struct stable_node *stable_node)
696{
697 struct page *page;
698 int err;
699
700 page = get_ksm_page(stable_node, true);
701 if (!page) {
702 /*
703 * get_ksm_page did remove_node_from_stable_tree itself.
704 */
705 return 0;
706 }
707
708 if (WARN_ON_ONCE(page_mapped(page))) {
709 /*
710 * This should not happen: but if it does, just refuse to let
711 * merge_across_nodes be switched - there is no need to panic.
712 */
713 err = -EBUSY;
714 } else {
715 /*
716 * The stable node did not yet appear stale to get_ksm_page(),
717 * since that allows for an unmapped ksm page to be recognized
718 * right up until it is freed; but the node is safe to remove.
719 * This page might be in a pagevec waiting to be freed,
720 * or it might be PageSwapCache (perhaps under writeback),
721 * or it might have been removed from swapcache a moment ago.
722 */
723 set_page_stable_node(page, NULL);
724 remove_node_from_stable_tree(stable_node);
725 err = 0;
726 }
727
728 unlock_page(page);
729 put_page(page);
730 return err;
731}
732
733static int remove_all_stable_nodes(void)
734{
735 struct stable_node *stable_node, *next;
736 int nid;
737 int err = 0;
738
739 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
740 while (root_stable_tree[nid].rb_node) {
741 stable_node = rb_entry(root_stable_tree[nid].rb_node,
742 struct stable_node, node);
743 if (remove_stable_node(stable_node)) {
744 err = -EBUSY;
745 break; /* proceed to next nid */
746 }
747 cond_resched();
748 }
749 }
750 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
751 if (remove_stable_node(stable_node))
752 err = -EBUSY;
753 cond_resched();
754 }
755 return err;
756}
757
758static int unmerge_and_remove_all_rmap_items(void)
759{
760 struct mm_slot *mm_slot;
761 struct mm_struct *mm;
762 struct vm_area_struct *vma;
763 int err = 0;
764
765 spin_lock(&ksm_mmlist_lock);
766 ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
767 struct mm_slot, mm_list);
768 spin_unlock(&ksm_mmlist_lock);
769
770 for (mm_slot = ksm_scan.mm_slot;
771 mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
772 mm = mm_slot->mm;
773 down_read(&mm->mmap_sem);
774 for (vma = mm->mmap; vma; vma = vma->vm_next) {
775 if (ksm_test_exit(mm))
776 break;
777 if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
778 continue;
779 err = unmerge_ksm_pages(vma,
780 vma->vm_start, vma->vm_end);
781 if (err)
782 goto error;
783 }
784
785 remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
786 up_read(&mm->mmap_sem);
787
788 spin_lock(&ksm_mmlist_lock);
789 ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
790 struct mm_slot, mm_list);
791 if (ksm_test_exit(mm)) {
792 hash_del(&mm_slot->link);
793 list_del(&mm_slot->mm_list);
794 spin_unlock(&ksm_mmlist_lock);
795
796 free_mm_slot(mm_slot);
797 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
798 mmdrop(mm);
799 } else
800 spin_unlock(&ksm_mmlist_lock);
801 }
802
803 /* Clean up stable nodes, but don't worry if some are still busy */
804 remove_all_stable_nodes();
805 ksm_scan.seqnr = 0;
806 return 0;
807
808error:
809 up_read(&mm->mmap_sem);
810 spin_lock(&ksm_mmlist_lock);
811 ksm_scan.mm_slot = &ksm_mm_head;
812 spin_unlock(&ksm_mmlist_lock);
813 return err;
814}
815#endif /* CONFIG_SYSFS */
816
817static u32 calc_checksum(struct page *page)
818{
819 u32 checksum;
820 void *addr = kmap_atomic(page);
821 checksum = jhash2(addr, PAGE_SIZE / 4, 17);
822 kunmap_atomic(addr);
823 return checksum;
824}
825
826static int memcmp_pages(struct page *page1, struct page *page2)
827{
828 char *addr1, *addr2;
829 int ret;
830
831 addr1 = kmap_atomic(page1);
832 addr2 = kmap_atomic(page2);
833 ret = memcmp(addr1, addr2, PAGE_SIZE);
834 kunmap_atomic(addr2);
835 kunmap_atomic(addr1);
836 return ret;
837}
838
839static inline int pages_identical(struct page *page1, struct page *page2)
840{
841 return !memcmp_pages(page1, page2);
842}
843
844static int write_protect_page(struct vm_area_struct *vma, struct page *page,
845 pte_t *orig_pte)
846{
847 struct mm_struct *mm = vma->vm_mm;
848 unsigned long addr;
849 pte_t *ptep;
850 spinlock_t *ptl;
851 int swapped;
852 int err = -EFAULT;
853 unsigned long mmun_start; /* For mmu_notifiers */
854 unsigned long mmun_end; /* For mmu_notifiers */
855
856 addr = page_address_in_vma(page, vma);
857 if (addr == -EFAULT)
858 goto out;
859
860 BUG_ON(PageTransCompound(page));
861
862 mmun_start = addr;
863 mmun_end = addr + PAGE_SIZE;
864 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
865
866 ptep = page_check_address(page, mm, addr, &ptl, 0);
867 if (!ptep)
868 goto out_mn;
869
870 if (pte_write(*ptep) || pte_dirty(*ptep)) {
871 pte_t entry;
872
873 swapped = PageSwapCache(page);
874 flush_cache_page(vma, addr, page_to_pfn(page));
875 /*
876 * Ok this is tricky, when get_user_pages_fast() run it doesn't
877 * take any lock, therefore the check that we are going to make
878 * with the pagecount against the mapcount is racey and
879 * O_DIRECT can happen right after the check.
880 * So we clear the pte and flush the tlb before the check
881 * this assure us that no O_DIRECT can happen after the check
882 * or in the middle of the check.
883 */
884 entry = ptep_clear_flush_notify(vma, addr, ptep);
885 /*
886 * Check that no O_DIRECT or similar I/O is in progress on the
887 * page
888 */
889 if (page_mapcount(page) + 1 + swapped != page_count(page)) {
890 set_pte_at(mm, addr, ptep, entry);
891 goto out_unlock;
892 }
893 if (pte_dirty(entry))
894 set_page_dirty(page);
895 entry = pte_mkclean(pte_wrprotect(entry));
896 set_pte_at_notify(mm, addr, ptep, entry);
897 }
898 *orig_pte = *ptep;
899 err = 0;
900
901out_unlock:
902 pte_unmap_unlock(ptep, ptl);
903out_mn:
904 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
905out:
906 return err;
907}
908
909/**
910 * replace_page - replace page in vma by new ksm page
911 * @vma: vma that holds the pte pointing to page
912 * @page: the page we are replacing by kpage
913 * @kpage: the ksm page we replace page by
914 * @orig_pte: the original value of the pte
915 *
916 * Returns 0 on success, -EFAULT on failure.
917 */
918static int replace_page(struct vm_area_struct *vma, struct page *page,
919 struct page *kpage, pte_t orig_pte)
920{
921 struct mm_struct *mm = vma->vm_mm;
922 pmd_t *pmd;
923 pte_t *ptep;
924 spinlock_t *ptl;
925 unsigned long addr;
926 int err = -EFAULT;
927 unsigned long mmun_start; /* For mmu_notifiers */
928 unsigned long mmun_end; /* For mmu_notifiers */
929
930 addr = page_address_in_vma(page, vma);
931 if (addr == -EFAULT)
932 goto out;
933
934 pmd = mm_find_pmd(mm, addr);
935 if (!pmd)
936 goto out;
937
938 mmun_start = addr;
939 mmun_end = addr + PAGE_SIZE;
940 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
941
942 ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
943 if (!pte_same(*ptep, orig_pte)) {
944 pte_unmap_unlock(ptep, ptl);
945 goto out_mn;
946 }
947
948 get_page(kpage);
949 page_add_anon_rmap(kpage, vma, addr, false);
950
951 flush_cache_page(vma, addr, pte_pfn(*ptep));
952 ptep_clear_flush_notify(vma, addr, ptep);
953 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
954
955 page_remove_rmap(page, false);
956 if (!page_mapped(page))
957 try_to_free_swap(page);
958 put_page(page);
959
960 pte_unmap_unlock(ptep, ptl);
961 err = 0;
962out_mn:
963 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
964out:
965 return err;
966}
967
968/*
969 * try_to_merge_one_page - take two pages and merge them into one
970 * @vma: the vma that holds the pte pointing to page
971 * @page: the PageAnon page that we want to replace with kpage
972 * @kpage: the PageKsm page that we want to map instead of page,
973 * or NULL the first time when we want to use page as kpage.
974 *
975 * This function returns 0 if the pages were merged, -EFAULT otherwise.
976 */
977static int try_to_merge_one_page(struct vm_area_struct *vma,
978 struct page *page, struct page *kpage)
979{
980 pte_t orig_pte = __pte(0);
981 int err = -EFAULT;
982
983 if (page == kpage) /* ksm page forked */
984 return 0;
985
986 if (!PageAnon(page))
987 goto out;
988
989 /*
990 * We need the page lock to read a stable PageSwapCache in
991 * write_protect_page(). We use trylock_page() instead of
992 * lock_page() because we don't want to wait here - we
993 * prefer to continue scanning and merging different pages,
994 * then come back to this page when it is unlocked.
995 */
996 if (!trylock_page(page))
997 goto out;
998
999 if (PageTransCompound(page)) {
1000 err = split_huge_page(page);
1001 if (err)
1002 goto out_unlock;
1003 }
1004
1005 /*
1006 * If this anonymous page is mapped only here, its pte may need
1007 * to be write-protected. If it's mapped elsewhere, all of its
1008 * ptes are necessarily already write-protected. But in either
1009 * case, we need to lock and check page_count is not raised.
1010 */
1011 if (write_protect_page(vma, page, &orig_pte) == 0) {
1012 if (!kpage) {
1013 /*
1014 * While we hold page lock, upgrade page from
1015 * PageAnon+anon_vma to PageKsm+NULL stable_node:
1016 * stable_tree_insert() will update stable_node.
1017 */
1018 set_page_stable_node(page, NULL);
1019 mark_page_accessed(page);
1020 /*
1021 * Page reclaim just frees a clean page with no dirty
1022 * ptes: make sure that the ksm page would be swapped.
1023 */
1024 if (!PageDirty(page))
1025 SetPageDirty(page);
1026 err = 0;
1027 } else if (pages_identical(page, kpage))
1028 err = replace_page(vma, page, kpage, orig_pte);
1029 }
1030
1031 if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
1032 munlock_vma_page(page);
1033 if (!PageMlocked(kpage)) {
1034 unlock_page(page);
1035 lock_page(kpage);
1036 mlock_vma_page(kpage);
1037 page = kpage; /* for final unlock */
1038 }
1039 }
1040
1041out_unlock:
1042 unlock_page(page);
1043out:
1044 return err;
1045}
1046
1047/*
1048 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
1049 * but no new kernel page is allocated: kpage must already be a ksm page.
1050 *
1051 * This function returns 0 if the pages were merged, -EFAULT otherwise.
1052 */
1053static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
1054 struct page *page, struct page *kpage)
1055{
1056 struct mm_struct *mm = rmap_item->mm;
1057 struct vm_area_struct *vma;
1058 int err = -EFAULT;
1059
1060 down_read(&mm->mmap_sem);
1061 vma = find_mergeable_vma(mm, rmap_item->address);
1062 if (!vma)
1063 goto out;
1064
1065 err = try_to_merge_one_page(vma, page, kpage);
1066 if (err)
1067 goto out;
1068
1069 /* Unstable nid is in union with stable anon_vma: remove first */
1070 remove_rmap_item_from_tree(rmap_item);
1071
1072 /* Must get reference to anon_vma while still holding mmap_sem */
1073 rmap_item->anon_vma = vma->anon_vma;
1074 get_anon_vma(vma->anon_vma);
1075out:
1076 up_read(&mm->mmap_sem);
1077 return err;
1078}
1079
1080/*
1081 * try_to_merge_two_pages - take two identical pages and prepare them
1082 * to be merged into one page.
1083 *
1084 * This function returns the kpage if we successfully merged two identical
1085 * pages into one ksm page, NULL otherwise.
1086 *
1087 * Note that this function upgrades page to ksm page: if one of the pages
1088 * is already a ksm page, try_to_merge_with_ksm_page should be used.
1089 */
1090static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item,
1091 struct page *page,
1092 struct rmap_item *tree_rmap_item,
1093 struct page *tree_page)
1094{
1095 int err;
1096
1097 err = try_to_merge_with_ksm_page(rmap_item, page, NULL);
1098 if (!err) {
1099 err = try_to_merge_with_ksm_page(tree_rmap_item,
1100 tree_page, page);
1101 /*
1102 * If that fails, we have a ksm page with only one pte
1103 * pointing to it: so break it.
1104 */
1105 if (err)
1106 break_cow(rmap_item);
1107 }
1108 return err ? NULL : page;
1109}
1110
1111/*
1112 * stable_tree_search - search for page inside the stable tree
1113 *
1114 * This function checks if there is a page inside the stable tree
1115 * with identical content to the page that we are scanning right now.
1116 *
1117 * This function returns the stable tree node of identical content if found,
1118 * NULL otherwise.
1119 */
1120static struct page *stable_tree_search(struct page *page)
1121{
1122 int nid;
1123 struct rb_root *root;
1124 struct rb_node **new;
1125 struct rb_node *parent;
1126 struct stable_node *stable_node;
1127 struct stable_node *page_node;
1128
1129 page_node = page_stable_node(page);
1130 if (page_node && page_node->head != &migrate_nodes) {
1131 /* ksm page forked */
1132 get_page(page);
1133 return page;
1134 }
1135
1136 nid = get_kpfn_nid(page_to_pfn(page));
1137 root = root_stable_tree + nid;
1138again:
1139 new = &root->rb_node;
1140 parent = NULL;
1141
1142 while (*new) {
1143 struct page *tree_page;
1144 int ret;
1145
1146 cond_resched();
1147 stable_node = rb_entry(*new, struct stable_node, node);
1148 tree_page = get_ksm_page(stable_node, false);
1149 if (!tree_page) {
1150 /*
1151 * If we walked over a stale stable_node,
1152 * get_ksm_page() will call rb_erase() and it
1153 * may rebalance the tree from under us. So
1154 * restart the search from scratch. Returning
1155 * NULL would be safe too, but we'd generate
1156 * false negative insertions just because some
1157 * stable_node was stale.
1158 */
1159 goto again;
1160 }
1161
1162 ret = memcmp_pages(page, tree_page);
1163 put_page(tree_page);
1164
1165 parent = *new;
1166 if (ret < 0)
1167 new = &parent->rb_left;
1168 else if (ret > 0)
1169 new = &parent->rb_right;
1170 else {
1171 /*
1172 * Lock and unlock the stable_node's page (which
1173 * might already have been migrated) so that page
1174 * migration is sure to notice its raised count.
1175 * It would be more elegant to return stable_node
1176 * than kpage, but that involves more changes.
1177 */
1178 tree_page = get_ksm_page(stable_node, true);
1179 if (tree_page) {
1180 unlock_page(tree_page);
1181 if (get_kpfn_nid(stable_node->kpfn) !=
1182 NUMA(stable_node->nid)) {
1183 put_page(tree_page);
1184 goto replace;
1185 }
1186 return tree_page;
1187 }
1188 /*
1189 * There is now a place for page_node, but the tree may
1190 * have been rebalanced, so re-evaluate parent and new.
1191 */
1192 if (page_node)
1193 goto again;
1194 return NULL;
1195 }
1196 }
1197
1198 if (!page_node)
1199 return NULL;
1200
1201 list_del(&page_node->list);
1202 DO_NUMA(page_node->nid = nid);
1203 rb_link_node(&page_node->node, parent, new);
1204 rb_insert_color(&page_node->node, root);
1205 get_page(page);
1206 return page;
1207
1208replace:
1209 if (page_node) {
1210 list_del(&page_node->list);
1211 DO_NUMA(page_node->nid = nid);
1212 rb_replace_node(&stable_node->node, &page_node->node, root);
1213 get_page(page);
1214 } else {
1215 rb_erase(&stable_node->node, root);
1216 page = NULL;
1217 }
1218 stable_node->head = &migrate_nodes;
1219 list_add(&stable_node->list, stable_node->head);
1220 return page;
1221}
1222
1223/*
1224 * stable_tree_insert - insert stable tree node pointing to new ksm page
1225 * into the stable tree.
1226 *
1227 * This function returns the stable tree node just allocated on success,
1228 * NULL otherwise.
1229 */
1230static struct stable_node *stable_tree_insert(struct page *kpage)
1231{
1232 int nid;
1233 unsigned long kpfn;
1234 struct rb_root *root;
1235 struct rb_node **new;
1236 struct rb_node *parent;
1237 struct stable_node *stable_node;
1238
1239 kpfn = page_to_pfn(kpage);
1240 nid = get_kpfn_nid(kpfn);
1241 root = root_stable_tree + nid;
1242again:
1243 parent = NULL;
1244 new = &root->rb_node;
1245
1246 while (*new) {
1247 struct page *tree_page;
1248 int ret;
1249
1250 cond_resched();
1251 stable_node = rb_entry(*new, struct stable_node, node);
1252 tree_page = get_ksm_page(stable_node, false);
1253 if (!tree_page) {
1254 /*
1255 * If we walked over a stale stable_node,
1256 * get_ksm_page() will call rb_erase() and it
1257 * may rebalance the tree from under us. So
1258 * restart the search from scratch. Returning
1259 * NULL would be safe too, but we'd generate
1260 * false negative insertions just because some
1261 * stable_node was stale.
1262 */
1263 goto again;
1264 }
1265
1266 ret = memcmp_pages(kpage, tree_page);
1267 put_page(tree_page);
1268
1269 parent = *new;
1270 if (ret < 0)
1271 new = &parent->rb_left;
1272 else if (ret > 0)
1273 new = &parent->rb_right;
1274 else {
1275 /*
1276 * It is not a bug that stable_tree_search() didn't
1277 * find this node: because at that time our page was
1278 * not yet write-protected, so may have changed since.
1279 */
1280 return NULL;
1281 }
1282 }
1283
1284 stable_node = alloc_stable_node();
1285 if (!stable_node)
1286 return NULL;
1287
1288 INIT_HLIST_HEAD(&stable_node->hlist);
1289 stable_node->kpfn = kpfn;
1290 set_page_stable_node(kpage, stable_node);
1291 DO_NUMA(stable_node->nid = nid);
1292 rb_link_node(&stable_node->node, parent, new);
1293 rb_insert_color(&stable_node->node, root);
1294
1295 return stable_node;
1296}
1297
1298/*
1299 * unstable_tree_search_insert - search for identical page,
1300 * else insert rmap_item into the unstable tree.
1301 *
1302 * This function searches for a page in the unstable tree identical to the
1303 * page currently being scanned; and if no identical page is found in the
1304 * tree, we insert rmap_item as a new object into the unstable tree.
1305 *
1306 * This function returns pointer to rmap_item found to be identical
1307 * to the currently scanned page, NULL otherwise.
1308 *
1309 * This function does both searching and inserting, because they share
1310 * the same walking algorithm in an rbtree.
1311 */
1312static
1313struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item,
1314 struct page *page,
1315 struct page **tree_pagep)
1316{
1317 struct rb_node **new;
1318 struct rb_root *root;
1319 struct rb_node *parent = NULL;
1320 int nid;
1321
1322 nid = get_kpfn_nid(page_to_pfn(page));
1323 root = root_unstable_tree + nid;
1324 new = &root->rb_node;
1325
1326 while (*new) {
1327 struct rmap_item *tree_rmap_item;
1328 struct page *tree_page;
1329 int ret;
1330
1331 cond_resched();
1332 tree_rmap_item = rb_entry(*new, struct rmap_item, node);
1333 tree_page = get_mergeable_page(tree_rmap_item);
1334 if (!tree_page)
1335 return NULL;
1336
1337 /*
1338 * Don't substitute a ksm page for a forked page.
1339 */
1340 if (page == tree_page) {
1341 put_page(tree_page);
1342 return NULL;
1343 }
1344
1345 ret = memcmp_pages(page, tree_page);
1346
1347 parent = *new;
1348 if (ret < 0) {
1349 put_page(tree_page);
1350 new = &parent->rb_left;
1351 } else if (ret > 0) {
1352 put_page(tree_page);
1353 new = &parent->rb_right;
1354 } else if (!ksm_merge_across_nodes &&
1355 page_to_nid(tree_page) != nid) {
1356 /*
1357 * If tree_page has been migrated to another NUMA node,
1358 * it will be flushed out and put in the right unstable
1359 * tree next time: only merge with it when across_nodes.
1360 */
1361 put_page(tree_page);
1362 return NULL;
1363 } else {
1364 *tree_pagep = tree_page;
1365 return tree_rmap_item;
1366 }
1367 }
1368
1369 rmap_item->address |= UNSTABLE_FLAG;
1370 rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK);
1371 DO_NUMA(rmap_item->nid = nid);
1372 rb_link_node(&rmap_item->node, parent, new);
1373 rb_insert_color(&rmap_item->node, root);
1374
1375 ksm_pages_unshared++;
1376 return NULL;
1377}
1378
1379/*
1380 * stable_tree_append - add another rmap_item to the linked list of
1381 * rmap_items hanging off a given node of the stable tree, all sharing
1382 * the same ksm page.
1383 */
1384static void stable_tree_append(struct rmap_item *rmap_item,
1385 struct stable_node *stable_node)
1386{
1387 rmap_item->head = stable_node;
1388 rmap_item->address |= STABLE_FLAG;
1389 hlist_add_head(&rmap_item->hlist, &stable_node->hlist);
1390
1391 if (rmap_item->hlist.next)
1392 ksm_pages_sharing++;
1393 else
1394 ksm_pages_shared++;
1395}
1396
1397/*
1398 * cmp_and_merge_page - first see if page can be merged into the stable tree;
1399 * if not, compare checksum to previous and if it's the same, see if page can
1400 * be inserted into the unstable tree, or merged with a page already there and
1401 * both transferred to the stable tree.
1402 *
1403 * @page: the page that we are searching identical page to.
1404 * @rmap_item: the reverse mapping into the virtual address of this page
1405 */
1406static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item)
1407{
1408 struct rmap_item *tree_rmap_item;
1409 struct page *tree_page = NULL;
1410 struct stable_node *stable_node;
1411 struct page *kpage;
1412 unsigned int checksum;
1413 int err;
1414
1415 stable_node = page_stable_node(page);
1416 if (stable_node) {
1417 if (stable_node->head != &migrate_nodes &&
1418 get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) {
1419 rb_erase(&stable_node->node,
1420 root_stable_tree + NUMA(stable_node->nid));
1421 stable_node->head = &migrate_nodes;
1422 list_add(&stable_node->list, stable_node->head);
1423 }
1424 if (stable_node->head != &migrate_nodes &&
1425 rmap_item->head == stable_node)
1426 return;
1427 }
1428
1429 /* We first start with searching the page inside the stable tree */
1430 kpage = stable_tree_search(page);
1431 if (kpage == page && rmap_item->head == stable_node) {
1432 put_page(kpage);
1433 return;
1434 }
1435
1436 remove_rmap_item_from_tree(rmap_item);
1437
1438 if (kpage) {
1439 err = try_to_merge_with_ksm_page(rmap_item, page, kpage);
1440 if (!err) {
1441 /*
1442 * The page was successfully merged:
1443 * add its rmap_item to the stable tree.
1444 */
1445 lock_page(kpage);
1446 stable_tree_append(rmap_item, page_stable_node(kpage));
1447 unlock_page(kpage);
1448 }
1449 put_page(kpage);
1450 return;
1451 }
1452
1453 /*
1454 * If the hash value of the page has changed from the last time
1455 * we calculated it, this page is changing frequently: therefore we
1456 * don't want to insert it in the unstable tree, and we don't want
1457 * to waste our time searching for something identical to it there.
1458 */
1459 checksum = calc_checksum(page);
1460 if (rmap_item->oldchecksum != checksum) {
1461 rmap_item->oldchecksum = checksum;
1462 return;
1463 }
1464
1465 tree_rmap_item =
1466 unstable_tree_search_insert(rmap_item, page, &tree_page);
1467 if (tree_rmap_item) {
1468 kpage = try_to_merge_two_pages(rmap_item, page,
1469 tree_rmap_item, tree_page);
1470 put_page(tree_page);
1471 if (kpage) {
1472 /*
1473 * The pages were successfully merged: insert new
1474 * node in the stable tree and add both rmap_items.
1475 */
1476 lock_page(kpage);
1477 stable_node = stable_tree_insert(kpage);
1478 if (stable_node) {
1479 stable_tree_append(tree_rmap_item, stable_node);
1480 stable_tree_append(rmap_item, stable_node);
1481 }
1482 unlock_page(kpage);
1483
1484 /*
1485 * If we fail to insert the page into the stable tree,
1486 * we will have 2 virtual addresses that are pointing
1487 * to a ksm page left outside the stable tree,
1488 * in which case we need to break_cow on both.
1489 */
1490 if (!stable_node) {
1491 break_cow(tree_rmap_item);
1492 break_cow(rmap_item);
1493 }
1494 }
1495 }
1496}
1497
1498static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot,
1499 struct rmap_item **rmap_list,
1500 unsigned long addr)
1501{
1502 struct rmap_item *rmap_item;
1503
1504 while (*rmap_list) {
1505 rmap_item = *rmap_list;
1506 if ((rmap_item->address & PAGE_MASK) == addr)
1507 return rmap_item;
1508 if (rmap_item->address > addr)
1509 break;
1510 *rmap_list = rmap_item->rmap_list;
1511 remove_rmap_item_from_tree(rmap_item);
1512 free_rmap_item(rmap_item);
1513 }
1514
1515 rmap_item = alloc_rmap_item();
1516 if (rmap_item) {
1517 /* It has already been zeroed */
1518 rmap_item->mm = mm_slot->mm;
1519 rmap_item->address = addr;
1520 rmap_item->rmap_list = *rmap_list;
1521 *rmap_list = rmap_item;
1522 }
1523 return rmap_item;
1524}
1525
1526static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1527{
1528 struct mm_struct *mm;
1529 struct mm_slot *slot;
1530 struct vm_area_struct *vma;
1531 struct rmap_item *rmap_item;
1532 int nid;
1533
1534 if (list_empty(&ksm_mm_head.mm_list))
1535 return NULL;
1536
1537 slot = ksm_scan.mm_slot;
1538 if (slot == &ksm_mm_head) {
1539 /*
1540 * A number of pages can hang around indefinitely on per-cpu
1541 * pagevecs, raised page count preventing write_protect_page
1542 * from merging them. Though it doesn't really matter much,
1543 * it is puzzling to see some stuck in pages_volatile until
1544 * other activity jostles them out, and they also prevented
1545 * LTP's KSM test from succeeding deterministically; so drain
1546 * them here (here rather than on entry to ksm_do_scan(),
1547 * so we don't IPI too often when pages_to_scan is set low).
1548 */
1549 lru_add_drain_all();
1550
1551 /*
1552 * Whereas stale stable_nodes on the stable_tree itself
1553 * get pruned in the regular course of stable_tree_search(),
1554 * those moved out to the migrate_nodes list can accumulate:
1555 * so prune them once before each full scan.
1556 */
1557 if (!ksm_merge_across_nodes) {
1558 struct stable_node *stable_node, *next;
1559 struct page *page;
1560
1561 list_for_each_entry_safe(stable_node, next,
1562 &migrate_nodes, list) {
1563 page = get_ksm_page(stable_node, false);
1564 if (page)
1565 put_page(page);
1566 cond_resched();
1567 }
1568 }
1569
1570 for (nid = 0; nid < ksm_nr_node_ids; nid++)
1571 root_unstable_tree[nid] = RB_ROOT;
1572
1573 spin_lock(&ksm_mmlist_lock);
1574 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
1575 ksm_scan.mm_slot = slot;
1576 spin_unlock(&ksm_mmlist_lock);
1577 /*
1578 * Although we tested list_empty() above, a racing __ksm_exit
1579 * of the last mm on the list may have removed it since then.
1580 */
1581 if (slot == &ksm_mm_head)
1582 return NULL;
1583next_mm:
1584 ksm_scan.address = 0;
1585 ksm_scan.rmap_list = &slot->rmap_list;
1586 }
1587
1588 mm = slot->mm;
1589 down_read(&mm->mmap_sem);
1590 if (ksm_test_exit(mm))
1591 vma = NULL;
1592 else
1593 vma = find_vma(mm, ksm_scan.address);
1594
1595 for (; vma; vma = vma->vm_next) {
1596 if (!(vma->vm_flags & VM_MERGEABLE))
1597 continue;
1598 if (ksm_scan.address < vma->vm_start)
1599 ksm_scan.address = vma->vm_start;
1600 if (!vma->anon_vma)
1601 ksm_scan.address = vma->vm_end;
1602
1603 while (ksm_scan.address < vma->vm_end) {
1604 if (ksm_test_exit(mm))
1605 break;
1606 *page = follow_page(vma, ksm_scan.address, FOLL_GET);
1607 if (IS_ERR_OR_NULL(*page)) {
1608 ksm_scan.address += PAGE_SIZE;
1609 cond_resched();
1610 continue;
1611 }
1612 if (PageAnon(*page)) {
1613 flush_anon_page(vma, *page, ksm_scan.address);
1614 flush_dcache_page(*page);
1615 rmap_item = get_next_rmap_item(slot,
1616 ksm_scan.rmap_list, ksm_scan.address);
1617 if (rmap_item) {
1618 ksm_scan.rmap_list =
1619 &rmap_item->rmap_list;
1620 ksm_scan.address += PAGE_SIZE;
1621 } else
1622 put_page(*page);
1623 up_read(&mm->mmap_sem);
1624 return rmap_item;
1625 }
1626 put_page(*page);
1627 ksm_scan.address += PAGE_SIZE;
1628 cond_resched();
1629 }
1630 }
1631
1632 if (ksm_test_exit(mm)) {
1633 ksm_scan.address = 0;
1634 ksm_scan.rmap_list = &slot->rmap_list;
1635 }
1636 /*
1637 * Nuke all the rmap_items that are above this current rmap:
1638 * because there were no VM_MERGEABLE vmas with such addresses.
1639 */
1640 remove_trailing_rmap_items(slot, ksm_scan.rmap_list);
1641
1642 spin_lock(&ksm_mmlist_lock);
1643 ksm_scan.mm_slot = list_entry(slot->mm_list.next,
1644 struct mm_slot, mm_list);
1645 if (ksm_scan.address == 0) {
1646 /*
1647 * We've completed a full scan of all vmas, holding mmap_sem
1648 * throughout, and found no VM_MERGEABLE: so do the same as
1649 * __ksm_exit does to remove this mm from all our lists now.
1650 * This applies either when cleaning up after __ksm_exit
1651 * (but beware: we can reach here even before __ksm_exit),
1652 * or when all VM_MERGEABLE areas have been unmapped (and
1653 * mmap_sem then protects against race with MADV_MERGEABLE).
1654 */
1655 hash_del(&slot->link);
1656 list_del(&slot->mm_list);
1657 spin_unlock(&ksm_mmlist_lock);
1658
1659 free_mm_slot(slot);
1660 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1661 up_read(&mm->mmap_sem);
1662 mmdrop(mm);
1663 } else {
1664 up_read(&mm->mmap_sem);
1665 /*
1666		 * up_read(&mm->mmap_sem) first, because once
1667		 * spin_unlock(&ksm_mmlist_lock) has run, the "mm" may
1668		 * already have been freed under us by __ksm_exit():
1669		 * the "mm_slot" is still hashed, but ksm_scan.mm_slot
1670		 * no longer points to it.
1671 */
1672 spin_unlock(&ksm_mmlist_lock);
1673 }
1674
1675 /* Repeat until we've completed scanning the whole list */
1676 slot = ksm_scan.mm_slot;
1677 if (slot != &ksm_mm_head)
1678 goto next_mm;
1679
1680 ksm_scan.seqnr++;
1681 return NULL;
1682}
1683
1684/**
1685 * ksm_do_scan - the ksm scanner main worker function.
1686 * @scan_npages: number of pages we want to scan before we return.
1687 */
1688static void ksm_do_scan(unsigned int scan_npages)
1689{
1690 struct rmap_item *rmap_item;
1691 struct page *uninitialized_var(page);
1692
1693 while (scan_npages-- && likely(!freezing(current))) {
1694 cond_resched();
1695 rmap_item = scan_get_next_rmap_item(&page);
1696 if (!rmap_item)
1697 return;
1698 cmp_and_merge_page(page, rmap_item);
1699 put_page(page);
1700 }
1701}
1702
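/*
 * ksmd has work to do only while merging is enabled (KSM_RUN_MERGE) and at
 * least one mm has been registered on ksm_mm_head's list by __ksm_enter().
 */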
1703static int ksmd_should_run(void)
1704{
1705 return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list);
1706}
1707
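/*
 * The ksmd main loop: take ksm_thread_mutex around each batch of
 * ksm_thread_pages_to_scan pages, then either sleep for
 * ksm_thread_sleep_millisecs before the next batch or, when there is
 * nothing to scan, block on ksm_thread_wait until woken.
 */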
1708static int ksm_scan_thread(void *nothing)
1709{
1710 set_freezable();
1711 set_user_nice(current, 5);
1712
1713 while (!kthread_should_stop()) {
1714 mutex_lock(&ksm_thread_mutex);
1715 wait_while_offlining();
1716 if (ksmd_should_run())
1717 ksm_do_scan(ksm_thread_pages_to_scan);
1718 mutex_unlock(&ksm_thread_mutex);
1719
1720 try_to_freeze();
1721
1722 if (ksmd_should_run()) {
1723 schedule_timeout_interruptible(
1724 msecs_to_jiffies(ksm_thread_sleep_millisecs));
1725 } else {
1726 wait_event_freezable(ksm_thread_wait,
1727 ksmd_should_run() || kthread_should_stop());
1728 }
1729 }
1730 return 0;
1731}
1732
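/*
 * ksm_madvise() implements MADV_MERGEABLE and MADV_UNMERGEABLE for one vma.
 * Userspace typically opts a region in with something like:
 *
 *	addr = mmap(NULL, length, PROT_READ | PROT_WRITE,
 *		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(addr, length, MADV_MERGEABLE);
 *
 * Special mappings (shared, PFN/IO, hugetlb and suchlike) just ignore the
 * advice below.
 */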
1733int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
1734 unsigned long end, int advice, unsigned long *vm_flags)
1735{
1736 struct mm_struct *mm = vma->vm_mm;
1737 int err;
1738
1739 switch (advice) {
1740 case MADV_MERGEABLE:
1741 /*
1742 * Be somewhat over-protective for now!
1743 */
1744 if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE |
1745 VM_PFNMAP | VM_IO | VM_DONTEXPAND |
1746 VM_HUGETLB | VM_MIXEDMAP))
1747 return 0; /* just ignore the advice */
1748
1749#ifdef VM_SAO
1750 if (*vm_flags & VM_SAO)
1751 return 0;
1752#endif
1753
1754 if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) {
1755 err = __ksm_enter(mm);
1756 if (err)
1757 return err;
1758 }
1759
1760 *vm_flags |= VM_MERGEABLE;
1761 break;
1762
1763 case MADV_UNMERGEABLE:
1764 if (!(*vm_flags & VM_MERGEABLE))
1765 return 0; /* just ignore the advice */
1766
1767 if (vma->anon_vma) {
1768 err = unmerge_ksm_pages(vma, start, end);
1769 if (err)
1770 return err;
1771 }
1772
1773 *vm_flags &= ~VM_MERGEABLE;
1774 break;
1775 }
1776
1777 return 0;
1778}
1779
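/*
 * Register an mm with ksmd: allocate an mm_slot, hash it and link it into
 * the scan list, take an extra mm_count reference, and wake ksmd if its
 * list was previously empty.  Called from ksm_madvise() above on the first
 * MADV_MERGEABLE, and also on fork of such an mm.
 */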
1780int __ksm_enter(struct mm_struct *mm)
1781{
1782 struct mm_slot *mm_slot;
1783 int needs_wakeup;
1784
1785 mm_slot = alloc_mm_slot();
1786 if (!mm_slot)
1787 return -ENOMEM;
1788
1789 /* Check ksm_run too? Would need tighter locking */
1790 needs_wakeup = list_empty(&ksm_mm_head.mm_list);
1791
1792 spin_lock(&ksm_mmlist_lock);
1793 insert_to_mm_slots_hash(mm, mm_slot);
1794 /*
1795 * When KSM_RUN_MERGE (or KSM_RUN_STOP),
1796 * insert just behind the scanning cursor, to let the area settle
1797 * down a little; when fork is followed by immediate exec, we don't
1798 * want ksmd to waste time setting up and tearing down an rmap_list.
1799 *
1800 * But when KSM_RUN_UNMERGE, it's important to insert ahead of its
1801 * scanning cursor, otherwise KSM pages in newly forked mms will be
1802 * missed: then we might as well insert at the end of the list.
1803 */
1804 if (ksm_run & KSM_RUN_UNMERGE)
1805 list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list);
1806 else
1807 list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list);
1808 spin_unlock(&ksm_mmlist_lock);
1809
1810 set_bit(MMF_VM_MERGEABLE, &mm->flags);
1811 atomic_inc(&mm->mm_count);
1812
1813 if (needs_wakeup)
1814 wake_up_interruptible(&ksm_thread_wait);
1815
1816 return 0;
1817}
1818
1819void __ksm_exit(struct mm_struct *mm)
1820{
1821 struct mm_slot *mm_slot;
1822 int easy_to_free = 0;
1823
1824 /*
1825 * This process is exiting: if it's straightforward (as is the
1826 * case when ksmd was never running), free mm_slot immediately.
1827 * But if it's at the cursor or has rmap_items linked to it, use
1828 * mmap_sem to synchronize with any break_cows before pagetables
1829 * are freed, and leave the mm_slot on the list for ksmd to free.
1830 * Beware: ksm may already have noticed it exiting and freed the slot.
1831 */
1832
1833 spin_lock(&ksm_mmlist_lock);
1834 mm_slot = get_mm_slot(mm);
1835 if (mm_slot && ksm_scan.mm_slot != mm_slot) {
1836 if (!mm_slot->rmap_list) {
1837 hash_del(&mm_slot->link);
1838 list_del(&mm_slot->mm_list);
1839 easy_to_free = 1;
1840 } else {
1841 list_move(&mm_slot->mm_list,
1842 &ksm_scan.mm_slot->mm_list);
1843 }
1844 }
1845 spin_unlock(&ksm_mmlist_lock);
1846
1847 if (easy_to_free) {
1848 free_mm_slot(mm_slot);
1849 clear_bit(MMF_VM_MERGEABLE, &mm->flags);
1850 mmdrop(mm);
1851 } else if (mm_slot) {
1852 down_write(&mm->mmap_sem);
1853 up_write(&mm->mmap_sem);
1854 }
1855}
1856
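/*
 * A page brought back from swap may once have been a KSM page shared by
 * several mms: if its anon_vma no longer matches this vma (or if we are
 * unmerging), callers such as do_swap_page need a fresh anonymous copy
 * rather than the shared page itself.
 */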
1857struct page *ksm_might_need_to_copy(struct page *page,
1858 struct vm_area_struct *vma, unsigned long address)
1859{
1860 struct anon_vma *anon_vma = page_anon_vma(page);
1861 struct page *new_page;
1862
1863 if (PageKsm(page)) {
1864 if (page_stable_node(page) &&
1865 !(ksm_run & KSM_RUN_UNMERGE))
1866 return page; /* no need to copy it */
1867 } else if (!anon_vma) {
1868 return page; /* no need to copy it */
1869 } else if (anon_vma->root == vma->anon_vma->root &&
1870 page->index == linear_page_index(vma, address)) {
1871 return page; /* still no need to copy it */
1872 }
1873 if (!PageUptodate(page))
1874 return page; /* let do_swap_page report the error */
1875
1876 new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1877 if (new_page) {
1878 copy_user_highpage(new_page, page, address, vma);
1879
1880 SetPageDirty(new_page);
1881 __SetPageUptodate(new_page);
1882 __SetPageLocked(new_page);
1883 }
1884
1885 return new_page;
1886}
1887
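/*
 * rmap walk for a KSM page: visit every rmap_item hanging off the page's
 * stable node and, for each, every covering vma of its anon_vma; first the
 * mm the rmap_item was found in, then (search_new_forks) any mms forked
 * from it since ksmd last passed.
 */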
1888int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
1889{
1890 struct stable_node *stable_node;
1891 struct rmap_item *rmap_item;
1892 int ret = SWAP_AGAIN;
1893 int search_new_forks = 0;
1894
1895 VM_BUG_ON_PAGE(!PageKsm(page), page);
1896
1897 /*
1898 * Rely on the page lock to protect against concurrent modifications
1899 * to that page's node of the stable tree.
1900 */
1901 VM_BUG_ON_PAGE(!PageLocked(page), page);
1902
1903 stable_node = page_stable_node(page);
1904 if (!stable_node)
1905 return ret;
1906again:
1907 hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
1908 struct anon_vma *anon_vma = rmap_item->anon_vma;
1909 struct anon_vma_chain *vmac;
1910 struct vm_area_struct *vma;
1911
1912 cond_resched();
1913 anon_vma_lock_read(anon_vma);
1914 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
1915 0, ULONG_MAX) {
1916 cond_resched();
1917 vma = vmac->vma;
1918 if (rmap_item->address < vma->vm_start ||
1919 rmap_item->address >= vma->vm_end)
1920 continue;
1921 /*
1922 * Initially we examine only the vma which covers this
1923 * rmap_item; but later, if there is still work to do,
1924 * we examine covering vmas in other mms: in case they
1925 * were forked from the original since ksmd passed.
1926 */
1927 if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
1928 continue;
1929
1930 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
1931 continue;
1932
1933 ret = rwc->rmap_one(page, vma,
1934 rmap_item->address, rwc->arg);
1935 if (ret != SWAP_AGAIN) {
1936 anon_vma_unlock_read(anon_vma);
1937 goto out;
1938 }
1939 if (rwc->done && rwc->done(page)) {
1940 anon_vma_unlock_read(anon_vma);
1941 goto out;
1942 }
1943 }
1944 anon_vma_unlock_read(anon_vma);
1945 }
1946 if (!search_new_forks++)
1947 goto again;
1948out:
1949 return ret;
1950}
1951
1952#ifdef CONFIG_MIGRATION
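/*
 * Page migration replaces a KSM page by a copy at a different pfn: point
 * the stable node at the new pfn so get_ksm_page() keeps finding it, and
 * break the old page's link back to the stable node.
 */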
1953void ksm_migrate_page(struct page *newpage, struct page *oldpage)
1954{
1955 struct stable_node *stable_node;
1956
1957 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
1958 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
1959 VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage);
1960
1961 stable_node = page_stable_node(newpage);
1962 if (stable_node) {
1963 VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage);
1964 stable_node->kpfn = page_to_pfn(newpage);
1965 /*
1966 * newpage->mapping was set in advance; now we need smp_wmb()
1967 * to make sure that the new stable_node->kpfn is visible
1968 * to get_ksm_page() before it can see that oldpage->mapping
1969 * has gone stale (or that PageSwapCache has been cleared).
1970 */
1971 smp_wmb();
1972 set_page_stable_node(oldpage, NULL);
1973 }
1974}
1975#endif /* CONFIG_MIGRATION */
1976
1977#ifdef CONFIG_MEMORY_HOTREMOVE
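/*
 * While memory is going offline, the hotplug notifier below sets
 * KSM_RUN_OFFLINE; ksmd and the sysfs store handlers park here until that
 * bit is cleared again.
 */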
1978static void wait_while_offlining(void)
1979{
1980 while (ksm_run & KSM_RUN_OFFLINE) {
1981 mutex_unlock(&ksm_thread_mutex);
1982 wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
1983 TASK_UNINTERRUPTIBLE);
1984 mutex_lock(&ksm_thread_mutex);
1985 }
1986}
1987
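/*
 * Prune any stable nodes whose kpfn lies in the pfn range just offlined,
 * from every per-node stable tree and from the migrate_nodes list.
 */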
1988static void ksm_check_stable_tree(unsigned long start_pfn,
1989 unsigned long end_pfn)
1990{
1991 struct stable_node *stable_node, *next;
1992 struct rb_node *node;
1993 int nid;
1994
1995 for (nid = 0; nid < ksm_nr_node_ids; nid++) {
1996 node = rb_first(root_stable_tree + nid);
1997 while (node) {
1998 stable_node = rb_entry(node, struct stable_node, node);
1999 if (stable_node->kpfn >= start_pfn &&
2000 stable_node->kpfn < end_pfn) {
2001 /*
2002 * Don't get_ksm_page, page has already gone:
2003 * which is why we keep kpfn instead of page*
2004 */
2005 remove_node_from_stable_tree(stable_node);
2006 node = rb_first(root_stable_tree + nid);
2007 } else
2008 node = rb_next(node);
2009 cond_resched();
2010 }
2011 }
2012 list_for_each_entry_safe(stable_node, next, &migrate_nodes, list) {
2013 if (stable_node->kpfn >= start_pfn &&
2014 stable_node->kpfn < end_pfn)
2015 remove_node_from_stable_tree(stable_node);
2016 cond_resched();
2017 }
2018}
2019
2020static int ksm_memory_callback(struct notifier_block *self,
2021 unsigned long action, void *arg)
2022{
2023 struct memory_notify *mn = arg;
2024
2025 switch (action) {
2026 case MEM_GOING_OFFLINE:
2027 /*
2028 * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items()
2029 * and remove_all_stable_nodes() while memory is going offline:
2030 * it is unsafe for them to touch the stable tree at this time.
2031 * But unmerge_ksm_pages(), rmap lookups and other entry points
2032 * which do not need the ksm_thread_mutex are all safe.
2033 */
2034 mutex_lock(&ksm_thread_mutex);
2035 ksm_run |= KSM_RUN_OFFLINE;
2036 mutex_unlock(&ksm_thread_mutex);
2037 break;
2038
2039 case MEM_OFFLINE:
2040 /*
2041 * Most of the work is done by page migration; but there might
2042 * be a few stable_nodes left over, still pointing to struct
2043 * pages which have been offlined: prune those from the tree,
2044 * otherwise get_ksm_page() might later try to access a
2045 * non-existent struct page.
2046 */
2047 ksm_check_stable_tree(mn->start_pfn,
2048 mn->start_pfn + mn->nr_pages);
2049 /* fallthrough */
2050
2051 case MEM_CANCEL_OFFLINE:
2052 mutex_lock(&ksm_thread_mutex);
2053 ksm_run &= ~KSM_RUN_OFFLINE;
2054 mutex_unlock(&ksm_thread_mutex);
2055
2056 smp_mb(); /* wake_up_bit advises this */
2057 wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE));
2058 break;
2059 }
2060 return NOTIFY_OK;
2061}
2062#else
2063static void wait_while_offlining(void)
2064{
2065}
2066#endif /* CONFIG_MEMORY_HOTREMOVE */
2067
2068#ifdef CONFIG_SYSFS
2069/*
2070 * This all compiles without CONFIG_SYSFS, but is a waste of space.
2071 */
2072
2073#define KSM_ATTR_RO(_name) \
2074 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2075#define KSM_ATTR(_name) \
2076 static struct kobj_attribute _name##_attr = \
2077 __ATTR(_name, 0644, _name##_show, _name##_store)
2078
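/*
 * The attribute group below is registered on mm_kobj, so these files appear
 * under /sys/kernel/mm/ksm/.  Typical usage from a shell might be:
 *
 *	echo 100 > /sys/kernel/mm/ksm/pages_to_scan
 *	echo 20  > /sys/kernel/mm/ksm/sleep_millisecs
 *	echo 1   > /sys/kernel/mm/ksm/run	(KSM_RUN_MERGE: start ksmd)
 *	echo 2   > /sys/kernel/mm/ksm/run	(KSM_RUN_UNMERGE: undo merging)
 *	echo 0   > /sys/kernel/mm/ksm/run	(KSM_RUN_STOP)
 */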
2079static ssize_t sleep_millisecs_show(struct kobject *kobj,
2080 struct kobj_attribute *attr, char *buf)
2081{
2082 return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs);
2083}
2084
2085static ssize_t sleep_millisecs_store(struct kobject *kobj,
2086 struct kobj_attribute *attr,
2087 const char *buf, size_t count)
2088{
2089 unsigned long msecs;
2090 int err;
2091
2092 err = kstrtoul(buf, 10, &msecs);
2093 if (err || msecs > UINT_MAX)
2094 return -EINVAL;
2095
2096 ksm_thread_sleep_millisecs = msecs;
2097
2098 return count;
2099}
2100KSM_ATTR(sleep_millisecs);
2101
2102static ssize_t pages_to_scan_show(struct kobject *kobj,
2103 struct kobj_attribute *attr, char *buf)
2104{
2105 return sprintf(buf, "%u\n", ksm_thread_pages_to_scan);
2106}
2107
2108static ssize_t pages_to_scan_store(struct kobject *kobj,
2109 struct kobj_attribute *attr,
2110 const char *buf, size_t count)
2111{
2112 int err;
2113 unsigned long nr_pages;
2114
2115 err = kstrtoul(buf, 10, &nr_pages);
2116 if (err || nr_pages > UINT_MAX)
2117 return -EINVAL;
2118
2119 ksm_thread_pages_to_scan = nr_pages;
2120
2121 return count;
2122}
2123KSM_ATTR(pages_to_scan);
2124
2125static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr,
2126 char *buf)
2127{
2128 return sprintf(buf, "%lu\n", ksm_run);
2129}
2130
2131static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
2132 const char *buf, size_t count)
2133{
2134 int err;
2135 unsigned long flags;
2136
2137 err = kstrtoul(buf, 10, &flags);
2138 if (err || flags > UINT_MAX)
2139 return -EINVAL;
2140 if (flags > KSM_RUN_UNMERGE)
2141 return -EINVAL;
2142
2143 /*
2144 * KSM_RUN_MERGE sets ksmd running, and 0 stops it running.
2145 * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items,
2146 * breaking COW to free the pages_shared (but leaves mm_slots
2147 * on the list for when ksmd may be set running again).
2148 */
2149
2150 mutex_lock(&ksm_thread_mutex);
2151 wait_while_offlining();
2152 if (ksm_run != flags) {
2153 ksm_run = flags;
2154 if (flags & KSM_RUN_UNMERGE) {
2155 set_current_oom_origin();
2156 err = unmerge_and_remove_all_rmap_items();
2157 clear_current_oom_origin();
2158 if (err) {
2159 ksm_run = KSM_RUN_STOP;
2160 count = err;
2161 }
2162 }
2163 }
2164 mutex_unlock(&ksm_thread_mutex);
2165
2166 if (flags & KSM_RUN_MERGE)
2167 wake_up_interruptible(&ksm_thread_wait);
2168
2169 return count;
2170}
2171KSM_ATTR(run);
2172
2173#ifdef CONFIG_NUMA
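/*
 * merge_across_nodes: 1 (the default) keeps a single pair of trees, so pages
 * from different NUMA nodes may be merged together; 0 keeps one stable and
 * one unstable tree per node.  The knob can only be flipped while nothing is
 * currently merged, hence the -EBUSY below.
 */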
2174static ssize_t merge_across_nodes_show(struct kobject *kobj,
2175 struct kobj_attribute *attr, char *buf)
2176{
2177 return sprintf(buf, "%u\n", ksm_merge_across_nodes);
2178}
2179
2180static ssize_t merge_across_nodes_store(struct kobject *kobj,
2181 struct kobj_attribute *attr,
2182 const char *buf, size_t count)
2183{
2184 int err;
2185 unsigned long knob;
2186
2187 err = kstrtoul(buf, 10, &knob);
2188 if (err)
2189 return err;
2190 if (knob > 1)
2191 return -EINVAL;
2192
2193 mutex_lock(&ksm_thread_mutex);
2194 wait_while_offlining();
2195 if (ksm_merge_across_nodes != knob) {
2196 if (ksm_pages_shared || remove_all_stable_nodes())
2197 err = -EBUSY;
2198 else if (root_stable_tree == one_stable_tree) {
2199 struct rb_root *buf;
2200 /*
2201 * This is the first time that we switch away from the
2202 * default of merging across nodes: must now allocate
2203 * a buffer to hold as many roots as may be needed.
2204 * Allocate stable and unstable together:
2205 * MAXSMP NODES_SHIFT 10 will use 16kB.
2206 */
2207 buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf),
2208 GFP_KERNEL);
2209			/* Assume RB_ROOT is all zeroes, so the kcalloc'ed roots start out empty */
2210 if (!buf)
2211 err = -ENOMEM;
2212 else {
2213 root_stable_tree = buf;
2214 root_unstable_tree = buf + nr_node_ids;
2215 /* Stable tree is empty but not the unstable */
2216 root_unstable_tree[0] = one_unstable_tree[0];
2217 }
2218 }
2219 if (!err) {
2220 ksm_merge_across_nodes = knob;
2221 ksm_nr_node_ids = knob ? 1 : nr_node_ids;
2222 }
2223 }
2224 mutex_unlock(&ksm_thread_mutex);
2225
2226 return err ? err : count;
2227}
2228KSM_ATTR(merge_across_nodes);
2229#endif
2230
2231static ssize_t pages_shared_show(struct kobject *kobj,
2232 struct kobj_attribute *attr, char *buf)
2233{
2234 return sprintf(buf, "%lu\n", ksm_pages_shared);
2235}
2236KSM_ATTR_RO(pages_shared);
2237
2238static ssize_t pages_sharing_show(struct kobject *kobj,
2239 struct kobj_attribute *attr, char *buf)
2240{
2241 return sprintf(buf, "%lu\n", ksm_pages_sharing);
2242}
2243KSM_ATTR_RO(pages_sharing);
2244
2245static ssize_t pages_unshared_show(struct kobject *kobj,
2246 struct kobj_attribute *attr, char *buf)
2247{
2248 return sprintf(buf, "%lu\n", ksm_pages_unshared);
2249}
2250KSM_ATTR_RO(pages_unshared);
2251
2252static ssize_t pages_volatile_show(struct kobject *kobj,
2253 struct kobj_attribute *attr, char *buf)
2254{
2255 long ksm_pages_volatile;
2256
2257 ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared
2258 - ksm_pages_sharing - ksm_pages_unshared;
2259 /*
2260 * It was not worth any locking to calculate that statistic,
2261 * but it might therefore sometimes be negative: conceal that.
2262 */
2263 if (ksm_pages_volatile < 0)
2264 ksm_pages_volatile = 0;
2265 return sprintf(buf, "%ld\n", ksm_pages_volatile);
2266}
2267KSM_ATTR_RO(pages_volatile);
2268
2269static ssize_t full_scans_show(struct kobject *kobj,
2270 struct kobj_attribute *attr, char *buf)
2271{
2272 return sprintf(buf, "%lu\n", ksm_scan.seqnr);
2273}
2274KSM_ATTR_RO(full_scans);
2275
2276static struct attribute *ksm_attrs[] = {
2277 &sleep_millisecs_attr.attr,
2278 &pages_to_scan_attr.attr,
2279 &run_attr.attr,
2280 &pages_shared_attr.attr,
2281 &pages_sharing_attr.attr,
2282 &pages_unshared_attr.attr,
2283 &pages_volatile_attr.attr,
2284 &full_scans_attr.attr,
2285#ifdef CONFIG_NUMA
2286 &merge_across_nodes_attr.attr,
2287#endif
2288 NULL,
2289};
2290
2291static struct attribute_group ksm_attr_group = {
2292 .attrs = ksm_attrs,
2293 .name = "ksm",
2294};
2295#endif /* CONFIG_SYSFS */
2296
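/*
 * Boot-time setup, run from subsys_initcall(): create the slab caches,
 * start the ksmd kernel thread and, with CONFIG_SYSFS, expose the tunables
 * above; without sysfs there is no interface to start ksmd, so merging is
 * simply enabled.
 */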
2297static int __init ksm_init(void)
2298{
2299 struct task_struct *ksm_thread;
2300 int err;
2301
2302 err = ksm_slab_init();
2303 if (err)
2304 goto out;
2305
2306 ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd");
2307 if (IS_ERR(ksm_thread)) {
2308 pr_err("ksm: creating kthread failed\n");
2309 err = PTR_ERR(ksm_thread);
2310 goto out_free;
2311 }
2312
2313#ifdef CONFIG_SYSFS
2314 err = sysfs_create_group(mm_kobj, &ksm_attr_group);
2315 if (err) {
2316 pr_err("ksm: register sysfs failed\n");
2317 kthread_stop(ksm_thread);
2318 goto out_free;
2319 }
2320#else
2321 ksm_run = KSM_RUN_MERGE; /* no way for user to start it */
2322
2323#endif /* CONFIG_SYSFS */
2324
2325#ifdef CONFIG_MEMORY_HOTREMOVE
2326 /* There is no significance to this priority 100 */
2327 hotplug_memory_notifier(ksm_memory_callback, 100);
2328#endif
2329 return 0;
2330
2331out_free:
2332 ksm_slab_free();
2333out:
2334 return err;
2335}
2336subsys_initcall(ksm_init);