#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
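
/*
 * Usage sketch (illustrative, not taken from this header): userspace opts
 * a range of anonymous memory into merging via madvise(2); ksmd then scans
 * such areas and merges pages with identical content:
 *
 *	madvise(addr, length, MADV_MERGEABLE);		allow merging
 *	madvise(addr, length, MADV_UNMERGEABLE);	undo, giving each mm
 *							its own copies again
 */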

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

/*
 * Allocate a private copy of a swapped-in page that used to be KSM for
 * this vma; see ksm_might_need_to_copy() below.
 */
struct page *ksm_does_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}
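
/*
 * Usage sketch (illustrative, not taken from this header): the fork path
 * is expected to propagate mergeability from parent to child, and mm
 * teardown to unhook the mm from ksmd, roughly:
 *
 *	err = ksm_fork(mm, oldmm);	registers the child with ksmd when
 *	if (err)			the parent has MMF_VM_MERGEABLE set
 *		goto fail;
 *	...
 *	ksm_exit(mm);			when the mm is torn down
 */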

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}
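
/*
 * Encoding sketch (illustrative): the low PAGE_MAPPING_FLAGS bits of
 * page->mapping tag the pointer, so for a KSM page
 *
 *	page->mapping == (void *)stable_node
 *			 | PAGE_MAPPING_ANON | PAGE_MAPPING_KSM
 *
 * page_rmapping() masks those flag bits off again, which is how
 * page_stable_node() recovers the stable_node pointer above.
 */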

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
static inline int ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = page_anon_vma(page);

	return anon_vma &&
		(anon_vma->root != vma->anon_vma->root ||
		 page->index != linear_page_index(vma, address));
}
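
/*
 * Usage sketch (illustrative, not taken from this header): a swap-in
 * fault is expected to pair the cheap check above with the out-of-line
 * copy declared near the top of this file, roughly:
 *
 *	if (ksm_might_need_to_copy(page, vma, address))
 *		page = ksm_does_need_to_copy(page, vma, address);
 *
 * so only a page that cannot simply be reattached to this vma's anon_vma
 * gets copied; ksmd may remerge the copy later.
 */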

/* rmap and migration hooks for KSM pages, implemented in mm/ksm.c: */
int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags);
int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		  struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
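
/*
 * Dispatch sketch (illustrative): the generic rmap walkers are expected
 * to branch on PageKsm() and hand KSM pages to the helpers above, e.g.
 *
 *	if (unlikely(PageKsm(page)))
 *		referenced = page_referenced_ksm(page, memcg, vm_flags);
 *	else if (PageAnon(page))
 *		...
 */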

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline int ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return 0;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
		struct vm_area_struct *, unsigned long, void *), void *arg)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */