include/linux/ksm.h at v4.10.11:
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */
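
For context on how the CONFIG_KSM entry points above are reached: ksm_madvise() is the kernel-side handler for madvise(2) with MADV_MERGEABLE or MADV_UNMERGEABLE; it sets or clears VM_MERGEABLE on the vma, and __ksm_enter() runs the first time an mm gains a mergeable area. The following is a minimal userspace sketch, not part of this header, assuming a kernel built with CONFIG_KSM and ksmd started by writing 1 to /sys/kernel/mm/ksm/run:

/* Hypothetical demo: map identical anonymous pages and ask KSM to
 * merge them via madvise(MADV_MERGEABLE). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;	/* 64 pages, assuming 4 KiB pages */
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);	/* identical content in every page */

	/* Kernel path: madvise() -> ksm_madvise() -> __ksm_enter() on
	 * first use; ksmd later scans and merges the duplicates. */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise");	/* e.g. EINVAL if !CONFIG_KSM */

	getchar();	/* keep the mapping alive while ksmd scans */
	return 0;
}

While the demo blocks on getchar(), merging progress can be observed in /sys/kernel/mm/ksm/pages_shared and pages_sharing.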
include/linux/ksm.h at v3.15 (this version still defines PageKsm() here; later kernels moved it to include/linux/page-flags.h):
#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma).  do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);

int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */
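
The PageKsm()/page_stable_node()/set_page_stable_node() trio above relies on tagging the low bits of page->mapping: an ordinary anonymous page carries PAGE_MAPPING_ANON, while a KSM page carries PAGE_MAPPING_ANON | PAGE_MAPPING_KSM, with the remaining bits holding a pointer to the page's stable-tree node rather than to an anon_vma. The following standalone sketch illustrates the same tagging scheme; the constants mirror their v3.15 values, and struct page and struct stable_node here are toy stand-ins, not the kernel's types:

/* Illustrative sketch of the page->mapping low-bit tagging used above.
 * Values mirror v3.15: PAGE_MAPPING_ANON = 1, PAGE_MAPPING_KSM = 2. */
#include <assert.h>
#include <stdio.h>

#define PAGE_MAPPING_ANON	1UL
#define PAGE_MAPPING_KSM	2UL
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

struct stable_node { int dummy; };		/* toy stand-in */
struct page { void *mapping; };			/* toy stand-in */

static int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static void set_page_stable_node(struct page *page,
				 struct stable_node *stable_node)
{
	/* The node is word-aligned, so its two low bits are free to
	 * carry the ANON|KSM type tag. */
	page->mapping = (void *)((unsigned long)stable_node |
				 PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static struct stable_node *page_stable_node(struct page *page)
{
	/* Mask the tag bits back off to recover the pointer; this is
	 * what page_rmapping() does for the real header. */
	return PageKsm(page) ?
		(struct stable_node *)((unsigned long)page->mapping &
				       ~PAGE_MAPPING_FLAGS) : NULL;
}

int main(void)
{
	static struct stable_node node;
	struct page pg = { 0 };

	set_page_stable_node(&pg, &node);
	assert(PageKsm(&pg));
	assert(page_stable_node(&pg) == &node);
	printf("tagged mapping = %p\n", pg.mapping);
	return 0;
}

The round trip in main() shows why the trick is safe: alignment guarantees the low bits of the pointer are zero, so tagging and masking never lose information.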