#ifndef __LINUX_KSM_H
#define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */

#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/sched.h>

struct stable_node;
struct mem_cgroup;

#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
		return __ksm_enter(mm);
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
		__ksm_exit(mm);
}

/*
 * A KSM page is one of those write-protected "shared pages" or "merged pages"
 * which KSM maps into multiple mms, wherever identical anonymous page content
 * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
 * anon_vma, but to that page's node of the stable tree.
 */
static inline int PageKsm(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

static inline struct stable_node *page_stable_node(struct page *page)
{
	return PageKsm(page) ? page_rmapping(page) : NULL;
}

static inline void set_page_stable_node(struct page *page,
					struct stable_node *stable_node)
{
	page->mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}

/*
 * When do_swap_page() first faults in from swap what used to be a KSM page,
 * no problem, it will be assigned to this vma's anon_vma; but thereafter,
 * it might be faulted into a different anon_vma (or perhaps to a different
 * offset in the same anon_vma). do_swap_page() cannot do all the locking
 * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
 * a copy, and leave remerging the pages to a later pass of ksmd.
 *
 * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
 * but what if the vma was unmerged while the page was swapped out?
 */
struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address);
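/*
 * Illustrative sketch (editor's addition, not part of the kernel source):
 * a swap-in fault handler is expected to continue with whatever page
 * ksm_might_need_to_copy() hands back, either the original page, a freshly
 * allocated copy, or NULL if the copy could not be allocated. Assuming a
 * do_swap_page()-like caller (so "vma", "address", "swapcache", "ret" and
 * the "out_page" label all belong to that caller), the shape is roughly:
 *
 *	page = ksm_might_need_to_copy(page, vma, address);
 *	if (unlikely(!page)) {
 *		ret = VM_FAULT_OOM;	(could not allocate the copy)
 *		page = swapcache;
 *		goto out_page;
 *	}
 *	(then map "page", original or copy, into the vma as usual)
 *
 * See the real caller, do_swap_page(), in mm/memory.c.
 */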

int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);

#else  /* !CONFIG_KSM */

static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	return 0;
}

static inline void ksm_exit(struct mm_struct *mm)
{
}

static inline int PageKsm(struct page *page)
{
	return 0;
}

#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, int advice, unsigned long *vm_flags)
{
	return 0;
}

static inline struct page *ksm_might_need_to_copy(struct page *page,
			struct vm_area_struct *vma, unsigned long address)
{
	return page;
}

static inline int page_referenced_ksm(struct page *page,
			struct mem_cgroup *memcg, unsigned long *vm_flags)
{
	return 0;
}

static inline int rmap_walk_ksm(struct page *page,
			struct rmap_walk_control *rwc)
{
	return 0;
}

static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
}
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */

#endif /* __LINUX_KSM_H */
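
/*
 * Usage note (editor's addition, illustrative only): userspace opts memory
 * into KSM merging with madvise(2), which reaches ksm_madvise() above and
 * sets VM_MERGEABLE on the vma. A minimal sketch, assuming a kernel built
 * with CONFIG_KSM and ksmd enabled ("echo 1 > /sys/kernel/mm/ksm/run"):
 *
 *	#include <sys/mman.h>
 *
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (madvise(buf, len, MADV_MERGEABLE) != 0)
 *		(handle error, e.g. EINVAL on a kernel without CONFIG_KSM)
 *
 * ksmd then scans such VM_MERGEABLE areas and merges identical pages behind
 * the scenes; madvise(buf, len, MADV_UNMERGEABLE) undoes the hint.
 */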