#ifndef _LINUX_RMAP_H
#define _LINUX_RMAP_H
/*
 * Declarations for Reverse Mapping functions in mm/rmap.c
 */

#include <linux/config.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * The anon_vma heads a list of private "related" vmas, to scan if
 * an anonymous page pointing to this anon_vma needs to be unmapped:
 * the vmas on the list will be related by forking, or by splitting.
 *
 * Since vmas come and go as they are split and merged (particularly
 * in mprotect), the mapping field of an anonymous page cannot point
 * directly to a vma: instead it points to an anon_vma, on whose list
 * the related vmas can be easily linked or unlinked.
 *
 * After unlinking the last vma on the list, we must garbage collect
 * the anon_vma object itself: we're guaranteed no page can be
 * pointing to this anon_vma once its vma list is empty.
 */
struct anon_vma {
	spinlock_t lock;	/* Serialize access to vma list */
	struct list_head head;	/* List of private "related" vmas */
};

#ifdef CONFIG_MMU

extern kmem_cache_t *anon_vma_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, SLAB_KERNEL);
}

static inline void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline void anon_vma_lock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_lock(&anon_vma->lock);
}

static inline void anon_vma_unlock(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	if (anon_vma)
		spin_unlock(&anon_vma->lock);
}

/*
 * anon_vma helper functions.
 */
void anon_vma_init(void);	/* create anon_vma_cachep */
int anon_vma_prepare(struct vm_area_struct *);
void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
void anon_vma_unlink(struct vm_area_struct *);
void anon_vma_link(struct vm_area_struct *);
void __anon_vma_link(struct vm_area_struct *);

/*
 * rmap interfaces called when adding or removing a pte mapping a page
 */
void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
void page_add_file_rmap(struct page *);
void page_remove_rmap(struct page *);

/**
 * page_dup_rmap - duplicate pte mapping to a page
 * @page: the page to add the mapping to
 *
 * For copy_page_range only: minimal extract from page_add_anon_rmap,
 * avoiding unnecessary tests (already checked) so it's quicker.
 */
static inline void page_dup_rmap(struct page *page)
{
	atomic_inc(&page->_mapcount);
}

/*
 * Called from mm/vmscan.c to handle paging out
 */
int page_referenced(struct page *, int is_locked, int ignore_token);
int try_to_unmap(struct page *);

/*
 * Used by swapoff to help locate where page is expected in vma.
 */
unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);

#else	/* !CONFIG_MMU */

#define anon_vma_init()		do {} while (0)
#define anon_vma_prepare(vma)	(0)
#define anon_vma_link(vma)	do {} while (0)

#define page_referenced(page,l,i) TestClearPageReferenced(page)
#define try_to_unmap(page)	SWAP_FAIL

#endif	/* CONFIG_MMU */

/*
 * Return values of try_to_unmap
 */
#define SWAP_SUCCESS	0
#define SWAP_AGAIN	1
#define SWAP_FAIL	2

#endif	/* _LINUX_RMAP_H */
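
/*
 * Illustrative sketch (editor's addition, compiled out): how the anon_vma
 * and rmap interfaces above cooperate when a fault installs a brand-new
 * anonymous page.  Page-table setup, pte locking and accounting are
 * elided, and example_anon_fault() is a hypothetical name; the caller of
 * this sequence in the real kernel is do_anonymous_page() in mm/memory.c.
 */
#if 0
static int example_anon_fault(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	/*
	 * Attach an anon_vma to the vma first (allocating from
	 * anon_vma_cachep if needed), so the new page has something
	 * to point back at.
	 */
	if (anon_vma_prepare(vma))
		return VM_FAULT_OOM;

	page = alloc_page(GFP_HIGHUSER);
	if (!page)
		return VM_FAULT_OOM;

	/* ... clear the page and install a pte for it at addr ... */

	/*
	 * Record the reverse mapping: page->mapping is set to point at
	 * vma->anon_vma and page->_mapcount goes from -1 to 0, so that
	 * try_to_unmap() can later find every pte mapping this page.
	 */
	page_add_anon_rmap(page, vma, addr);
	return VM_FAULT_MINOR;
}
#endif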
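
/*
 * Illustrative sketch (editor's addition, compiled out): how reclaim in
 * the style of mm/vmscan.c consults rmap before paging a mapped page out.
 * The page must already be locked by the caller; example_try_reclaim() is
 * a hypothetical name, and the real logic lives in shrink_list().
 */
#if 0
static int example_try_reclaim(struct page *page)
{
	/*
	 * page_referenced() test-and-clears the referenced bits in every
	 * pte mapping the page, found via the anon_vma list (anonymous)
	 * or the address_space (file-backed); a non-zero result means
	 * the page was recently used, so leave it mapped.
	 */
	if (page_referenced(page, 1, 0))
		return SWAP_FAIL;

	/*
	 * try_to_unmap() walks the same reverse mapping and removes each
	 * pte: SWAP_SUCCESS means the page is fully unmapped and can be
	 * written out or freed, SWAP_AGAIN asks the caller to retry, and
	 * SWAP_FAIL means the page cannot be unmapped for now.
	 */
	return try_to_unmap(page);
}
#endif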