#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
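
/*
 * Example: a typical consumer of mapping_set_error() is a writeback
 * completion path, which latches any async write error in mapping->flags
 * so that a later fsync()/msync() can report it.  A minimal sketch with a
 * hypothetical function name (not part of this header's API):
 */
static inline void example_end_async_write(struct address_space *mapping,
					   int error)
{
	/* Records AS_ENOSPC for -ENOSPC, AS_EIO for any other failure. */
	mapping_set_error(mapping, error);
	/* ...per-page completion work (end_page_writeback() etc.)... */
}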

/*
 * Speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned.  Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as was
 * used to look up the page in the pagecache radix-tree (or page table): this
 * allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (e.g. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (e.g. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache.  That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not.  Likewise, the old find_get_page could
 * run either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}
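
/*
 * Illustrative sketch of the lookup-side pattern (steps 1-3 above), along
 * the lines of find_get_page().  This is a simplified example with a
 * hypothetical name, not the real implementation: it glosses over the
 * radix-tree slot re-validation details of the real lookup.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, index);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/*
		 * Step 3: the page may have been removed (and even freed and
		 * reallocated) between lookup and refcount bump, so recheck
		 * that it is still the page at this index.
		 */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;
}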

/*
 * Same as above, but add instead of inc (could just be merged).
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}

typedef int filler_t(void *, struct page *);

extern struct page *find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page *find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);
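
/*
 * Illustrative sketch (hypothetical helper, not part of this header): the
 * usual way to walk a mapping with the gang-lookup functions above.  Each
 * page returned by find_get_pages() carries a reference that the caller
 * must drop, done here in one batch via release_pages().
 */
static inline void example_scan_mapping(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t index = 0;
	unsigned nr, i;

	while ((nr = find_get_pages(mapping, index, 16, pages)) != 0) {
		/* Resume the next batch after the last page we saw. */
		index = pages[nr - 1]->index + 1;
		for (i = 0; i < nr; i++) {
			/* ...inspect pages[i] here... */
		}
		release_pages(pages, nr, 0);
	}
}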

/*
 * Returns the locked page at the given index in the given cache, creating it
 * if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
					   pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page *read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Return the byte offset into the filesystem object for this page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
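
/*
 * Illustrative sketch of the usual lock_page_killable() pattern
 * (hypothetical helper): after sleeping for the lock, the page may have
 * been truncated away, so re-check page->mapping under the page lock
 * before trusting the page.
 */
static inline int example_lock_and_revalidate(struct page *page,
					      struct address_space *mapping)
{
	int error = lock_page_killable(page);

	if (error)
		return error;	/* -EINTR: fatal signal while waiting */
	if (page->mapping != mapping) {
		/* Truncated while we slept; the caller should retry. */
		unlock_page(page);
		return -EAGAIN;
	}
	return 0;
}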

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an elevated page reference count, so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into the pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
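
/*
 * Illustrative sketch (hypothetical helper, modelled on the kernel's
 * page_cache_read() pattern): allocate a fresh page, insert it into the
 * pagecache and LRU, and ask the filesystem to fill it.  Error handling
 * is simplified; add_to_page_cache_lru() can fail with -EEXIST if someone
 * else installed a page at this index first.
 */
static inline int example_read_into_cache(struct file *file,
					  struct address_space *mapping,
					  pgoff_t offset)
{
	struct page *page = page_cache_alloc_cold(mapping);
	int error;

	if (!page)
		return -ENOMEM;

	error = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
	if (!error)
		error = mapping->a_ops->readpage(file, page);

	/* Drop our reference; on success the pagecache keeps its own. */
	page_cache_release(page);
	return error;
}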

#endif /* _LINUX_PAGEMAP_H */