--- migrate.c (c3cc99ff5d24e2eeaf7ec2032e720681916990e3)
+++ migrate.c (e286781d5f2e9c846e012a39653a166e9d31777d)
 /*
  * Memory Migration functionality - linux/mm/migration.c
  *
  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
  *
  * Page migration was first developed in the context of the memory hotplug
  * project. The main authors of the migration code are:
  *
--- 271 unchanged lines hidden ---
 		goto out;
 
 	entry = pte_to_swp_entry(pte);
 	if (!is_migration_entry(entry))
 		goto out;
 
 	page = migration_entry_to_page(entry);
 
-	get_page(page);
+	/*
+	 * Once radix-tree replacement of page migration started, page_count
+	 * *must* be zero. And, we don't want to call wait_on_page_locked()
+	 * against a page without get_page().
+	 * So, we use get_page_unless_zero(), here. Even failed, page fault
+	 * will occur again.
+	 */
+	if (!get_page_unless_zero(page))
+		goto out;
 	pte_unmap_unlock(ptep, ptl);
 	wait_on_page_locked(page);
 	put_page(page);
 	return;
 out:
 	pte_unmap_unlock(ptep, ptl);
 }
 
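The change above replaces the old unconditional get_page() with get_page_unless_zero(): once migration has frozen a page's reference count to zero for the radix-tree replacement, the fault path must not take a new reference, so it backs off and lets the fault simply be retried. get_page_unless_zero() is, in essence, an atomic "increment unless zero" on the page's reference count. Purely as an illustration of that idea, here is a minimal user-space sketch with C11 atomics and a hypothetical helper name; it is not the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Illustration only: "take a reference unless the count is zero".
 * The compare-exchange loop guarantees that a count frozen at zero
 * is never resurrected by a racing reader.
 */
static bool get_ref_unless_zero(atomic_int *refs)
{
	int old = atomic_load(refs);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refs, &old, old + 1))
			return true;	/* reference taken */
		/* a failed CAS reloads `old`; retry with the new value */
	}
	return false;	/* count is zero: back off, the fault will be retried */
}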
 /*
  * Replace the page in the mapping.
  *
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate set.
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
 {
+	int expected_count;
 	void **pslot;
 
 	if (!mapping) {
 		/* Anonymous page without mapping */
 		if (page_count(page) != 1)
 			return -EAGAIN;
 		return 0;
 	}
 
 	write_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
 
-	if (page_count(page) != 2 + !!PagePrivate(page) ||
+	expected_count = 2 + !!PagePrivate(page);
+	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		write_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
+	if (!page_freeze_refs(page, expected_count)) {
+		write_unlock_irq(&mapping->tree_lock);
+		return -EAGAIN;
+	}
+
 	/*
 	 * Now we know that no one else is looking at the page.
 	 */
 	get_page(newpage);	/* add cache reference */
 #ifdef CONFIG_SWAP
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
 		set_page_private(newpage, page_private(page));
 	}
 #endif
 
 	radix_tree_replace_slot(pslot, newpage);
 
+	page_unfreeze_refs(page, expected_count);
+
 	/*
 	 * Drop cache reference from old page.
 	 * We know this isn't the last reference.
 	 */
 	__put_page(page);
 
 	/*
 	 * If moved to a different zone then also account
--- 754 unchanged lines hidden ---
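The new -EAGAIN branch around page_freeze_refs() is the other half of the scheme: before the radix-tree slot is swapped, migration atomically exchanges the expected reference count (2 plus one for PagePrivate) for zero, so any lookup that races with the replacement sees a zero count and its get_page_unless_zero() backs off. Once the new page is installed, page_unfreeze_refs() publishes the count again. Continuing the same user-space analogue as above, with hypothetical helper names that only sketch the freeze/unfreeze semantics (the real helpers operate on the struct page reference count):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Freeze: succeed only if exactly `expected` references remain,
 * leaving the count at zero so concurrent lookups refuse to take
 * new references while the slot is being replaced.
 */
static bool freeze_refs(atomic_int *refs, int expected)
{
	return atomic_compare_exchange_strong(refs, &expected, 0);
}

/* Unfreeze: make the references visible again after the replacement. */
static void unfreeze_refs(atomic_int *refs, int count)
{
	atomic_store(refs, count);
}

If the freeze fails, some other path still holds (or speculatively grabbed) a reference, so migration gives up with -EAGAIN rather than replacing a page that is still in use.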