mm/memory_hotplug.c: diff between commits 748c5151deb56e4b7b5a9b07a884243764933831 (old) and 7b78d335ac15b10bbcb0397c635d7f0d569b0270 (new). Lines prefixed with "+" were added and lines prefixed with "-" were removed; unchanged context lines carry a leading space, unified-diff style.

 /*
  * linux/mm/memory_hotplug.c
  *
  * Copyright (C)
  */

 #include <linux/stddef.h>
 #include <linux/mm.h>

--- 173 unchanged lines hidden ---

 int online_pages(unsigned long pfn, unsigned long nr_pages)
 {
         unsigned long flags;
         unsigned long onlined_pages = 0;
         struct zone *zone;
         int need_zonelists_rebuild = 0;
+        int nid;
+        int ret;
+        struct memory_notify arg;

+        arg.start_pfn = pfn;
+        arg.nr_pages = nr_pages;
+        arg.status_change_nid = -1;
+
+        nid = page_to_nid(pfn_to_page(pfn));
+        if (node_present_pages(nid) == 0)
+                arg.status_change_nid = nid;
+
+        ret = memory_notify(MEM_GOING_ONLINE, &arg);
+        ret = notifier_to_errno(ret);
+        if (ret) {
+                memory_notify(MEM_CANCEL_ONLINE, &arg);
+                return ret;
+        }
         /*
          * This doesn't need a lock to do pfn_to_page().
          * The section can't be removed here because of the
          * memory_block->state_sem.
          */
         zone = page_zone(pfn_to_page(pfn));
         pgdat_resize_lock(zone->zone_pgdat, &flags);
         grow_zone_span(zone, pfn, pfn + nr_pages);

--- 18 unchanged lines hidden ---

                 kswapd_run(zone_to_nid(zone));
                 node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
         }

         if (need_zonelists_rebuild)
                 build_all_zonelists();
         vm_total_pages = nr_free_pagecache_pages();
         writeback_set_ratelimit();
+
+        if (onlined_pages)
+                memory_notify(MEM_ONLINE, &arg);
+
         return 0;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

 static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
 {
         struct pglist_data *pgdat;
         unsigned long zones_size[MAX_NR_ZONES] = {0};

--- 229 unchanged lines hidden ---

 extern void drain_all_local_pages(void);

 int offline_pages(unsigned long start_pfn,
                   unsigned long end_pfn, unsigned long timeout)
 {
         unsigned long pfn, nr_pages, expire;
         long offlined_pages;
-        int ret, drain, retry_max;
+        int ret, drain, retry_max, node;
         struct zone *zone;
+        struct memory_notify arg;

         BUG_ON(start_pfn >= end_pfn);
         /* at least, alignment against pageblock is necessary */
         if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
                 return -EINVAL;
         if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
                 return -EINVAL;
         /* This makes hotplug much easier...and readable.
            we assume this for now. .*/
         if (!test_pages_in_a_zone(start_pfn, end_pfn))
                 return -EINVAL;
+
+        zone = page_zone(pfn_to_page(start_pfn));
+        node = zone_to_nid(zone);
+        nr_pages = end_pfn - start_pfn;
+
         /* set above range as isolated */
         ret = start_isolate_page_range(start_pfn, end_pfn);
         if (ret)
                 return ret;
-        nr_pages = end_pfn - start_pfn;
+
+        arg.start_pfn = start_pfn;
+        arg.nr_pages = nr_pages;
+        arg.status_change_nid = -1;
+        if (nr_pages >= node_present_pages(node))
+                arg.status_change_nid = node;
+
+        ret = memory_notify(MEM_GOING_OFFLINE, &arg);
+        ret = notifier_to_errno(ret);
+        if (ret)
+                goto failed_removal;
+
         pfn = start_pfn;
         expire = jiffies + timeout;
         drain = 0;
         retry_max = 5;
 repeat:
         /* start memory hot removal */
         ret = -EAGAIN;
         if (time_after(jiffies, expire))

--- 38 unchanged lines hidden ---

         }
         printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
         /* Ok, all of our target is islaoted.
            We cannot do rollback at this point. */
         offline_isolated_pages(start_pfn, end_pfn);
         /* reset pagetype flags */
         start_isolate_page_range(start_pfn, end_pfn);
         /* removal success */
-        zone = page_zone(pfn_to_page(start_pfn));
         zone->present_pages -= offlined_pages;
         zone->zone_pgdat->node_present_pages -= offlined_pages;
         totalram_pages -= offlined_pages;
         num_physpages -= offlined_pages;
+
         vm_total_pages = nr_free_pagecache_pages();
         writeback_set_ratelimit();
+
+        memory_notify(MEM_OFFLINE, &arg);
         return 0;

 failed_removal:
         printk(KERN_INFO "memory offlining %lx to %lx failed\n",
                 start_pfn, end_pfn);
+        memory_notify(MEM_CANCEL_OFFLINE, &arg);
         /* pushback to free area */
         undo_isolate_page_range(start_pfn, end_pfn);
+
         return ret;
 }
 #else
 int remove_memory(u64 start, u64 size)
 {
         return -EINVAL;
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 #endif /* CONFIG_MEMORY_HOTREMOVE */
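
The hunks above route MEM_GOING_ONLINE/MEM_CANCEL_ONLINE/MEM_ONLINE and MEM_GOING_OFFLINE/MEM_CANCEL_OFFLINE/MEM_OFFLINE events through memory_notify() around the points where a page range is actually onlined or offlined. For context, here is a minimal sketch of how a subsystem might consume those events. It assumes the register_memory_notifier() interface and the struct memory_notify fields visible in this diff (start_pfn, nr_pages, status_change_nid); the callback and variable names are hypothetical and not part of the patch.

/* Hypothetical consumer of the memory hotplug notifier chain (sketch). */
#include <linux/memory.h>
#include <linux/notifier.h>

static int example_mem_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct memory_notify *mn = data;   /* the &arg passed to memory_notify() */

        switch (action) {
        case MEM_GOING_ONLINE:
        case MEM_GOING_OFFLINE:
                /*
                 * Pre-change notification.  Returning NOTIFY_BAD here makes
                 * online_pages()/offline_pages() abort and emit the matching
                 * MEM_CANCEL_* event, per the error paths in the diff.
                 */
                break;
        case MEM_ONLINE:
        case MEM_OFFLINE:
                /* React to mn->start_pfn, mn->nr_pages, mn->status_change_nid. */
                break;
        case MEM_CANCEL_ONLINE:
        case MEM_CANCEL_OFFLINE:
                /* Undo whatever was prepared for the corresponding GOING_* event. */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block example_mem_nb = {
        .notifier_call = example_mem_callback,
};

/* Registration, e.g. from an __init routine: */
/* register_memory_notifier(&example_mem_nb); */

Note that status_change_nid is only set to a valid node id when the operation gives a node its first present pages (online path: node_present_pages(nid) == 0) or removes its last ones (offline path: nr_pages >= node_present_pages(node)); otherwise it stays -1, so a callback can tell node-level transitions apart from ordinary section onlining and offlining.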