/*
 * drivers/base/node.c - Basic Node interface support
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/vmstat.h>
#include <linux/notifier.h>
#include <linux/node.h>
#include <linux/hugetlb.h>
#include <linux/compaction.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/swap.h>
#include <linux/slab.h>

static struct bus_type node_subsys = {
	.name = "node",
	.dev_name = "node",
};
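/*
 * Show the CPUs attached to this node, either as a hexadecimal mask
 * (type == 0) or as a human readable CPU list (type != 0).
 */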
static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
{
	struct node *node_dev = to_node(dev);
	const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
	int len;

	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));

	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE-2, mask) :
		cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
	buf[len++] = '\n';
	buf[len] = '\0';
	return len;
}
static inline ssize_t node_read_cpumask(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 0, buf);
}
static inline ssize_t node_read_cpulist(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return node_read_cpumap(dev, 1, buf);
}

static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);

#define K(x) ((x) << (PAGE_SHIFT - 10))
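/*
 * Per-node counterpart of /proc/meminfo, exposed as
 * /sys/devices/system/node/nodeX/meminfo.
 */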
static ssize_t node_read_meminfo(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int n;
	int nid = dev->id;
	struct sysinfo i;

	si_meminfo_node(&i, nid);
	n = sprintf(buf,
		       "Node %d MemTotal:       %8lu kB\n"
		       "Node %d MemFree:        %8lu kB\n"
		       "Node %d MemUsed:        %8lu kB\n"
		       "Node %d Active:         %8lu kB\n"
		       "Node %d Inactive:       %8lu kB\n"
		       "Node %d Active(anon):   %8lu kB\n"
		       "Node %d Inactive(anon): %8lu kB\n"
		       "Node %d Active(file):   %8lu kB\n"
		       "Node %d Inactive(file): %8lu kB\n"
		       "Node %d Unevictable:    %8lu kB\n"
		       "Node %d Mlocked:        %8lu kB\n",
		       nid, K(i.totalram),
		       nid, K(i.freeram),
		       nid, K(i.totalram - i.freeram),
		       nid, K(node_page_state(nid, NR_ACTIVE_ANON) +
				node_page_state(nid, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_INACTIVE_ANON) +
				node_page_state(nid, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_ACTIVE_ANON)),
		       nid, K(node_page_state(nid, NR_INACTIVE_ANON)),
		       nid, K(node_page_state(nid, NR_ACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_INACTIVE_FILE)),
		       nid, K(node_page_state(nid, NR_UNEVICTABLE)),
		       nid, K(node_page_state(nid, NR_MLOCK)));

#ifdef CONFIG_HIGHMEM
	n += sprintf(buf + n,
		       "Node %d HighTotal:      %8lu kB\n"
		       "Node %d HighFree:       %8lu kB\n"
		       "Node %d LowTotal:       %8lu kB\n"
		       "Node %d LowFree:        %8lu kB\n",
		       nid, K(i.totalhigh),
		       nid, K(i.freehigh),
		       nid, K(i.totalram - i.totalhigh),
		       nid, K(i.freeram - i.freehigh));
#endif
	n += sprintf(buf + n,
		       "Node %d Dirty:          %8lu kB\n"
		       "Node %d Writeback:      %8lu kB\n"
		       "Node %d FilePages:      %8lu kB\n"
		       "Node %d Mapped:         %8lu kB\n"
		       "Node %d AnonPages:      %8lu kB\n"
		       "Node %d Shmem:          %8lu kB\n"
		       "Node %d KernelStack:    %8lu kB\n"
		       "Node %d PageTables:     %8lu kB\n"
		       "Node %d NFS_Unstable:   %8lu kB\n"
		       "Node %d Bounce:         %8lu kB\n"
		       "Node %d WritebackTmp:   %8lu kB\n"
		       "Node %d Slab:           %8lu kB\n"
		       "Node %d SReclaimable:   %8lu kB\n"
		       "Node %d SUnreclaim:     %8lu kB\n"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       "Node %d AnonHugePages:  %8lu kB\n"
#endif
			,
		       nid, K(node_page_state(nid, NR_FILE_DIRTY)),
		       nid, K(node_page_state(nid, NR_WRITEBACK)),
		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       nid, K(node_page_state(nid, NR_ANON_PAGES)
			+ node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
			HPAGE_PMD_NR),
#else
		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
#endif
		       nid, K(node_page_state(nid, NR_SHMEM)),
		       nid, node_page_state(nid, NR_KERNEL_STACK) *
				THREAD_SIZE / 1024,
		       nid, K(node_page_state(nid, NR_PAGETABLE)),
		       nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
		       nid, K(node_page_state(nid, NR_BOUNCE)),
		       nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
			, nid,
			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
			HPAGE_PMD_NR));
#else
		       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
#endif
	n += hugetlb_report_node_meminfo(nid, buf + n);
	return n;
}

#undef K
static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
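/*
 * NUMA allocation statistics for this node, exposed as
 * /sys/devices/system/node/nodeX/numastat.
 */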
static ssize_t node_read_numastat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf,
		       "numa_hit %lu\n"
		       "numa_miss %lu\n"
		       "numa_foreign %lu\n"
		       "interleave_hit %lu\n"
		       "local_node %lu\n"
		       "other_node %lu\n",
		       node_page_state(dev->id, NUMA_HIT),
		       node_page_state(dev->id, NUMA_MISS),
		       node_page_state(dev->id, NUMA_FOREIGN),
		       node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
		       node_page_state(dev->id, NUMA_LOCAL),
		       node_page_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
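/* Per-node zone vmstat counters, one "name value" pair per line. */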
static ssize_t node_read_vmstat(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int i;
	int n = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
			     node_page_state(nid, i));

	return n;
}
static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
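/*
 * Distances from this node to each online node, space separated on a
 * single line, exposed as /sys/devices/system/node/nodeX/distance.
 */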
static ssize_t node_read_distance(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	int nid = dev->id;
	int len = 0;
	int i;

	/*
	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
	 * at the most (distance + space or newline).
	 */
	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);

	for_each_online_node(i)
		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));

	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);

#ifdef CONFIG_HUGETLBFS
/*
 * hugetlbfs per node attributes registration interface:
 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 * it will register its per node attributes for all online nodes with
 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 * register its attribute registration functions with this node driver.
 * Once these hooks have been initialized, the node driver will call into
 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 */
static node_registration_func_t __hugetlb_register_node;
static node_registration_func_t __hugetlb_unregister_node;

static inline bool hugetlb_register_node(struct node *node)
{
	if (__hugetlb_register_node &&
			node_state(node->dev.id, N_MEMORY)) {
		__hugetlb_register_node(node);
		return true;
	}
	return false;
}

static inline void hugetlb_unregister_node(struct node *node)
{
	if (__hugetlb_unregister_node)
		__hugetlb_unregister_node(node);
}

void register_hugetlbfs_with_node(node_registration_func_t doregister,
				  node_registration_func_t unregister)
{
	__hugetlb_register_node   = doregister;
	__hugetlb_unregister_node = unregister;
}
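/*
 * Illustrative sketch of the expected usage from the hugetlb side (the
 * callback names below are hypothetical, not taken from this file): once
 * the hugetlb subsystem has set up its per-node attribute code it would do
 * something like
 *
 *	register_hugetlbfs_with_node(hugetlb_node_register_attrs,
 *				     hugetlb_node_unregister_attrs);
 *
 * after which node hotplug events reach those callbacks through
 * hugetlb_register_node()/hugetlb_unregister_node() above.
 */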
#else
static inline void hugetlb_register_node(struct node *node) {}

static inline void hugetlb_unregister_node(struct node *node) {}
#endif
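/*
 * Release callback for a node device; invoked by the driver core once the
 * last reference to the device has been dropped.
 */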
static void node_device_release(struct device *dev)
{
	struct node *node = to_node(dev);

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
	/*
	 * The per-node work is scheduled only when a memory section is
	 * onlined/offlined on this node.  By the time we get here, all
	 * memory on this node has been offlined, so no new work will be
	 * queued for it.
	 *
	 * The work item uses node->node_work, so flush it before freeing
	 * the node structure.
	 */
	flush_work(&node->node_work);
#endif
	kfree(node);
}
/*
 * register_node - Setup a sysfs device for a node.
 * @node: node structure to initialize and register.
 * @num: node number to use when creating the device.
 * @parent: parent node device, if any (currently unused).
 *
 * Initialize and register the node device.
 */
static int register_node(struct node *node, int num, struct node *parent)
{
	int error;

	node->dev.id = num;
	node->dev.bus = &node_subsys;
	node->dev.release = node_device_release;
	error = device_register(&node->dev);

	if (!error) {
		device_create_file(&node->dev, &dev_attr_cpumap);
		device_create_file(&node->dev, &dev_attr_cpulist);
		device_create_file(&node->dev, &dev_attr_meminfo);
		device_create_file(&node->dev, &dev_attr_numastat);
		device_create_file(&node->dev, &dev_attr_distance);
		device_create_file(&node->dev, &dev_attr_vmstat);

		scan_unevictable_register_node(node);

		hugetlb_register_node(node);

		compaction_register_node(node);
	}
	return error;
}

/**
 * unregister_node - unregister a node device
 * @node: node going away
 *
 * Unregisters a node device @node.  All the devices on the node must be
 * unregistered before calling this function.
 */
void unregister_node(struct node *node)
{
	device_remove_file(&node->dev, &dev_attr_cpumap);
	device_remove_file(&node->dev, &dev_attr_cpulist);
	device_remove_file(&node->dev, &dev_attr_meminfo);
	device_remove_file(&node->dev, &dev_attr_numastat);
	device_remove_file(&node->dev, &dev_attr_distance);
	device_remove_file(&node->dev, &dev_attr_vmstat);

	scan_unevictable_unregister_node(node);
	hugetlb_unregister_node(node);		/* no-op, if memoryless node */

	device_unregister(&node->dev);
}

struct node *node_devices[MAX_NUMNODES];
/*
 * register_cpu_under_node - create the sysfs links that tie a CPU to its
 * node: a link to the CPU device under the node, and a link back to the
 * node under the CPU device.
 */
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	int ret;
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
				&obj->kobj,
				kobject_name(&obj->kobj));
	if (ret)
		return ret;

	return sysfs_create_link(&obj->kobj,
				 &node_devices[nid]->dev.kobj,
				 kobject_name(&node_devices[nid]->dev.kobj));
}
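/* Remove the node<->cpu symlinks created by register_cpu_under_node(). */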
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
	struct device *obj;

	if (!node_online(nid))
		return 0;

	obj = get_cpu_device(cpu);
	if (!obj)
		return 0;

	sysfs_remove_link(&node_devices[nid]->dev.kobj,
			  kobject_name(&obj->kobj));
	sysfs_remove_link(&obj->kobj,
			  kobject_name(&node_devices[nid]->dev.kobj));

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
#define page_initialized(page)  (page->lru.next)
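/*
 * Return the node id for an initialized pfn, or -1 if the pfn is not
 * valid or its struct page has not been initialized yet.
 */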
static int get_nid_for_pfn(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid_within(pfn))
		return -1;
	page = pfn_to_page(pfn);
	if (!page_initialized(page))
		return -1;
	return pfn_to_nid(pfn);
}

/* register memory section under specified node if it spans that node */
int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
{
	int ret;
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk)
		return -EFAULT;
	if (!node_online(nid))
		return 0;

	sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr);
	sect_end_pfn += PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int page_nid;

		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;
		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
					&mem_blk->dev.kobj,
					kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;

		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid]->dev.kobj,
				kobject_name(&node_devices[nid]->dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
}

/* unregister memory section under all nodes that it spans */
int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
				    unsigned long phys_index)
{
	NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
	unsigned long pfn, sect_start_pfn, sect_end_pfn;

	if (!mem_blk) {
		NODEMASK_FREE(unlinked_nodes);
		return -EFAULT;
	}
	if (!unlinked_nodes)
		return -ENOMEM;
	nodes_clear(*unlinked_nodes);

	sect_start_pfn = section_nr_to_pfn(phys_index);
	sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
	for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
		int nid;

		nid = get_nid_for_pfn(pfn);
		if (nid < 0)
			continue;
		if (!node_online(nid))
			continue;
		if (node_test_and_set(nid, *unlinked_nodes))
			continue;
		sysfs_remove_link(&node_devices[nid]->dev.kobj,
			 kobject_name(&mem_blk->dev.kobj));
		sysfs_remove_link(&mem_blk->dev.kobj,
			 kobject_name(&node_devices[nid]->dev.kobj));
	}
	NODEMASK_FREE(unlinked_nodes);
	return 0;
}
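/*
 * Walk every present memory section in the pfn range spanned by @nid and
 * link the corresponding memory blocks to the node in sysfs.
 */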
static int link_mem_sections(int nid)
{
	unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn;
	unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages;
	unsigned long pfn;
	struct memory_block *mem_blk = NULL;
	int err = 0;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *mem_sect;
		int ret;

		if (!present_section_nr(section_nr))
			continue;
		mem_sect = __nr_to_section(section_nr);

		/* same memblock ? */
		if (mem_blk)
			if ((section_nr >= mem_blk->start_section_nr) &&
			    (section_nr <= mem_blk->end_section_nr))
				continue;

		mem_blk = find_memory_block_hinted(mem_sect, mem_blk);

		ret = register_mem_sect_under_node(mem_blk, nid);
		if (!err)
			err = ret;
		/* discard ref obtained in find_memory_block_hinted() */
	}

	if (mem_blk)
		kobject_put(&mem_blk->dev.kobj);
	return err;
}

#ifdef CONFIG_HUGETLBFS
/*
 * Handle per node hstate attribute [un]registration on transitions
 * to/from memoryless state.
 */
static void node_hugetlb_work(struct work_struct *work)
{
	struct node *node = container_of(work, struct node, node_work);

	/*
	 * We only get here when a node transitions to/from memoryless state.
	 * We can detect which transition occurred by examining whether the
	 * node has memory now.  hugetlb_register_node() already checks this,
	 * so we simply try to register the attributes.  If that fails, the
	 * node has transitioned to memoryless; try to unregister the
	 * attributes instead.
	 */
	if (!hugetlb_register_node(node))
		hugetlb_unregister_node(node);
}

static void init_node_hugetlb_work(int nid)
{
	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
}
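/*
 * Memory hotplug notifier: when an online/offline event changes which
 * node has memory (status_change_nid is set), schedule the per-node work
 * that updates the hugetlb attributes for that node.
 */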
static int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	struct memory_notify *mnb = arg;
	int nid = mnb->status_change_nid;

	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		/*
		 * offload per node hstate [un]registration to a work thread
		 * when transitioning to/from memoryless state.
		 */
		if (nid != NUMA_NO_NODE)
			schedule_work(&node_devices[nid]->node_work);
		break;

	case MEM_GOING_ONLINE:
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	default:
		break;
	}

	return NOTIFY_OK;
}
#endif	/* CONFIG_HUGETLBFS */
#else	/* !CONFIG_MEMORY_HOTPLUG_SPARSE */

static int link_mem_sections(int nid) { return 0; }
#endif	/* CONFIG_MEMORY_HOTPLUG_SPARSE */

#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
    !defined(CONFIG_HUGETLBFS)
static inline int node_memory_callback(struct notifier_block *self,
				unsigned long action, void *arg)
{
	return NOTIFY_OK;
}

static void init_node_hugetlb_work(int nid) { }

#endif
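/*
 * register_one_node - bring up the full sysfs representation of a node:
 * the node device itself, links to its present CPUs, links to its memory
 * sections, and the work item used for hugetlb attribute updates on
 * memory hotplug.
 */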
int register_one_node(int nid)
{
	int error = 0;
	int cpu;

	if (node_online(nid)) {
		int p_node = parent_node(nid);
		struct node *parent = NULL;

		if (p_node != nid)
			parent = node_devices[p_node];

		node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
		if (!node_devices[nid])
			return -ENOMEM;

		error = register_node(node_devices[nid], nid, parent);

		/* link cpu under this node */
		for_each_present_cpu(cpu) {
			if (cpu_to_node(cpu) == nid)
				register_cpu_under_node(cpu, nid);
		}

		/* link memory sections under this node */
		error = link_mem_sections(nid);

		/* initialize work queue for memory hot plug */
		init_node_hugetlb_work(nid);
	}

	return error;
}

void unregister_one_node(int nid)
{
	unregister_node(node_devices[nid]);
	node_devices[nid] = NULL;
}
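/*
 * Illustrative caller sketch (not code from this file): a hotplug path
 * that discovers a new node is expected to do roughly
 *
 *	if (!node_online(nid)) {
 *		...bring the node online...
 *		register_one_node(nid);
 *	}
 *
 * and, symmetrically, call unregister_one_node(nid) once every device on
 * the node has been unregistered.
 */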
/*
 * node states attributes
 */
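/* Print a node state mask as a node list (e.g. "0-3"), newline terminated. */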
static ssize_t print_nodes_state(enum node_states state, char *buf)
{
	int n;

	n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

struct node_attr {
	struct device_attribute attr;
	enum node_states state;
};

static ssize_t show_node_state(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct node_attr *na = container_of(attr, struct node_attr, attr);
	return print_nodes_state(na->state, buf);
}

#define _NODE_ATTR(name, state) \
	{ __ATTR(name, 0444, show_node_state, NULL), state }

static struct node_attr node_state_attr[] = {
	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
#endif
	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
};

static struct attribute *node_state_attrs[] = {
	&node_state_attr[N_POSSIBLE].attr.attr,
	&node_state_attr[N_ONLINE].attr.attr,
	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
#ifdef CONFIG_HIGHMEM
	&node_state_attr[N_HIGH_MEMORY].attr.attr,
#endif
#ifdef CONFIG_MOVABLE_NODE
	&node_state_attr[N_MEMORY].attr.attr,
#endif
	&node_state_attr[N_CPU].attr.attr,
	NULL
};

static struct attribute_group memory_root_attr_group = {
	.attrs = node_state_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
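/*
 * Register the "node" subsystem, its root node-state attributes and the
 * memory hotplug notifier used for the per-node hugetlb work.
 */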
static int __init register_node_type(void)
{
	int ret;

	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);

	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
	if (!ret) {
		static struct notifier_block node_memory_callback_nb = {
			.notifier_call = node_memory_callback,
			.priority = NODE_CALLBACK_PRI,
		};
		register_hotmemory_notifier(&node_memory_callback_nb);
	}

	/*
	 * Note:  we're not going to unregister the node class if we fail
	 * to register the node state class attribute files.
	 */
	return ret;
}
postcore_initcall(register_node_type);