/*
 * PowerPC64 LPAR Configuration Information Driver
 *
 * Dave Engebretsen engebret@us.ibm.com
 *    Copyright (c) 2003 Dave Engebretsen
 * Will Schmidt willschm@us.ibm.com
 *    SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
 *    seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
 * Nathan Lynch nathanl@austin.ibm.com
 *    Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * This driver creates a proc file at /proc/powerpc/lparcfg (also visible
 * as /proc/ppc64/lparcfg) which contains keyword - value pairs that
 * specify the configuration of the partition.
 */
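
/*
 * Illustrative (not exhaustive) sample of the keyword - value output;
 * which fields appear depends on the platform and firmware features,
 * and the values below are examples only:
 *
 *   lparcfg 1.9
 *   partition_entitled_capacity=100
 *   capacity_weight=128
 *   capped=0
 *   partition_active_processors=2
 *   partition_potential_processors=8
 *   shared_processor_mode=1
 */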

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#include <asm/firmware.h>
#include <asm/rtas.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
#include <asm/machdep.h>

#include "pseries.h"

/*
 * This isn't a module but we expose that to userspace
 * via /proc so leave the definitions here
 */
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"

/* #define LPARCFG_DEBUG */

/*
 * Track the sum of the PURR (Processor Utilization of Resources Register)
 * across all processors.  Userspace tools use this to derive processor
 * utilization values.
 */
static void cpu_get_purr(void *arg)
{
	atomic64_t *sum = arg;

	atomic64_add(mfspr(SPRN_PURR), sum);
}

static unsigned long get_purr(void)
{
	atomic64_t purr = ATOMIC64_INIT(0);

	on_each_cpu(cpu_get_purr, &purr, 1);

	return atomic64_read(&purr);
}

/*
 * Methods used to fetch LPAR data when running on a pSeries platform.
 */

struct hvcall_ppp_data {
	u64	entitlement;
	u64	unallocated_entitlement;
	u16	group_num;
	u16	pool_num;
	u8	capped;
	u8	weight;
	u8	unallocated_weight;
	u16	active_procs_in_pool;
	u16	active_system_procs;
	u16	phys_platform_procs;
	u32	max_proc_cap_avail;
	u32	entitled_proc_cap_avail;
};

/*
 * The H_GET_PPP hcall returns info in 4 parms
 * (entitled_capacity, unallocated_capacity,
 *  aggregation, resource_capability).
 *
 *  R4 = Entitled Processor Capacity Percentage.
 *  R5 = Unallocated Processor Capacity Percentage.
 *  R6 (AABBCCDDEEFFGGHH).
 *      XXXX - reserved (0)
 *          XXXX - reserved (0)
 *              XXXX - Group Number
 *                  XXXX - Pool Number.
 *  R7 (IIJJKKLLMMNNOOPP).
 *      XX - reserved. (0)
 *        XX - bit 0-6 reserved (0).   bit 7 is Capped indicator.
 *          XX - variable processor Capacity Weight
 *            XX - Unallocated Variable Processor Capacity Weight.
 *              XXXX - Active processors in Physical Processor Pool.
 *                  XXXX  - Processors active on platform.
 *  R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
 *	XXXX - Physical platform procs allocated to virtualization.
 *	    XXXXXX - Max procs capacity % available to the partition's pool.
 *	          XXXXXX - Entitled procs capacity % available to the
 *			   partition's pool.
 */
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
	unsigned long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_GET_PPP, retbuf);

	ppp_data->entitlement = retbuf[0];
	ppp_data->unallocated_entitlement = retbuf[1];

	ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
	ppp_data->pool_num = retbuf[2] & 0xffff;

	ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
	ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
	ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
	ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
	ppp_data->active_system_procs = retbuf[3] & 0xffff;

	ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
	ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
	ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;

	return rc;
}

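/*
 * H_PIC reports on the shared processor pool: retbuf[0] is the
 * accumulated pool idle time and retbuf[1] is the number of processors
 * in the pool.  Only meaningful for shared processor partitions.
 */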
static unsigned h_pic(unsigned long *pool_idle_time,
		      unsigned long *num_procs)
{
	unsigned long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_PIC, retbuf);

	*pool_idle_time = retbuf[0];
	*num_procs = retbuf[1];

	return rc;
}

/*
 * parse_ppp_data
 * Parse out the data returned from h_get_ppp and h_pic
 */
static void parse_ppp_data(struct seq_file *m)
{
	struct hvcall_ppp_data ppp_data;
	struct device_node *root;
	const __be32 *perf_level;
	int rc;

	rc = h_get_ppp(&ppp_data);
	if (rc)
		return;

	seq_printf(m, "partition_entitled_capacity=%lld\n",
	           ppp_data.entitlement);
	seq_printf(m, "group=%d\n", ppp_data.group_num);
	seq_printf(m, "system_active_processors=%d\n",
	           ppp_data.active_system_procs);

	/* pool related entries are appropriate for shared configs */
	if (lppaca_shared_proc(get_lppaca())) {
		unsigned long pool_idle_time, pool_procs;

		seq_printf(m, "pool=%d\n", ppp_data.pool_num);

		/* report pool_capacity in percentage */
		seq_printf(m, "pool_capacity=%d\n",
			   ppp_data.active_procs_in_pool * 100);

		h_pic(&pool_idle_time, &pool_procs);
		seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
		seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
	}

	seq_printf(m, "unallocated_capacity_weight=%d\n",
		   ppp_data.unallocated_weight);
	seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
	seq_printf(m, "capped=%d\n", ppp_data.capped);
	seq_printf(m, "unallocated_capacity=%lld\n",
		   ppp_data.unallocated_entitlement);

	/* The last bits of information returned from h_get_ppp are only
	 * valid if the ibm,partition-performance-parameters-level
	 * property is >= 1.
	 */
	root = of_find_node_by_path("/");
	if (root) {
		perf_level = of_get_property(root,
				"ibm,partition-performance-parameters-level",
					     NULL);
		if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
			seq_printf(m,
			    "physical_procs_allocated_to_virtualization=%d\n",
				   ppp_data.phys_platform_procs);
			seq_printf(m, "max_proc_capacity_available=%d\n",
				   ppp_data.max_proc_cap_avail);
			seq_printf(m, "entitled_proc_capacity_available=%d\n",
				   ppp_data.entitled_proc_cap_avail);
		}

		of_node_put(root);
	}
}

/**
 * parse_mpp_data
 * Parse out data returned from h_get_mpp
 */
static void parse_mpp_data(struct seq_file *m)
{
	struct hvcall_mpp_data mpp_data;
	int rc;

	rc = h_get_mpp(&mpp_data);
	if (rc)
		return;

	seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);

	if (mpp_data.mapped_mem != -1)
		seq_printf(m, "mapped_entitled_memory=%ld\n",
		           mpp_data.mapped_mem);

	seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
	seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);

	seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
	seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
	           mpp_data.unallocated_mem_weight);
	seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
	           mpp_data.unallocated_entitlement);

	if (mpp_data.pool_size != -1)
		seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
		           mpp_data.pool_size);

	seq_printf(m, "entitled_memory_loan_request=%ld\n",
	           mpp_data.loan_request);

	seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
}

/**
 * parse_mpp_x_data
 * Parse out data returned from h_get_mpp_x
 */
static void parse_mpp_x_data(struct seq_file *m)
{
	struct hvcall_mpp_x_data mpp_x_data;

	if (!firmware_has_feature(FW_FEATURE_XCMO))
		return;
	if (h_get_mpp_x(&mpp_x_data))
		return;

	seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);

	if (mpp_x_data.pool_coalesced_bytes)
		seq_printf(m, "pool_coalesced_bytes=%ld\n",
			   mpp_x_data.pool_coalesced_bytes);
	if (mpp_x_data.pool_purr_cycles)
		seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
	if (mpp_x_data.pool_spurr_cycles)
		seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
}

#define SPLPAR_CHARACTERISTICS_TOKEN 20
#define SPLPAR_MAXLENGTH 1026*(sizeof(char))

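/*
 * The SPLPAR characteristics system parameter is returned as a 2-byte
 * big-endian length followed by a comma separated string of
 * keyword=value pairs, e.g. (illustrative) "MaxEntCap=400,MaxPlatProcs=4".
 * parse_system_parameter_string() walks that string, emitting one keyword
 * per line and renaming a couple of keywords along the way.
 */
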
/*
 * parse_system_parameter_string()
 * Retrieve the potential_processors, max_entitled_capacity and friends
 * through the get-system-parameter rtas call.  Replace keyword strings as
 * necessary.
 */
static void parse_system_parameter_string(struct seq_file *m)
{
	int call_status;

	unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
	if (!local_buffer) {
		printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
		       __FILE__, __func__, __LINE__);
		return;
	}

	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				SPLPAR_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);
	memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
	local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
	spin_unlock(&rtas_data_buf_lock);

	if (call_status != 0) {
		printk(KERN_INFO
		       "%s %s Error calling get-system-parameter (0x%x)\n",
		       __FILE__, __func__, call_status);
	} else {
		int splpar_strlen;
		int idx, w_idx;
		char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
		if (!workbuffer) {
			printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
			       __FILE__, __func__, __LINE__);
			kfree(local_buffer);
			return;
		}
#ifdef LPARCFG_DEBUG
		printk(KERN_INFO "success calling get-system-parameter\n");
#endif
		splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
		local_buffer += 2;	/* step over strlen value */

		w_idx = 0;
		idx = 0;
		while ((*local_buffer) && (idx < splpar_strlen)) {
			workbuffer[w_idx++] = local_buffer[idx++];
			if ((local_buffer[idx] == ',')
			    || (local_buffer[idx] == '\0')) {
				workbuffer[w_idx] = '\0';
				if (w_idx) {
					/* avoid the empty string */
					seq_printf(m, "%s\n", workbuffer);
				}
				memset(workbuffer, 0, SPLPAR_MAXLENGTH);
				idx++;	/* skip the comma */
				w_idx = 0;
			} else if (local_buffer[idx] == '=') {
				/* code here to replace workbuffer contents
				   with different keyword strings */
				if (0 == strcmp(workbuffer, "MaxEntCap")) {
					strcpy(workbuffer,
					       "partition_max_entitled_capacity");
					w_idx = strlen(workbuffer);
				}
				if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
					strcpy(workbuffer,
					       "system_potential_processors");
					w_idx = strlen(workbuffer);
				}
			}
		}
		kfree(workbuffer);
		local_buffer -= 2;	/* back up over strlen value */
	}
	kfree(local_buffer);
}

/* Return the number of processors in the system.
 * This function reads through the device tree and counts
 * the virtual processors; it does not include threads.
 */
static int lparcfg_count_active_processors(void)
{
	struct device_node *cpus_dn;
	int count = 0;

	for_each_node_by_type(cpus_dn, "cpu") {
#ifdef LPARCFG_DEBUG
		printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
#endif
		count++;
	}
	return count;
}

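/*
 * Report Cooperative Memory Overcommit (CMO) statistics, summing the
 * fault counts and fault time recorded in each possible CPU's lppaca.
 */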
static void pseries_cmo_data(struct seq_file *m)
{
	int cpu;
	unsigned long cmo_faults = 0;
	unsigned long cmo_fault_time = 0;

	seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));

	if (!firmware_has_feature(FW_FEATURE_CMO))
		return;

	for_each_possible_cpu(cpu) {
		cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
		cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
	}

	seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
	seq_printf(m, "cmo_fault_time_usec=%lu\n",
		   cmo_fault_time / tb_ticks_per_usec);
	seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
	seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
	seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
}

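/*
 * Report hypervisor dispatch statistics, summing the dispatch (yield)
 * and dispatch dispersion counts from each possible CPU's lppaca.
 */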
static void splpar_dispatch_data(struct seq_file *m)
{
	int cpu;
	unsigned long dispatches = 0;
	unsigned long dispatch_dispersions = 0;

	for_each_possible_cpu(cpu) {
		dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
		dispatch_dispersions +=
			be32_to_cpu(lppaca_of(cpu).dispersion_count);
	}

	seq_printf(m, "dispatches=%lu\n", dispatches);
	seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
}

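/*
 * Report the energy management (power mode) data returned by the
 * H_GET_EM_PARMS hcall, when running under a hypervisor.
 */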
static void parse_em_data(struct seq_file *m)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	if (firmware_has_feature(FW_FEATURE_LPAR) &&
	    plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}

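/*
 * Emit the bulk of the lparcfg keywords.  Shared processor (SPLPAR)
 * partitions get the full hypervisor-derived data; otherwise a reduced
 * set is synthesized from the processor counts alone.
 */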
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
	int partition_potential_processors;
	int partition_active_processors;
	struct device_node *rtas_node;
	const __be32 *lrdrp = NULL;

	rtas_node = of_find_node_by_path("/rtas");
	if (rtas_node)
		lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);

	if (lrdrp == NULL) {
		partition_potential_processors = vdso_data->processorCount;
	} else {
		partition_potential_processors = be32_to_cpup(lrdrp + 4);
	}
	of_node_put(rtas_node);

	partition_active_processors = lparcfg_count_active_processors();

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		/* this call handles the ibm,get-system-parameter contents */
		parse_system_parameter_string(m);
		parse_ppp_data(m);
		parse_mpp_data(m);
		parse_mpp_x_data(m);
		pseries_cmo_data(m);
		splpar_dispatch_data(m);

		seq_printf(m, "purr=%ld\n", get_purr());
	} else {		/* non SPLPAR case */

		seq_printf(m, "system_active_processors=%d\n",
			   partition_potential_processors);

		seq_printf(m, "system_potential_processors=%d\n",
			   partition_potential_processors);

		seq_printf(m, "partition_max_entitled_capacity=%d\n",
			   partition_potential_processors * 100);

		seq_printf(m, "partition_entitled_capacity=%d\n",
			   partition_active_processors * 100);
	}

	seq_printf(m, "partition_active_processors=%d\n",
		   partition_active_processors);

	seq_printf(m, "partition_potential_processors=%d\n",
		   partition_potential_processors);

	seq_printf(m, "shared_processor_mode=%d\n",
		   lppaca_shared_proc(get_lppaca()));

#ifdef CONFIG_PPC_BOOK3S_64
	seq_printf(m, "slb_size=%d\n", mmu_slb_size);
#endif
	parse_em_data(m);

	return 0;
}

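/*
 * update_ppp
 *
 * Update the processor entitled capacity or the variable capacity weight
 * of the partition via H_SET_PPP.  Exactly one of @entitlement or @weight
 * may be non-NULL; the current value of the other is fetched with
 * h_get_ppp() and passed back unchanged, since H_SET_PPP takes both.
 */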
static ssize_t update_ppp(u64 *entitlement, u8 *weight)
{
	struct hvcall_ppp_data ppp_data;
	u8 new_weight;
	u64 new_entitled;
	ssize_t retval;

	/* Get our current parameters */
	retval = h_get_ppp(&ppp_data);
	if (retval)
		return retval;

	if (entitlement) {
		new_weight = ppp_data.weight;
		new_entitled = *entitlement;
	} else if (weight) {
		new_weight = *weight;
		new_entitled = ppp_data.entitlement;
	} else
		return -EINVAL;

	pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
		 __func__, ppp_data.entitlement, ppp_data.weight);

	pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
		 __func__, new_entitled, new_weight);

	retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
	return retval;
}

/**
 * update_mpp
 *
 * Update the memory entitlement and weight for the partition.  Caller must
 * specify either a new entitlement or weight, not both, to be updated
 * since the h_set_mpp call takes both entitlement and weight as parameters.
 */
static ssize_t update_mpp(u64 *entitlement, u8 *weight)
{
	struct hvcall_mpp_data mpp_data;
	u64 new_entitled;
	u8 new_weight;
	ssize_t rc;

	if (entitlement) {
		/* Check with vio to ensure the new memory entitlement
		 * can be handled.
		 */
		rc = vio_cmo_entitlement_update(*entitlement);
		if (rc)
			return rc;
	}

	rc = h_get_mpp(&mpp_data);
	if (rc)
		return rc;

	if (entitlement) {
		new_weight = mpp_data.mem_weight;
		new_entitled = *entitlement;
	} else if (weight) {
		new_weight = *weight;
		new_entitled = mpp_data.entitled_mem;
	} else
		return -EINVAL;

	pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
	         __func__, mpp_data.entitled_mem, mpp_data.mem_weight);

	pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
		 __func__, new_entitled, new_weight);

	rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
	return rc;
}

/*
 * Interface for changing system parameters (variable capacity weight,
 * entitled capacity, entitled memory and entitled memory weight).
 * Format of input is "param_name=value"; anything after value is
 * ignored.  Valid parameters at this time are
 * "partition_entitled_capacity", "capacity_weight", "entitled_memory"
 * and "entitled_memory_weight".  We use H_SET_PPP to alter the processor
 * parameters and H_SET_MPP for the memory parameters.
 *
 * This function should be invoked only on systems with
 * FW_FEATURE_SPLPAR.
 */
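/*
 * Illustrative usage from userspace (values are examples only):
 *
 *   echo "partition_entitled_capacity=100" > /proc/powerpc/lparcfg
 *   echo "capacity_weight=128" > /proc/powerpc/lparcfg
 *   echo "entitled_memory_weight=64" > /proc/powerpc/lparcfg
 */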
static ssize_t lparcfg_write(struct file *file, const char __user * buf,
			     size_t count, loff_t * off)
{
	int kbuf_sz = 64;
	char kbuf[kbuf_sz];
	char *tmp;
	u64 new_entitled, *new_entitled_ptr = &new_entitled;
	u8 new_weight, *new_weight_ptr = &new_weight;
	ssize_t retval;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -EINVAL;

	if (count > kbuf_sz)
		return -EINVAL;

	if (copy_from_user(kbuf, buf, count))
		return -EFAULT;

	kbuf[count - 1] = '\0';
	tmp = strchr(kbuf, '=');
	if (!tmp)
		return -EINVAL;

	*tmp++ = '\0';

	if (!strcmp(kbuf, "partition_entitled_capacity")) {
		char *endp;
		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
		if (endp == tmp)
			return -EINVAL;

		retval = update_ppp(new_entitled_ptr, NULL);
	} else if (!strcmp(kbuf, "capacity_weight")) {
		char *endp;
		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
		if (endp == tmp)
			return -EINVAL;

		retval = update_ppp(NULL, new_weight_ptr);
	} else if (!strcmp(kbuf, "entitled_memory")) {
		char *endp;
		*new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
		if (endp == tmp)
			return -EINVAL;

		retval = update_mpp(new_entitled_ptr, NULL);
	} else if (!strcmp(kbuf, "entitled_memory_weight")) {
		char *endp;
		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
		if (endp == tmp)
			return -EINVAL;

		retval = update_mpp(NULL, new_weight_ptr);
	} else
		return -EINVAL;

	if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
		retval = count;
	} else if (retval == H_BUSY) {
		retval = -EBUSY;
	} else if (retval == H_HARDWARE) {
		retval = -EIO;
	} else if (retval == H_PARAMETER) {
		retval = -EINVAL;
	}

	return retval;
}

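/*
 * Top level seq_file show routine: print the driver name/version, then
 * the model, system-id and partition number from the device tree root,
 * followed by the platform data from pseries_lparcfg_data().
 */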
static int lparcfg_data(struct seq_file *m, void *v)
{
	struct device_node *rootdn;
	const char *model = "";
	const char *system_id = "";
	const char *tmp;
	const __be32 *lp_index_ptr;
	unsigned int lp_index = 0;

	seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);

	rootdn = of_find_node_by_path("/");
	if (rootdn) {
		tmp = of_get_property(rootdn, "model", NULL);
		if (tmp)
			model = tmp;
		tmp = of_get_property(rootdn, "system-id", NULL);
		if (tmp)
			system_id = tmp;
		lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
					NULL);
		if (lp_index_ptr)
			lp_index = be32_to_cpup(lp_index_ptr);
		of_node_put(rootdn);
	}
	seq_printf(m, "serial_number=%s\n", system_id);
	seq_printf(m, "system_type=%s\n", model);
	seq_printf(m, "partition_id=%d\n", (int)lp_index);

	return pseries_lparcfg_data(m, v);
}

static int lparcfg_open(struct inode *inode, struct file *file)
{
	return single_open(file, lparcfg_data, NULL);
}

static const struct file_operations lparcfg_fops = {
	.read		= seq_read,
	.write		= lparcfg_write,
	.open		= lparcfg_open,
	.release	= single_release,
	.llseek		= seq_lseek,
};

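/*
 * Create /proc/powerpc/lparcfg, world-readable and additionally writable
 * by root when the platform supports shared processor LPAR (SPLPAR).
 */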
static int __init lparcfg_init(void)
{
	umode_t mode = 0444;

	/* Allow writing if we have FW_FEATURE_SPLPAR */
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		mode |= 0200;

	if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
		printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
		return -EIO;
	}
	return 0;
}
machine_device_initcall(pseries, lparcfg_init);