xref: /openbmc/linux/drivers/s390/char/sclp_cmd.c (revision e6c81cce)
1 /*
2  * Copyright IBM Corp. 2007,2012
3  *
4  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
5  *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
6  */
7 
8 #define KMSG_COMPONENT "sclp_cmd"
9 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10 
11 #include <linux/completion.h>
12 #include <linux/init.h>
13 #include <linux/errno.h>
14 #include <linux/err.h>
15 #include <linux/export.h>
16 #include <linux/slab.h>
17 #include <linux/string.h>
18 #include <linux/mm.h>
19 #include <linux/mmzone.h>
20 #include <linux/memory.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <asm/ctl_reg.h>
24 #include <asm/chpid.h>
25 #include <asm/setup.h>
26 #include <asm/page.h>
27 #include <asm/sclp.h>
28 
29 #include "sclp.h"
30 
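/*
 * Synchronous wrapper around the low-level SCLP request interface:
 * sclp_sync_request_timeout() fills in a struct sclp_req, queues it with
 * sclp_add_request() and sleeps on a completion which sclp_sync_callback()
 * signals once the request has been processed.  A non-zero timeout limits
 * how long the request may stay queued (request->queue_timeout).  Callers
 * in this file allocate their SCCBs with GFP_DMA, since SCLP control
 * blocks have to reside below 2 GB on s390.
 */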
31 static void sclp_sync_callback(struct sclp_req *req, void *data)
32 {
33 	struct completion *completion = data;
34 
35 	complete(completion);
36 }
37 
38 int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
39 {
40 	return sclp_sync_request_timeout(cmd, sccb, 0);
41 }
42 
43 int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
44 {
45 	struct completion completion;
46 	struct sclp_req *request;
47 	int rc;
48 
49 	request = kzalloc(sizeof(*request), GFP_KERNEL);
50 	if (!request)
51 		return -ENOMEM;
52 	if (timeout)
53 		request->queue_timeout = timeout;
54 	request->command = cmd;
55 	request->sccb = sccb;
56 	request->status = SCLP_REQ_FILLED;
57 	request->callback = sclp_sync_callback;
58 	request->callback_data = &completion;
59 	init_completion(&completion);
60 
61 	/* Perform sclp request. */
62 	rc = sclp_add_request(request);
63 	if (rc)
64 		goto out;
65 	wait_for_completion(&completion);
66 
67 	/* Check response. */
68 	if (request->status != SCLP_REQ_DONE) {
69 		pr_warning("sync request failed (cmd=0x%08x, "
70 			   "status=0x%02x)\n", cmd, request->status);
71 		rc = -EIO;
72 	}
73 out:
74 	kfree(request);
75 	return rc;
76 }
77 
78 /*
79  * CPU configuration related functions.
80  */
81 
82 #define SCLP_CMDW_READ_CPU_INFO		0x00010001
83 #define SCLP_CMDW_CONFIGURE_CPU		0x00110001
84 #define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
85 
86 struct read_cpu_info_sccb {
87 	struct	sccb_header header;
88 	u16	nr_configured;
89 	u16	offset_configured;
90 	u16	nr_standby;
91 	u16	offset_standby;
92 	u8	reserved[4096 - 16];
93 } __attribute__((packed, aligned(PAGE_SIZE)));
94 
95 static void sclp_fill_cpu_info(struct sclp_cpu_info *info,
96 			       struct read_cpu_info_sccb *sccb)
97 {
98 	char *page = (char *) sccb;
99 
100 	memset(info, 0, sizeof(*info));
101 	info->configured = sccb->nr_configured;
102 	info->standby = sccb->nr_standby;
103 	info->combined = sccb->nr_configured + sccb->nr_standby;
104 	info->has_cpu_type = sclp_fac84 & 0x1;
105 	memcpy(&info->cpu, page + sccb->offset_configured,
106 	       info->combined * sizeof(struct sclp_cpu_entry));
107 }
108 
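/*
 * Read the list of configured and standby CPUs.  The SCCB is a single
 * zeroed page; on success (response code 0x0010) the entry array starting
 * at offset_configured is assumed to hold the configured entries
 * immediately followed by the standby entries, which sclp_fill_cpu_info()
 * copies into *info in one go.
 */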
109 int sclp_get_cpu_info(struct sclp_cpu_info *info)
110 {
111 	int rc;
112 	struct read_cpu_info_sccb *sccb;
113 
114 	if (!SCLP_HAS_CPU_INFO)
115 		return -EOPNOTSUPP;
116 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
117 	if (!sccb)
118 		return -ENOMEM;
119 	sccb->header.length = sizeof(*sccb);
120 	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
121 				       SCLP_QUEUE_INTERVAL);
122 	if (rc)
123 		goto out;
124 	if (sccb->header.response_code != 0x0010) {
125 		pr_warning("readcpuinfo failed (response=0x%04x)\n",
126 			   sccb->header.response_code);
127 		rc = -EIO;
128 		goto out;
129 	}
130 	sclp_fill_cpu_info(info, sccb);
131 out:
132 	free_page((unsigned long) sccb);
133 	return rc;
134 }
135 
136 struct cpu_configure_sccb {
137 	struct sccb_header header;
138 } __attribute__((packed, aligned(8)));
139 
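/*
 * Common helper for the configure/deconfigure CPU commands.  Response
 * codes 0x0020 and 0x0120 are both treated as success, everything else is
 * reported and mapped to -EIO.
 */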
140 static int do_cpu_configure(sclp_cmdw_t cmd)
141 {
142 	struct cpu_configure_sccb *sccb;
143 	int rc;
144 
145 	if (!SCLP_HAS_CPU_RECONFIG)
146 		return -EOPNOTSUPP;
147 	/*
148 	 * This is not going to cross a page boundary since we force
149 	 * kmalloc to have a minimum alignment of 8 bytes on s390.
150 	 */
151 	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
152 	if (!sccb)
153 		return -ENOMEM;
154 	sccb->header.length = sizeof(*sccb);
155 	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
156 	if (rc)
157 		goto out;
158 	switch (sccb->header.response_code) {
159 	case 0x0020:
160 	case 0x0120:
161 		break;
162 	default:
163 		pr_warning("configure cpu failed (cmd=0x%08x, "
164 			   "response=0x%04x)\n", cmd,
165 			   sccb->header.response_code);
166 		rc = -EIO;
167 		break;
168 	}
169 out:
170 	kfree(sccb);
171 	return rc;
172 }
173 
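/* The target CPU address is encoded in bits 8-15 of the command word. */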
174 int sclp_cpu_configure(u8 cpu)
175 {
176 	return do_cpu_configure(SCLP_CMDW_CONFIGURE_CPU | cpu << 8);
177 }
178 
179 int sclp_cpu_deconfigure(u8 cpu)
180 {
181 	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
182 }
183 
184 #ifdef CONFIG_MEMORY_HOTPLUG
185 
186 static DEFINE_MUTEX(sclp_mem_mutex);
187 static LIST_HEAD(sclp_mem_list);
188 static u8 sclp_max_storage_id;
189 static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
190 static int sclp_mem_state_changed;
191 
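/*
 * Standby memory is handled in increments of sclp_rzm bytes.  Each
 * increment is identified by a 1-based increment number (RN) and tracked
 * by a struct memory_increment on sclp_mem_list, sorted by RN.
 */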
192 struct memory_increment {
193 	struct list_head list;
194 	u16 rn;
195 	int standby;
196 };
197 
198 struct assign_storage_sccb {
199 	struct sccb_header header;
200 	u16 rn;
201 } __packed;
202 
203 int arch_get_memory_phys_device(unsigned long start_pfn)
204 {
205 	if (!sclp_rzm)
206 		return 0;
207 	return PFN_PHYS(start_pfn) >> ilog2(sclp_rzm);
208 }
209 
210 static unsigned long long rn2addr(u16 rn)
211 {
212 	return (unsigned long long) (rn - 1) * sclp_rzm;
213 }
214 
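/*
 * Issue an assign (cmd 0x000d0001) or unassign (cmd 0x000c0001) storage
 * command for a single increment.  The increment number is passed in the
 * SCCB rather than in the command word.
 */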
215 static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
216 {
217 	struct assign_storage_sccb *sccb;
218 	int rc;
219 
220 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
221 	if (!sccb)
222 		return -ENOMEM;
223 	sccb->header.length = PAGE_SIZE;
224 	sccb->rn = rn;
225 	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
226 	if (rc)
227 		goto out;
228 	switch (sccb->header.response_code) {
229 	case 0x0020:
230 	case 0x0120:
231 		break;
232 	default:
233 		pr_warning("assign storage failed (cmd=0x%08x, "
234 			   "response=0x%04x, rn=0x%04x)\n", cmd,
235 			   sccb->header.response_code, rn);
236 		rc = -EIO;
237 		break;
238 	}
239 out:
240 	free_page((unsigned long) sccb);
241 	return rc;
242 }
243 
244 static int sclp_assign_storage(u16 rn)
245 {
246 	unsigned long long start;
247 	int rc;
248 
249 	rc = do_assign_storage(0x000d0001, rn);
250 	if (rc)
251 		return rc;
252 	start = rn2addr(rn);
253 	storage_key_init_range(start, start + sclp_rzm);
254 	return 0;
255 }
256 
257 static int sclp_unassign_storage(u16 rn)
258 {
259 	return do_assign_storage(0x000c0001, rn);
260 }
261 
262 struct attach_storage_sccb {
263 	struct sccb_header header;
264 	u16 :16;
265 	u16 assigned;
266 	u32 :32;
267 	u32 entries[0];
268 } __packed;
269 
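/*
 * Attach the storage element with the given id (command 0x00080001, id in
 * bits 8-15).  Each entry returned in the SCCB carries an increment
 * number in its upper halfword; those increments are unassigned again,
 * presumably so that all assignments stay under control of the memory
 * hotplug notifier below.
 */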
270 static int sclp_attach_storage(u8 id)
271 {
272 	struct attach_storage_sccb *sccb;
273 	int rc;
274 	int i;
275 
276 	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
277 	if (!sccb)
278 		return -ENOMEM;
279 	sccb->header.length = PAGE_SIZE;
280 	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
281 				       SCLP_QUEUE_INTERVAL);
282 	if (rc)
283 		goto out;
284 	switch (sccb->header.response_code) {
285 	case 0x0020:
286 		set_bit(id, sclp_storage_ids);
287 		for (i = 0; i < sccb->assigned; i++) {
288 			if (sccb->entries[i])
289 				sclp_unassign_storage(sccb->entries[i] >> 16);
290 		}
291 		break;
292 	default:
293 		rc = -EIO;
294 		break;
295 	}
296 out:
297 	free_page((unsigned long) sccb);
298 	return rc;
299 }
300 
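/*
 * Assign or unassign all increments that overlap the range
 * [start, start + size).  Only assign failures are propagated; errors
 * from unassigning are ignored here.
 */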
301 static int sclp_mem_change_state(unsigned long start, unsigned long size,
302 				 int online)
303 {
304 	struct memory_increment *incr;
305 	unsigned long long istart;
306 	int rc = 0;
307 
308 	list_for_each_entry(incr, &sclp_mem_list, list) {
309 		istart = rn2addr(incr->rn);
310 		if (start + size - 1 < istart)
311 			break;
312 		if (start > istart + sclp_rzm - 1)
313 			continue;
314 		if (online)
315 			rc |= sclp_assign_storage(incr->rn);
316 		else
317 			sclp_unassign_storage(incr->rn);
318 		if (rc == 0)
319 			incr->standby = online ? 0 : 1;
320 	}
321 	return rc ? -EIO : 0;
322 }
323 
324 static bool contains_standby_increment(unsigned long start, unsigned long end)
325 {
326 	struct memory_increment *incr;
327 	unsigned long istart;
328 
329 	list_for_each_entry(incr, &sclp_mem_list, list) {
330 		istart = rn2addr(incr->rn);
331 		if (end - 1 < istart)
332 			continue;
333 		if (start > istart + sclp_rzm - 1)
334 			continue;
335 		if (incr->standby)
336 			return true;
337 	}
338 	return false;
339 }
340 
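/*
 * Memory hotplug notifier: the standby increments backing a memory block
 * are assigned when the block goes online and unassigned again when
 * onlining is cancelled or the block goes offline.  Not yet attached
 * storage elements are attached before the event is handled.
 */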
341 static int sclp_mem_notifier(struct notifier_block *nb,
342 			     unsigned long action, void *data)
343 {
344 	unsigned long start, size;
345 	struct memory_notify *arg;
346 	unsigned char id;
347 	int rc = 0;
348 
349 	arg = data;
350 	start = arg->start_pfn << PAGE_SHIFT;
351 	size = arg->nr_pages << PAGE_SHIFT;
352 	mutex_lock(&sclp_mem_mutex);
353 	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
354 		sclp_attach_storage(id);
355 	switch (action) {
356 	case MEM_GOING_OFFLINE:
357 		/*
358 		 * We do not allow memory blocks that contain standby memory to
359 		 * be set offline. This is done to simplify the "memory online"
360 		 * case.
361 		 */
362 		if (contains_standby_increment(start, start + size))
363 			rc = -EPERM;
364 		break;
365 	case MEM_ONLINE:
366 	case MEM_CANCEL_OFFLINE:
367 		break;
368 	case MEM_GOING_ONLINE:
369 		rc = sclp_mem_change_state(start, size, 1);
370 		break;
371 	case MEM_CANCEL_ONLINE:
372 		sclp_mem_change_state(start, size, 0);
373 		break;
374 	case MEM_OFFLINE:
375 		sclp_mem_change_state(start, size, 0);
376 		break;
377 	default:
378 		rc = -EINVAL;
379 		break;
380 	}
381 	if (!rc)
382 		sclp_mem_state_changed = 1;
383 	mutex_unlock(&sclp_mem_mutex);
384 	return rc ? NOTIFY_BAD : NOTIFY_OK;
385 }
386 
387 static struct notifier_block sclp_mem_nb = {
388 	.notifier_call = sclp_mem_notifier,
389 };
390 
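/*
 * Standby ranges have to be aligned to the memory block size before they
 * can be passed to add_memory(); memory outside the aligned range is not
 * usable and is reported as such.
 */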
391 static void __init align_to_block_size(unsigned long long *start,
392 				       unsigned long long *size)
393 {
394 	unsigned long long start_align, size_align, alignment;
395 
396 	alignment = memory_block_size_bytes();
397 	start_align = roundup(*start, alignment);
398 	size_align = rounddown(*start + *size, alignment) - start_align;
399 
400 	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
401 		*start, size_align >> 20, *size >> 20);
402 	*start = start_align;
403 	*size = size_align;
404 }
405 
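/*
 * Merge consecutive standby increments and add them to the system as a
 * single memory range.  A call with rn == 0 flushes the last pending
 * range (see sclp_add_standby_memory()).
 */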
406 static void __init add_memory_merged(u16 rn)
407 {
408 	static u16 first_rn, num;
409 	unsigned long long start, size;
410 
411 	if (rn && first_rn && (first_rn + num == rn)) {
412 		num++;
413 		return;
414 	}
415 	if (!first_rn)
416 		goto skip_add;
417 	start = rn2addr(first_rn);
418 	size = (unsigned long long) num * sclp_rzm;
419 	if (start >= VMEM_MAX_PHYS)
420 		goto skip_add;
421 	if (start + size > VMEM_MAX_PHYS)
422 		size = VMEM_MAX_PHYS - start;
423 	if (memory_end_set && (start >= memory_end))
424 		goto skip_add;
425 	if (memory_end_set && (start + size > memory_end))
426 		size = memory_end - start;
427 	align_to_block_size(&start, &size);
428 	if (size)
429 		add_memory(0, start, size);
430 skip_add:
431 	first_rn = rn;
432 	num = 1;
433 }
434 
435 static void __init sclp_add_standby_memory(void)
436 {
437 	struct memory_increment *incr;
438 
439 	list_for_each_entry(incr, &sclp_mem_list, list)
440 		if (incr->standby)
441 			add_memory_merged(incr->rn);
442 	add_memory_merged(0);
443 }
444 
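/*
 * Insert an increment into sclp_mem_list.  Assigned increments are kept
 * sorted by RN; unassigned (standby) increments get the first RN that is
 * still free, i.e. they fill the gaps between the assigned ones.
 * Increments beyond sclp_rnmax are dropped.
 */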
445 static void __init insert_increment(u16 rn, int standby, int assigned)
446 {
447 	struct memory_increment *incr, *new_incr;
448 	struct list_head *prev;
449 	u16 last_rn;
450 
451 	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
452 	if (!new_incr)
453 		return;
454 	new_incr->rn = rn;
455 	new_incr->standby = standby;
456 	last_rn = 0;
457 	prev = &sclp_mem_list;
458 	list_for_each_entry(incr, &sclp_mem_list, list) {
459 		if (assigned && incr->rn > rn)
460 			break;
461 		if (!assigned && incr->rn - last_rn > 1)
462 			break;
463 		last_rn = incr->rn;
464 		prev = &incr->list;
465 	}
466 	if (!assigned)
467 		new_incr->rn = last_rn + 1;
468 	if (new_incr->rn > sclp_rnmax) {
469 		kfree(new_incr);
470 		return;
471 	}
472 	list_add(&new_incr->list, prev);
473 }
474 
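/*
 * PM freeze callback of the sclp_mem platform device: refuse suspend once
 * the memory hotplug state has changed (sclp_mem_state_changed is set by
 * sclp_mem_notifier()).
 */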
475 static int sclp_mem_freeze(struct device *dev)
476 {
477 	if (!sclp_mem_state_changed)
478 		return 0;
479 	pr_err("Memory hotplug state changed, suspend refused.\n");
480 	return -EPERM;
481 }
482 
483 struct read_storage_sccb {
484 	struct sccb_header header;
485 	u16 max_id;
486 	u16 assigned;
487 	u16 standby;
488 	u16 :16;
489 	u32 entries[0];
490 } __packed;
491 
492 static const struct dev_pm_ops sclp_mem_pm_ops = {
493 	.freeze		= sclp_mem_freeze,
494 };
495 
496 static struct platform_driver sclp_mem_pdrv = {
497 	.driver = {
498 		.name	= "sclp_mem",
499 		.pm	= &sclp_mem_pm_ops,
500 	},
501 };
502 
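/*
 * Detect standby memory at boot: read the storage information of every
 * storage element (command 0x00040001), record the assigned and standby
 * increments, fill the remaining RNs up to sclp_rnmax as standby, then
 * register the memory notifier and the "sclp_mem" platform device so
 * that suspend can be refused after a hotplug state change.
 */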
503 static int __init sclp_detect_standby_memory(void)
504 {
505 	struct platform_device *sclp_pdev;
506 	struct read_storage_sccb *sccb;
507 	int i, id, assigned, rc;
508 
509 	if (OLDMEM_BASE) /* No standby memory in kdump mode */
510 		return 0;
511 	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
512 		return 0;
513 	rc = -ENOMEM;
514 	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
515 	if (!sccb)
516 		goto out;
517 	assigned = 0;
518 	for (id = 0; id <= sclp_max_storage_id; id++) {
519 		memset(sccb, 0, PAGE_SIZE);
520 		sccb->header.length = PAGE_SIZE;
521 		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
522 		if (rc)
523 			goto out;
524 		switch (sccb->header.response_code) {
525 		case 0x0010:
526 			set_bit(id, sclp_storage_ids);
527 			for (i = 0; i < sccb->assigned; i++) {
528 				if (!sccb->entries[i])
529 					continue;
530 				assigned++;
531 				insert_increment(sccb->entries[i] >> 16, 0, 1);
532 			}
533 			break;
534 		case 0x0310:
535 			break;
536 		case 0x0410:
537 			for (i = 0; i < sccb->assigned; i++) {
538 				if (!sccb->entries[i])
539 					continue;
540 				assigned++;
541 				insert_increment(sccb->entries[i] >> 16, 1, 1);
542 			}
543 			break;
544 		default:
545 			rc = -EIO;
546 			break;
547 		}
548 		if (!rc)
549 			sclp_max_storage_id = sccb->max_id;
550 	}
551 	if (rc || list_empty(&sclp_mem_list))
552 		goto out;
553 	for (i = 1; i <= sclp_rnmax - assigned; i++)
554 		insert_increment(0, 1, 0);
555 	rc = register_memory_notifier(&sclp_mem_nb);
556 	if (rc)
557 		goto out;
558 	rc = platform_driver_register(&sclp_mem_pdrv);
559 	if (rc)
560 		goto out;
561 	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
562 	rc = PTR_ERR_OR_ZERO(sclp_pdev);
563 	if (rc)
564 		goto out_driver;
565 	sclp_add_standby_memory();
566 	goto out;
567 out_driver:
568 	platform_driver_unregister(&sclp_mem_pdrv);
569 out:
570 	free_page((unsigned long) sccb);
571 	return rc;
572 }
573 __initcall(sclp_detect_standby_memory);
574 
575 #endif /* CONFIG_MEMORY_HOTPLUG */
576 
577 /*
578  * PCI I/O adapter configuration related functions.
579  */
580 #define SCLP_CMDW_CONFIGURE_PCI			0x001a0001
581 #define SCLP_CMDW_DECONFIGURE_PCI		0x001b0001
582 
583 #define SCLP_RECONFIG_PCI_ATYPE			2
584 
585 struct pci_cfg_sccb {
586 	struct sccb_header header;
587 	u8 atype;		/* adapter type */
588 	u8 reserved1;
589 	u16 reserved2;
590 	u32 aid;		/* adapter identifier */
591 } __packed;
592 
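/*
 * Configure or deconfigure a PCI I/O adapter.  The PCI function id is
 * passed in the adapter identifier field of the SCCB; response codes
 * 0x0020 and 0x0120 indicate success.
 */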
593 static int do_pci_configure(sclp_cmdw_t cmd, u32 fid)
594 {
595 	struct pci_cfg_sccb *sccb;
596 	int rc;
597 
598 	if (!SCLP_HAS_PCI_RECONFIG)
599 		return -EOPNOTSUPP;
600 
601 	sccb = (struct pci_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
602 	if (!sccb)
603 		return -ENOMEM;
604 
605 	sccb->header.length = PAGE_SIZE;
606 	sccb->atype = SCLP_RECONFIG_PCI_ATYPE;
607 	sccb->aid = fid;
608 	rc = sclp_sync_request(cmd, sccb);
609 	if (rc)
610 		goto out;
611 	switch (sccb->header.response_code) {
612 	case 0x0020:
613 	case 0x0120:
614 		break;
615 	default:
616 		pr_warn("configure PCI I/O adapter failed: cmd=0x%08x  response=0x%04x\n",
617 			cmd, sccb->header.response_code);
618 		rc = -EIO;
619 		break;
620 	}
621 out:
622 	free_page((unsigned long) sccb);
623 	return rc;
624 }
625 
626 int sclp_pci_configure(u32 fid)
627 {
628 	return do_pci_configure(SCLP_CMDW_CONFIGURE_PCI, fid);
629 }
630 EXPORT_SYMBOL(sclp_pci_configure);
631 
632 int sclp_pci_deconfigure(u32 fid)
633 {
634 	return do_pci_configure(SCLP_CMDW_DECONFIGURE_PCI, fid);
635 }
636 EXPORT_SYMBOL(sclp_pci_deconfigure);
637 
638 /*
639  * Channel path configuration related functions.
640  */
641 
642 #define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
643 #define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
644 #define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001
645 
646 struct chp_cfg_sccb {
647 	struct sccb_header header;
648 	u8 ccm;
649 	u8 reserved[6];
650 	u8 cssid;
651 } __attribute__((packed));
652 
653 static int do_chp_configure(sclp_cmdw_t cmd)
654 {
655 	struct chp_cfg_sccb *sccb;
656 	int rc;
657 
658 	if (!SCLP_HAS_CHP_RECONFIG)
659 		return -EOPNOTSUPP;
660 	/* Prepare sccb. */
661 	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
662 	if (!sccb)
663 		return -ENOMEM;
664 	sccb->header.length = sizeof(*sccb);
665 	rc = sclp_sync_request(cmd, sccb);
666 	if (rc)
667 		goto out;
668 	switch (sccb->header.response_code) {
669 	case 0x0020:
670 	case 0x0120:
671 	case 0x0440:
672 	case 0x0450:
673 		break;
674 	default:
675 		pr_warning("configure channel-path failed "
676 			   "(cmd=0x%08x, response=0x%04x)\n", cmd,
677 			   sccb->header.response_code);
678 		rc = -EIO;
679 		break;
680 	}
681 out:
682 	free_page((unsigned long) sccb);
683 	return rc;
684 }
685 
686 /**
687  * sclp_chp_configure - perform configure channel-path sclp command
688  * @chpid: channel-path ID
689  *
690  * Perform the configure channel-path sclp command for the specified chpid
691  * and wait for completion. Return 0 on success, non-zero otherwise.
692  */
693 int sclp_chp_configure(struct chp_id chpid)
694 {
695 	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
696 }
697 
698 /**
699  * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
700  * @chpid: channel-path ID
701  *
702  * Perform the deconfigure channel-path sclp command for the specified chpid
703  * and wait for completion. Return 0 on success, non-zero otherwise.
704  */
705 int sclp_chp_deconfigure(struct chp_id chpid)
706 {
707 	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
708 }
709 
710 struct chp_info_sccb {
711 	struct sccb_header header;
712 	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
713 	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
714 	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
715 	u8 ccm;
716 	u8 reserved[6];
717 	u8 cssid;
718 } __attribute__((packed));
719 
720 /**
721  * sclp_chp_read_info - perform read channel-path information sclp command
722  * @info: resulting channel-path information data
723  *
724  * Perform read channel-path information sclp command and wait for completion.
725  * On success, store channel-path information in @info and return 0. Return
726  * non-zero otherwise.
727  */
728 int sclp_chp_read_info(struct sclp_chp_info *info)
729 {
730 	struct chp_info_sccb *sccb;
731 	int rc;
732 
733 	if (!SCLP_HAS_CHP_INFO)
734 		return -EOPNOTSUPP;
735 	/* Prepare sccb. */
736 	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
737 	if (!sccb)
738 		return -ENOMEM;
739 	sccb->header.length = sizeof(*sccb);
740 	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
741 	if (rc)
742 		goto out;
743 	if (sccb->header.response_code != 0x0010) {
744 		pr_warning("read channel-path info failed "
745 			   "(response=0x%04x)\n", sccb->header.response_code);
746 		rc = -EIO;
747 		goto out;
748 	}
749 	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
750 	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
751 	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
752 out:
753 	free_page((unsigned long) sccb);
754 	return rc;
755 }
756 
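/*
 * Test bit 0x2 of the sclp_fac84 facility flags; bit 0x1 of the same
 * flags is used above to signal the presence of CPU type information in
 * the read-CPU-info data.
 */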
757 bool sclp_has_sprp(void)
758 {
759 	return !!(sclp_fac84 & 0x2);
760 }
761