/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

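/**
 * chsc_get_ssd_info - store subchannel-description data for a subchannel
 * @schid: id of the subchannel to query
 * @ssd: area the path mask, link addresses and channel-path ids are copied to
 *
 * Issue the store-subchannel-description chsc command for @schid and, if
 * the subchannel is valid and of I/O or message type, copy the
 * path-relevant data to @ssd. Returns 0 on success, -%ENOMEM if no
 * command page could be allocated, -%ENODEV or -%EBUSY on unfavourable
 * condition codes and -%EIO on an unexpected response code.
 */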
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	if (ssd_area->response.code != 0x0001) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		ret = -EIO;
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	/* Only I/O (st 0) and message (st 2) subchannels carry path data. */
	if ((ssd_area->st != 0) && (ssd_area->st != 2))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

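/*
 * Check if there is I/O active (last-path-used mask matching @mask) on the
 * subchannel. Returns 1 if so, 0 if not or if the subchannel state cannot
 * be retrieved.
 */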
static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}

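/*
 * Terminate internal (i.e. not driver-initiated) I/O by issuing a clear
 * subchannel. If the clear fails, trigger device verification, falling
 * back to scheduling the subchannel for evaluation; otherwise flag the
 * internal operation for retry and notify the driver via its termination
 * handler.
 */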
static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct chp_id *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
	} else {
		/* Trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		else if (sch->lpm == mask)
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

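/**
 * chsc_chp_offline - process a channel path that has gone offline
 * @chpid: channel-path ID of the path that is no longer available
 *
 * Walk all subchannels and remove @chpid from their path masks,
 * terminating or re-verifying I/O as needed.
 */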
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	bus_for_each_dev(&css_bus_type, NULL, &chpid,
			 s390_subchannel_remove_chpid);
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may now be available to the device, we'll have
	 * to do recognition again.
	 * Since we don't have any idea which chpid that
	 * beast may be on, we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through. */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

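/*
 * Description of a resource-accessibility event: the channel path it was
 * reported for plus an optional (link address, mask) pair. fla_mask is
 * 0xffff if fla holds a full link address, 0xff00 if it holds only the
 * link address, and 0 if no link address was reported.
 */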
struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};

static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & data->fla_mask) != data->fla))
			continue;
		return mask;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out:
	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

static void s390_process_res_acc(struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel(__s390_process_res_acc, res_data);
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care. */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* Allocate a new channel path structure, if needed. */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}

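/*
 * Channel-path-configuration notification data: a bitmap of affected
 * chpids plus the requested operation (0 = configure, 1 = deconfigure,
 * 2 = cancel deconfigure).
 */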
struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* Which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

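/**
 * chsc_process_crw - process pending channel subsystem event information
 *
 * Repeatedly issue the store-event-information chsc command into sei_page
 * and hand each stored event to chsc_process_sei(), until the channel
 * subsystem indicates that no further event information is pending.
 */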
void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT(2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through. */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i, mask;
	struct chp_id *chpid;
	struct subchannel *sch;

	chpid = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(sch->lock);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i == 8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

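/**
 * chsc_chp_online - process a channel path that has become available
 * @chpid: channel-path ID of the newly available path
 *
 * If the channel path is operational, add it to the path masks of all
 * affected subchannels and schedule evaluation of subchannels that may
 * have become available through it.
 */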
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0)
		for_each_subchannel(__chp_add, &chpid);
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	int chp, old_lpm;
	int mask;
	unsigned long flags;

	spin_lock_irqsave(sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (!(sch->ssd_info.path_mask & mask))
			continue;
		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
			continue;

		if (on) {
			sch->opm |= mask;
			sch->lpm |= mask;
			if (!old_lpm)
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
			break;
		}
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (check_for_io_on_path(sch, mask)) {
			if (device_is_online(sch))
				/* Path verification is done after killing. */
				device_kill_io(sch);
			else {
				/* Kill and retry internal I/O. */
				terminate_internal_io(sch);
				/* Re-start path verification. */
				if (sch->driver && sch->driver->verify)
					sch->driver->verify(&sch->dev);
			}
		} else if (!sch->lpm) {
			if (device_trigger_verify(sch) != 0)
				css_schedule_eval(sch->schid);
		} else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct chp_id *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct chp_id *chpid;

	sch = to_subchannel(dev);
	chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct subchannel *sch;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		put_device(&sch->dev);
		return 0;
	}
	if (stsch_err(schid, &schib))
		/* We're through. */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	/*
	 * Redo path verification on the devices the chpid connects to.
	 */

	bus_for_each_dev(&css_bus_type, NULL, &chpid, on ?
			 s390_subchannel_vary_chpid_on :
			 s390_subchannel_vary_chpid_off);
	if (on)
		/* Scan for new devices on the varied-on path. */
		for_each_subchannel(__s390_vary_chpid_on, NULL);
	return 0;
}

static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

static int
__chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	secm_area = page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	switch (secm_area->response.code) {
	case 0x0001: /* Success. */
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide secm\n");
		ret = -EOPNOTSUPP;
		break;
	case 0x0102: /* cub addresses incorrect */
		CIO_CRW_EVENT(2, "Invalid addresses in chsc request block\n");
		ret = -EINVAL;
		break;
	case 0x0103: /* key error */
		CIO_CRW_EVENT(2, "Access key error in secm\n");
		ret = -EINVAL;
		break;
	case 0x0105: /* error while starting */
		CIO_CRW_EVENT(2, "Error while starting channel measurement\n");
		ret = -EIO;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      secm_area->response.code);
		ret = -EIO;
	}
	return ret;
}

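/**
 * chsc_secm - switch the channel-measurement facility on or off
 * @css: channel subsystem to operate on
 * @enable: non-zero to enable, zero to disable channel measurement
 *
 * When enabling, allocate the two channel-utilization blocks, issue the
 * set-channel-monitor chsc command and create the per-chpid measurement
 * attributes; when disabling, remove the attributes and free the blocks.
 * A caller (sketch only, e.g. a sysfs store handler, where 'value' stands
 * for a caller-supplied flag) might do:
 *
 *	ret = chsc_secm(css, value != 0);
 *
 * Returns 0 on success or a negative error code.
 */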
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	void *secm_area;
	int ret;

	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!secm_area)
		return -ENOMEM;

	mutex_lock(&css->mutex);
	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			free_page((unsigned long)secm_area);
			mutex_unlock(&css->mutex);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable, secm_area);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				memset(secm_area, 0, PAGE_SIZE);
				__chsc_do_secm(css, 0, secm_area);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	mutex_unlock(&css->mutex);
	free_page((unsigned long)secm_area);
	return ret;
}

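/**
 * chsc_determine_channel_path_description - get a channel-path description
 * @chpid: channel-path ID to query
 * @desc: area the description is copied to
 *
 * Issue the store-channel-path-description chsc command for a single
 * chpid and copy the result to @desc. Returns 0 on success or a negative
 * error code.
 */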
int chsc_determine_channel_path_description(struct chp_id chpid,
					    struct channel_path_desc *desc)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} __attribute__ ((packed)) *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;

	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	switch (chp->cmg) {
	case 2:
	case 3:
		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
					 GFP_KERNEL);
		if (chp->cmg_chars) {
			int i, mask;
			struct cmg_chars *cmg_chars;

			cmg_chars = chp->cmg_chars;
			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
				mask = 0x80 >> (i + 3);
				if (cmcv & mask)
					cmg_chars->values[i] = chars->values[i];
				else
					cmg_chars->values[i] = 0;
			}
		}
		break;
	default:
		/* No cmg-dependent data. */
		break;
	}
}

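/**
 * chsc_get_channel_measurement_chars - get measurement characteristics
 * @chp: channel path to update
 *
 * Issue the store-channel-measurement-characteristics chsc command and
 * store cmg, the shared state and the cmg-dependent characteristics in
 * @chp. If no valid data is returned, cmg and shared are set to -1.
 */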
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scmc_area)
		return -ENOMEM;

	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;

	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scmc_area->response.code) {
	case 0x0001: /* Success. */
		if (!scmc_area->not_valid) {
			chp->cmg = scmc_area->cmg;
			chp->shared = scmc_area->shared;
			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
						  (struct cmg_chars *)
						  &scmc_area->data);
		} else {
			chp->cmg = -1;
			chp->shared = -1;
		}
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Invalid bit combination. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided. */
		CIO_CRW_EVENT(2, "Model does not provide scmc\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scmc_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scmc_area);
	return ret;
}

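/*
 * The event-information page is allocated once during initialization and
 * used by chsc_process_crw() for all subsequent store-event-information
 * calls; chsc_free_sei_area() releases it again.
 */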
int __init chsc_alloc_sei_area(void)
{
	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page)
		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
			      "chsc machine checks!\n");
	return (sei_page ? 0 : -ENOMEM);
}

void __init chsc_free_sei_area(void)
{
	/* sei_page was obtained via get_zeroed_page(), not kmalloc(). */
	free_page((unsigned long)sei_page);
}

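/**
 * chsc_enable_facility - enable a chsc facility dynamically
 * @operation_code: code identifying the facility to enable
 *
 * Returns 0 on success, -%EINVAL on an invalid request block,
 * -%EOPNOTSUPP if the command or facility is not provided, and -%EIO on
 * other failures.
 */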
int __init
chsc_enable_facility(int operation_code)
{
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	sda_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sda_area)
		return -ENOMEM;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	switch (sda_area->response.code) {
	case 0x0001: /* everything ok */
		ret = 0;
		break;
	case 0x0003: /* invalid request block */
	case 0x0007: /* invalid format */
		ret = -EINVAL;
		break;
	case 0x0004: /* command not provided */
	case 0x0101: /* facility not provided */
		ret = -EOPNOTSUPP;
		break;
	default: /* something went wrong */
		ret = -EIO;
	}
out:
	free_page((unsigned long)sda_area);
	return ret;
}

struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

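/*
 * Retrieve the general and chsc characteristics of the channel subsystem
 * and store them in the global css_general_characteristics and
 * css_chsc_characteristics structures.
 */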
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} __attribute__ ((packed)) *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		CIO_MSG_EVENT(0, "Was not able to determine available "
			      "CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		CIO_MSG_EVENT(0, "Was not able to determine available CHSCs, "
			      "cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		CIO_MSG_EVENT(0, "Was not able to determine "
			      "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page((unsigned long)scsc_area);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);