xref: /openbmc/linux/drivers/s390/cio/cio.c (revision afb46f79)
/*
 *   S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <linux/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"

debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;

/*
 * Function: cio_debug_init
 * Initializes three debug logs for common I/O:
 * - cio_msg logs generic cio messages
 * - cio_trace logs the calling of different functions
 * - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 11 * sizeof(long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level(cio_debug_msg_id, 2);
	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register("cio_crw", 8, 1, 8 * sizeof(long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level(cio_debug_crw_id, 4);
	return 0;

out_unregister:
	if (cio_debug_msg_id)
		debug_unregister(cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister(cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister(cio_debug_crw_id);
	return -1;
}

arch_initcall(cio_debug_init);

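/*
 * Note (added for illustration, not part of the original file): the three
 * debug areas registered above belong to the s390 debug feature and, once
 * debugfs is mounted, are typically readable under /sys/kernel/debug/s390dbf/,
 * e.g.:
 *
 *	cat /sys/kernel/debug/s390dbf/cio_msg/sprintf
 *	echo 6 > /sys/kernel/debug/s390dbf/cio_msg/level
 *
 * Raising the level makes lower-priority CIO_MSG_EVENT() entries visible.
 */
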
int cio_set_options(struct subchannel *sch, int flags)
{
	struct io_subchannel_private *priv = to_io_private(sch);

	priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
	priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
	priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
	return 0;
}

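/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * channel programs started with suspend support and prefetch disabled could
 * combine the flags before issuing cio_start():
 *
 *	cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_DENY_PREFETCH);
 *
 * The options only take effect for subsequent cio_start_key() calls, which
 * translate them into the ORB's spnd/pfch/ssic bits.
 */
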
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);

	if (cio_update_schib(sch))
		return -ENODEV;

	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof(struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}

int
cio_start_key(struct subchannel *sch,	/* subchannel structure */
	      struct ccw1 *cpa,		/* logical channel prog addr */
	      __u8 lpm,			/* logical path mask */
	      __u8 key)			/* storage key */
{
	struct io_subchannel_private *priv = to_io_private(sch);
	union orb *orb = &priv->orb;
	int ccode;

	CIO_TRACE_EVENT(5, "stIO");
	CIO_TRACE_EVENT(5, dev_name(&sch->dev));

	memset(orb, 0, sizeof(union orb));
	/* sch is always under 2G. */
	orb->cmd.intparm = (u32)(addr_t)sch;
	orb->cmd.fmt = 1;

	orb->cmd.pfch = priv->options.prefetch == 0;
	orb->cmd.spnd = priv->options.suspend;
	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
	/*
	 * For 64 bit, only 64 bit IDAWs with 4k page size are supported.
	 */
	orb->cmd.c64 = 1;
	orb->cmd.i2k = 0;
#endif
	orb->cmd.key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cmd.cpa = (__u32) __pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	CIO_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	default:
		return ccode;
	}
}

int
cio_start(struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}

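/*
 * Illustrative sketch (not part of the original file): a minimal single-CCW
 * channel program, assuming the caller holds the subchannel lock and `buf`
 * lies below 2G (so no IDAWs are needed for this example):
 *
 *	struct ccw1 ccw;
 *
 *	ccw.cmd_code = 0x04;		// hypothetical SENSE command code
 *	ccw.flags = CCW_FLAG_SLI;	// suppress incorrect-length indication
 *	ccw.count = 32;
 *	ccw.cda = (__u32) __pa(buf);
 *	ret = cio_start(sch, &ccw, 0);	// lpm == 0: use all paths in sch->lpm
 *
 * Completion is reported asynchronously through the subchannel's interrupt
 * handler once status becomes pending.
 */
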
/*
 * resume suspended I/O operation
 */
int
cio_resume(struct subchannel *sch)
{
	int ccode;

	CIO_TRACE_EVENT(4, "resIO");
	CIO_TRACE_EVENT(4, dev_name(&sch->dev));

	ccode = rsch(sch->schid);

	CIO_HEX_EVENT(4, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
		return 0;
	case 1:
		return -EBUSY;
	case 2:
		return -EINVAL;
	default:
		/*
		 * No point in waiting for request completion;
		 * the device is no longer operational.
		 */
		return -ENODEV;
	}
}

/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "haltIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * Issue "Halt subchannel" and process condition code
	 */
	ccode = hsch(sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:		/* device not operational */
		return -ENODEV;
	}
}

/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "clearIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch(sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
		return 0;
	default:		/* device not operational */
		return -ENODEV;
	}
}

/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel.
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * For common I/O internal use only for now.
 */
int
cio_cancel(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "cancelIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	ccode = xsch(sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		if (cio_update_schib(sch))
			return -ENODEV;
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}

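/*
 * Illustrative note (not part of the original file): the three termination
 * primitives above form an escalation ladder.  A caller that wants to stop
 * an in-flight operation would typically try the least intrusive function
 * first, e.g.:
 *
 *	if (cio_cancel(sch) == -EINVAL)		// already started, can't cancel
 *		if (cio_halt(sch) == -EBUSY)	// halt rejected
 *			cio_clear(sch);		// clear always wins
 *
 * cancel (xsch) only works before the start function takes effect, halt
 * (hsch) interrupts it, and clear (csch) unconditionally terminates it and
 * discards pending status.
 */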

static void cio_apply_config(struct subchannel *sch, struct schib *schib)
{
	schib->pmcw.intparm = sch->config.intparm;
	schib->pmcw.mbi = sch->config.mbi;
	schib->pmcw.isc = sch->config.isc;
	schib->pmcw.ena = sch->config.ena;
	schib->pmcw.mme = sch->config.mme;
	schib->pmcw.mp = sch->config.mp;
	schib->pmcw.csense = sch->config.csense;
	schib->pmcw.mbfc = sch->config.mbfc;
	if (sch->config.mbfc)
		schib->mba = sch->config.mba;
}

static int cio_check_config(struct subchannel *sch, struct schib *schib)
{
	return (schib->pmcw.intparm == sch->config.intparm) &&
		(schib->pmcw.mbi == sch->config.mbi) &&
		(schib->pmcw.isc == sch->config.isc) &&
		(schib->pmcw.ena == sch->config.ena) &&
		(schib->pmcw.mme == sch->config.mme) &&
		(schib->pmcw.mp == sch->config.mp) &&
		(schib->pmcw.csense == sch->config.csense) &&
		(schib->pmcw.mbfc == sch->config.mbfc) &&
		(!sch->config.mbfc || (schib->mba == sch->config.mba));
}

/*
 * cio_commit_config - apply configuration to the subchannel
 */
int cio_commit_config(struct subchannel *sch)
{
	int ccode, retry, ret = 0;
	struct schib schib;
	struct irb irb;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch_err(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			if (stsch_err(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			ret = -EBUSY;
			if (tsch(sch->schid, &irb))
				return ret;
			break;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}

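/*
 * Illustrative note (not part of the original file): callers change a
 * subchannel by editing the cached sch->config and then committing it,
 * e.g. to enable measurement mode:
 *
 *	sch->config.mme = 1;
 *	sch->config.mbi = mbi;		// hypothetical measurement-block index
 *	ret = cio_commit_config(sch);
 *
 * cio_commit_config() re-reads the schib afterwards and only reports
 * success once the hardware actually reflects the requested state.
 */
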
/**
 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 * @sch: subchannel on which to perform stsch
 * Return zero on success, -ENODEV otherwise.
 */
int cio_update_schib(struct subchannel *sch)
{
	struct schib schib;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	memcpy(&sch->schib, &schib, sizeof(schib));
	return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);

/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	int ret;

	CIO_TRACE_EVENT(2, "ensch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 1;
	sch->config.isc = sch->isc;
	sch->config.intparm = intparm;

	ret = cio_commit_config(sch);
	if (ret == -EIO) {
		/*
		 * Got a program check in msch. Try without
		 * the concurrent sense bit the next time.
		 */
		sch->config.csense = 0;
		ret = cio_commit_config(sch);
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);

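/*
 * Illustrative sketch (not part of the original file): device drivers
 * usually pass the subchannel pointer itself as the interruption parameter,
 * which is what do_cio_interrupt() below relies on when it maps an incoming
 * I/O interrupt back to its subchannel:
 *
 *	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
 *	...
 *	ret = cio_disable_subchannel(sch);	// when done
 */
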
/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	int ret;

	CIO_TRACE_EVENT(2, "dissch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return 0;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 0;
	ret = cio_commit_config(sch);

	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);

static int cio_check_devno_blacklisted(struct subchannel *sch)
{
	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux, so we simply
		 * say that there is no device and return -ENODEV.
		 */
		CIO_MSG_EVENT(6, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		return -ENODEV;
	}
	return 0;
}

static int cio_validate_io_subchannel(struct subchannel *sch)
{
	/* Initialization for io subchannels. */
	if (!css_sch_is_valid(&sch->schib))
		return -ENODEV;

	/* Devno is valid. */
	return cio_check_devno_blacklisted(sch);
}

static int cio_validate_msg_subchannel(struct subchannel *sch)
{
	/* Initialization for message subchannels. */
	if (!css_sch_is_valid(&sch->schib))
		return -ENODEV;

	/* Devno is valid. */
	return cio_check_devno_blacklisted(sch);
}

/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	sch->st = sch->schib.pmcw.st;
	sch->schid = schid;

	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
		err = cio_validate_io_subchannel(sch);
		break;
	case SUBCHANNEL_TYPE_MSG:
		err = cio_validate_msg_subchannel(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
out:
	return err;
}

/*
 * do_cio_interrupt() handles all normal I/O device IRQs
 */
static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	__this_cpu_write(s390_idle.nohz_delay, 1);
	tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
	irb = (struct irb *) &S390_lowcore.irb;
	sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
	if (!sch) {
		/* Clear pending interrupt condition. */
		inc_irq_stat(IRQIO_CIO);
		tsch(tpi_info->schid, irb);
		return IRQ_HANDLED;
	}
	spin_lock(sch->lock);
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) == 0) {
		/* Keep subchannel information word up to date. */
		memcpy(&sch->schib.scsw, &irb->scsw, sizeof(irb->scsw));
		/* Call interrupt handler if there is one. */
		if (sch->driver && sch->driver->irq)
			sch->driver->irq(sch);
		else
			inc_irq_stat(IRQIO_CIO);
	} else
		inc_irq_stat(IRQIO_CIO);
	spin_unlock(sch->lock);

	return IRQ_HANDLED;
}

static struct irqaction io_interrupt = {
	.name	 = "IO",
	.handler = do_cio_interrupt,
};

void __init init_cio_interrupts(void)
{
	irq_set_chip_and_handler(IO_INTERRUPT,
				 &dummy_irq_chip, handle_percpu_irq);
	setup_irq(IO_INTERRUPT, &io_interrupt);
}

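/*
 * Illustrative note (not part of the original file): on s390 all I/O
 * interrupts arrive as a single interrupt class, so one irqaction fans
 * them out.  The flow above is roughly:
 *
 *	interrupt -> lowcore int_code (tpi_info) -> intparm == sch
 *	tsch() stores the IRB -> sch->driver->irq(sch) dispatches
 *
 * which is why cio_enable_subchannel() callers register the subchannel
 * pointer as intparm in the first place.
 */
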
#ifdef CONFIG_CCW_CONSOLE
static struct subchannel *console_sch;

/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status is pending. Called with the subchannel's lock held.
 */
void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		local_bh_disable();
		irq_enter();
	}
	kstat_incr_irq_this_cpu(IO_INTERRUPT);
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		inc_irq_stat(IRQIO_CIO);
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}

static int cio_test_for_console(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib) != 0)
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == console_devno)) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}

static int cio_get_console_sch_no(void)
{
	struct subchannel_id schid;
	struct schib schib;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch_err(schid, &schib) != 0 ||
		    (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv)
			return -1;
		console_devno = schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
	}
	return console_irq;
}

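/*
 * Illustrative note (not part of the original file): console lookup thus
 * tries two sources in order -- a subchannel number handed over by the
 * hypervisor (console_irq), or a scan of all subchannels for a known
 * device number (console_devno, e.g. from a "condev="-style setting).
 * Both globals default to -1, meaning "unknown".
 */
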
struct subchannel *cio_probe_console(void)
{
	struct subchannel_id schid;
	struct subchannel *sch;
	int sch_no, ret;

	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		pr_warning("No CCW console was found\n");
		return ERR_PTR(-ENODEV);
	}
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return sch;

	isc_register(CONSOLE_ISC);
	sch->config.isc = CONSOLE_ISC;
	sch->config.intparm = (u32)(addr_t)sch;
	ret = cio_commit_config(sch);
	if (ret) {
		isc_unregister(CONSOLE_ISC);
		put_device(&sch->dev);
		return ERR_PTR(ret);
	}
	console_sch = sch;
	return sch;
}

int cio_is_console(struct subchannel_id schid)
{
	if (!console_sch)
		return 0;
	return schid_equal(&schid, &console_sch->schid);
}

void cio_register_early_subchannels(void)
{
	int ret;

	if (!console_sch)
		return;

	ret = css_register_subchannel(console_sch);
	if (ret)
		put_device(&console_sch->dev);
}
#endif /* CONFIG_CCW_CONSOLE */

static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch_err(schid, schib);
		if (cc)
			return (cc == 3) ? -ENODEV : -EBUSY;
		if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
			return -ENODEV;
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}

static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_simple(100);
	}
	return -EBUSY;
}

static void __clear_chsc_subchannel_easy(void)
{
	/* It seems we can only wait for a bit here :/ */
	udelay_simple(100);
}

static int pgm_check_occurred;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occurred = 1;
}

static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
	int rc;

	pgm_check_occurred = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch_err(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occurred. */
	barrier();

	if (pgm_check_occurred)
		return -EIO;
	else
		return rc;
}

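/*
 * Illustrative note (not part of the original file): stsch_reset() shows
 * the pattern used throughout the reset path -- install a temporary
 * program-check handler, issue the instruction, and treat a program check
 * as -EIO instead of letting it crash the (possibly half-shut-down) system:
 *
 *	s390_base_pgm_handler_fn = my_handler;	// hypothetical handler
 *	rc = stsch_err(schid, &schib);
 *	s390_base_pgm_handler_fn = NULL;
 *
 * This matters during reipl/reset, where subchannel sets may vanish under
 * us while we are still scanning them.
 */
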
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch (__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		case SUBCHANNEL_TYPE_CHSC:
			__clear_chsc_subchannel_easy();
			break;
		default:
			/* No default clear strategy */
			break;
		}
		stsch_err(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}

static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}

#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_tod_clock_fast() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}

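/*
 * Illustrative note (not part of the original file): the timeout arithmetic
 * above relies on TOD-clock bit 51 ticking once per microsecond, so shifting
 * a microsecond count left by 12 converts it to TOD-clock units:
 *
 *	30 * USEC_PER_SEC = 30,000,000 us
 *	30,000,000 << 12 ~= 1.23 * 10^11 TOD units = 30 seconds
 *
 * get_tod_clock_fast() returns the clock in those same units, so the
 * comparison is a plain unsigned compare.
 */
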
static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);

struct sch_match_id {
	struct subchannel_id schid;
	struct ccw_dev_id devid;
	int rc;
};

static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct sch_match_id *match_id = data;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == match_id->devid.devno) &&
	    (schid.ssid == match_id->devid.ssid)) {
		match_id->schid = schid;
		match_id->rc = 0;
		return 1;
	}
	return 0;
}

static int reipl_find_schid(struct ccw_dev_id *devid,
			    struct subchannel_id *schid)
{
	struct sch_match_id match_id;

	match_id.devid = *devid;
	match_id.rc = -ENODEV;
	for_each_subchannel(__reipl_subchannel_match, &match_id);
	if (match_id.rc == 0)
		*schid = match_id.schid;
	return match_id.rc;
}

extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id uninitialized_var(schid);

	s390_reset_system(NULL, NULL);
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32 *)&schid));
}

int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
	if (!schid.one)
		return -ENODEV;
	if (stsch_err(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}

/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
	int cc;
	union orb *orb = &to_io_private(sch)->orb;

	memset(orb, 0, sizeof(union orb));
	orb->tm.intparm = (u32) (addr_t) sch;
	orb->tm.key = key >> 4;
	orb->tm.b = 1;
	orb->tm.lpm = lpm ? lpm : sch->lpm;
	orb->tm.tcw = (u32) (addr_t) tcw;
	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		return 0;
	case 1:
	case 2:
		return -EBUSY;
	default:
		return cio_start_handle_notoper(sch, lpm);
	}
}

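/*
 * Illustrative sketch (not part of the original file): transport mode is
 * the fcx/TCW analogue of cio_start_key().  A driver would build a tcw
 * (e.g. with the asm/fcx.h helpers) and start it the same way, assuming
 * the subchannel reports transport-mode capability:
 *
 *	tcw_init(tcw, 1, 0);		// r=1: input data expected
 *	tcw_set_tccb(tcw, tccb);
 *	ret = cio_tm_start_key(sch, tcw, 0, PAGE_DEFAULT_KEY);
 *
 * As with command mode, lpm == 0 means "use all paths in sch->lpm".
 */
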
/**
 * cio_tm_intrg - perform interrogate function
 * @sch: subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
	int cc;

	if (!to_io_private(sch)->orb.tm.b)
		return -EINVAL;
	cc = xsch(sch->schid);
	switch (cc) {
	case 0:
	case 2:
		return 0;
	case 1:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}