xref: /openbmc/linux/drivers/s390/cio/cio.c (revision 23d805b647db6c2063a13089497615efa9deacdd)
1 /*
2  *  drivers/s390/cio/cio.c
3  *   S/390 common I/O routines -- low level i/o calls
4  *
5  *    Copyright IBM Corp. 1999,2008
6  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
7  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
8  *		 Arnd Bergmann (arndb@de.ibm.com)
9  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
10  */
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/slab.h>
15 #include <linux/device.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/interrupt.h>
18 #include <asm/cio.h>
19 #include <asm/delay.h>
20 #include <asm/irq.h>
21 #include <asm/irq_regs.h>
22 #include <asm/setup.h>
23 #include <asm/reset.h>
24 #include <asm/ipl.h>
25 #include <asm/chpid.h>
26 #include <asm/airq.h>
27 #include <asm/cpu.h>
28 #include "cio.h"
29 #include "css.h"
30 #include "chsc.h"
31 #include "ioasm.h"
32 #include "io_sch.h"
33 #include "blacklist.h"
34 #include "cio_debug.h"
35 #include "chp.h"
36 #include "../s390mach.h"
37 
/* s390 debug feature logs for common I/O; allocated in cio_debug_init(). */
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;
41 
42 /*
43  * Function: cio_debug_init
44  * Initializes three debug logs for common I/O:
45  * - cio_msg logs generic cio messages
46  * - cio_trace logs the calling of different functions
47  * - cio_crw logs machine check related cio messages
48  */
49 static int __init cio_debug_init(void)
50 {
51 	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
52 	if (!cio_debug_msg_id)
53 		goto out_unregister;
54 	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
55 	debug_set_level(cio_debug_msg_id, 2);
56 	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
57 	if (!cio_debug_trace_id)
58 		goto out_unregister;
59 	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
60 	debug_set_level(cio_debug_trace_id, 2);
61 	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
62 	if (!cio_debug_crw_id)
63 		goto out_unregister;
64 	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
65 	debug_set_level(cio_debug_crw_id, 4);
66 	return 0;
67 
68 out_unregister:
69 	if (cio_debug_msg_id)
70 		debug_unregister(cio_debug_msg_id);
71 	if (cio_debug_trace_id)
72 		debug_unregister(cio_debug_trace_id);
73 	if (cio_debug_crw_id)
74 		debug_unregister(cio_debug_crw_id);
75 	printk(KERN_WARNING"cio: could not initialize debugging\n");
76 	return -1;
77 }
78 
79 arch_initcall (cio_debug_init);
80 
81 int
82 cio_set_options (struct subchannel *sch, int flags)
83 {
84        sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
85        sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
86        sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
87        return 0;
88 }
89 
90 /* FIXME: who wants to use this? */
91 int
92 cio_get_options (struct subchannel *sch)
93 {
94        int flags;
95 
96        flags = 0;
97        if (sch->options.suspend)
98 		flags |= DOIO_ALLOW_SUSPEND;
99        if (sch->options.prefetch)
100 		flags |= DOIO_DENY_PREFETCH;
101        if (sch->options.inter)
102 		flags |= DOIO_SUPPRESS_INTER;
103        return flags;
104 }
105 
/*
 * Use tpi to get a pending interrupt and call the interrupt handler.
 * Returns non-zero if an interrupt was pending (whether or not it could
 * be delivered to a subchannel handler), zero otherwise.
 */
static int
cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	/* The machine stores the interrupt information in the lowcore. */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	if (tpi (NULL) != 1)
		return 0;	/* no interrupt pending */
	irb = (struct irb *) __LC_IRB;
	/* Store interrupt response block to lowcore. */
	if (tsch (tpi_info->schid, irb) != 0)
		/* Not status pending or not operational. */
		return 1;
	/* intparm was set up to hold the subchannel pointer at start time. */
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch)
		return 1;
	/* Run the driver handler in interrupt context, bottom halves off. */
	local_bh_disable();
	irq_enter ();
	spin_lock(sch->lock);
	/* Keep the subchannel status word up to date. */
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	spin_unlock(sch->lock);
	irq_exit ();
	_local_bh_enable();
	return 1;
}
139 
/*
 * Handle a "not operational" condition code from ssch: remove the failed
 * path(s) from the logical path mask, refresh the schib and log the event.
 * Returns -EACCES while other paths remain, -ENODEV once none are left.
 */
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	/* Drop the failed path, or all paths if none was specified. */
	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	stsch (sch->schid, &sch->schib);

	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);
	sprintf(dbf_text, "no%s", sch->dev.bus_id);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}
161 
/*
 * cio_start_key - start a channel program on a subchannel.
 * @sch: subchannel structure
 * @cpa: logical channel program address
 * @lpm: logical path mask (0 means use all paths in sch->lpm)
 * @key: storage key for the channel program
 *
 * Builds the operation request block in the subchannel's private data and
 * issues "Start Subchannel".  Returns 0 on success, -EBUSY if the
 * subchannel is status pending or busy, or the result of
 * cio_start_handle_notoper() if it is not operational.
 */
int
cio_start_key (struct subchannel *sch,	/* subchannel structure */
	       struct ccw1 * cpa,	/* logical channel prog addr */
	       __u8 lpm,		/* logical path mask */
	       __u8 key)                /* storage key */
{
	char dbf_txt[15];
	int ccode;
	struct orb *orb;

	CIO_TRACE_EVENT(4, "stIO");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	orb = &to_io_private(sch)->orb;
	/* sch is always under 2G. */
	orb->intparm = (u32)(addr_t)sch;
	orb->fmt = 1;

	orb->pfch = sch->options.prefetch == 0;
	orb->spnd = sch->options.suspend;
	orb->ssic = sch->options.suspend && sch->options.inter;
	/* Use the given path mask or fall back to all logical paths. */
	orb->lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
	/*
	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
	 */
	orb->c64 = 1;
	orb->i2k = 0;
#endif
	/* Only the access-control bits (high nibble) of the key are used. */
	orb->key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cpa = (__u32) __pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	sprintf(dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT(4, dbf_txt);

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	}
}
214 
/* cio_start - start a channel program using the default storage key. */
int
cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}
220 
221 /*
222  * resume suspended I/O operation
223  */
224 int
225 cio_resume (struct subchannel *sch)
226 {
227 	char dbf_txt[15];
228 	int ccode;
229 
230 	CIO_TRACE_EVENT (4, "resIO");
231 	CIO_TRACE_EVENT (4, sch->dev.bus_id);
232 
233 	ccode = rsch (sch->schid);
234 
235 	sprintf (dbf_txt, "ccode:%d", ccode);
236 	CIO_TRACE_EVENT (4, dbf_txt);
237 
238 	switch (ccode) {
239 	case 0:
240 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
241 		return 0;
242 	case 1:
243 		return -EBUSY;
244 	case 2:
245 		return -EINVAL;
246 	default:
247 		/*
248 		 * useless to wait for request completion
249 		 *  as device is no longer operational !
250 		 */
251 		return -ENODEV;
252 	}
253 }
254 
255 /*
256  * halt I/O operation
257  */
258 int
259 cio_halt(struct subchannel *sch)
260 {
261 	char dbf_txt[15];
262 	int ccode;
263 
264 	if (!sch)
265 		return -ENODEV;
266 
267 	CIO_TRACE_EVENT (2, "haltIO");
268 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
269 
270 	/*
271 	 * Issue "Halt subchannel" and process condition code
272 	 */
273 	ccode = hsch (sch->schid);
274 
275 	sprintf (dbf_txt, "ccode:%d", ccode);
276 	CIO_TRACE_EVENT (2, dbf_txt);
277 
278 	switch (ccode) {
279 	case 0:
280 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
281 		return 0;
282 	case 1:		/* status pending */
283 	case 2:		/* busy */
284 		return -EBUSY;
285 	default:		/* device not operational */
286 		return -ENODEV;
287 	}
288 }
289 
290 /*
291  * Clear I/O operation
292  */
293 int
294 cio_clear(struct subchannel *sch)
295 {
296 	char dbf_txt[15];
297 	int ccode;
298 
299 	if (!sch)
300 		return -ENODEV;
301 
302 	CIO_TRACE_EVENT (2, "clearIO");
303 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
304 
305 	/*
306 	 * Issue "Clear subchannel" and process condition code
307 	 */
308 	ccode = csch (sch->schid);
309 
310 	sprintf (dbf_txt, "ccode:%d", ccode);
311 	CIO_TRACE_EVENT (2, dbf_txt);
312 
313 	switch (ccode) {
314 	case 0:
315 		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
316 		return 0;
317 	default:		/* device not operational */
318 		return -ENODEV;
319 	}
320 }
321 
322 /*
323  * Function: cio_cancel
324  * Issues a "Cancel Subchannel" on the specified subchannel
325  * Note: We don't need any fancy intparms and flags here
326  *	 since xsch is executed synchronously.
327  * Only for common I/O internal use as for now.
328  */
329 int
330 cio_cancel (struct subchannel *sch)
331 {
332 	char dbf_txt[15];
333 	int ccode;
334 
335 	if (!sch)
336 		return -ENODEV;
337 
338 	CIO_TRACE_EVENT (2, "cancelIO");
339 	CIO_TRACE_EVENT (2, sch->dev.bus_id);
340 
341 	ccode = xsch (sch->schid);
342 
343 	sprintf (dbf_txt, "ccode:%d", ccode);
344 	CIO_TRACE_EVENT (2, dbf_txt);
345 
346 	switch (ccode) {
347 	case 0:		/* success */
348 		/* Update information in scsw. */
349 		stsch (sch->schid, &sch->schib);
350 		return 0;
351 	case 1:		/* status pending */
352 		return -EBUSY;
353 	case 2:		/* not applicable */
354 		return -EINVAL;
355 	default:	/* not oper */
356 		return -ENODEV;
357 	}
358 }
359 
/*
 * Function: cio_modify
 * Issues a "Modify Subchannel" on the specified subchannel.
 * Retries up to five times while the subchannel is busy (cc 2).
 * Returns 0 on success, -EBUSY if busy or status pending, -ENODEV if
 * not operational, -EIO if msch got a program check.
 */
int
cio_modify (struct subchannel *sch)
{
	int ccode, retry, ret;

	ret = 0;
	for (retry = 0; retry < 5; retry++) {
		ccode = msch_err (sch->schid, &sch->schib);
		if (ccode < 0)	/* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			return 0;
		case 1:	/* status pending */
			return -EBUSY;
		case 2:	/* busy */
			udelay (100);	/* allow for recovery */
			ret = -EBUSY;
			break;
		case 3:	/* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
389 
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 *
 * Sets the enable bit, the interruption subclass and @intparm in the
 * pmcw and writes the schib back with cio_modify(), retrying up to
 * five times.  Returns 0 on success or a negative error code.
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	/* Get a current copy of the subchannel information block. */
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = sch->isc;
		sch->schib.pmcw.intparm = intparm;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			/* Re-read the schib to check the enable bit stuck. */
			stsch (sch->schid, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			/* Clear the pending status and retry. */
			struct irb irb;
			if (tsch(sch->schid, &irb) != 0)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
440 
/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 *
 * Clears the enable bit in the pmcw and writes the schib back with
 * cio_modify(), retrying up to five times.  Returns 0 on success,
 * -EBUSY if I/O is still pending, -ENODEV if not operational.
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	/* Pseudo subchannels have nothing to disable. */
	if (sch_is_pseudo_sch(sch))
		return 0;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;

	if (scsw_actl(&sch->schib.scsw) != 0)
		/*
		 * the disable function must not be called while there are
		 *  requests pending for completion !
		 */
		return -EBUSY;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 0;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EBUSY)
			/*
			 * The subchannel is busy or status pending.
			 * We'll disable when the next interrupt was delivered
			 * via the state machine.
			 */
			break;
		if (ret == 0) {
			/* Re-read the schib to check the bit was cleared. */
			stsch (sch->schid, &sch->schib);
			if (!sch->schib.pmcw.ena)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
491 
492 int cio_create_sch_lock(struct subchannel *sch)
493 {
494 	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
495 	if (!sch->lock)
496 		return -ENOMEM;
497 	spin_lock_init(sch->lock);
498 	return 0;
499 }
500 
501 static int cio_check_devno_blacklisted(struct subchannel *sch)
502 {
503 	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
504 		/*
505 		 * This device must not be known to Linux. So we simply
506 		 * say that there is no device and return ENODEV.
507 		 */
508 		CIO_MSG_EVENT(6, "Blacklisted device detected "
509 			      "at devno %04X, subchannel set %x\n",
510 			      sch->schib.pmcw.dev, sch->schid.ssid);
511 		return -ENODEV;
512 	}
513 	return 0;
514 }
515 
516 static int cio_validate_io_subchannel(struct subchannel *sch)
517 {
518 	/* Initialization for io subchannels. */
519 	if (!css_sch_is_valid(&sch->schib))
520 		return -ENODEV;
521 
522 	/* Devno is valid. */
523 	return cio_check_devno_blacklisted(sch);
524 }
525 
526 static int cio_validate_msg_subchannel(struct subchannel *sch)
527 {
528 	/* Initialization for message subchannels. */
529 	if (!css_sch_is_valid(&sch->schib))
530 		return -ENODEV;
531 
532 	/* Devno is valid. */
533 	return cio_check_devno_blacklisted(sch);
534 }
535 
/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	sch->schid = schid;
	if (cio_is_console(schid)) {
		/* The console subchannel uses the statically allocated lock. */
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);
	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
		  schid.sch_no);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 *  indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 *  is not valid.
	 */
	ccode = stsch_err (schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	/* Type specific validation; unknown types pass unchecked. */
	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
		err = cio_validate_io_subchannel(sch);
		break;
	case SUBCHANNEL_TYPE_MSG:
		err = cio_validate_msg_subchannel(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
	return 0;
out:
	/* Never free the statically allocated console lock. */
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}
609 
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 * Loops over pending interrupts, dispatching each one to the
 * subchannel whose pointer was stored in the interruption parameter.
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	s390_idle_check();
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO();
			continue;
		}
		/* intparm was set up to hold the subchannel pointer. */
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}
673 
674 #ifdef CONFIG_CCW_CONSOLE
/* Statically allocated console subchannel and its private data. */
static struct subchannel console_subchannel;
static struct io_subchannel_private console_priv;
/* Non-zero while the console subchannel has been claimed via xchg(). */
static int console_subchannel_in_use;
678 
/* Hand out the statically allocated console private data. */
void *cio_get_console_priv(void)
{
	return &console_priv;
}
683 
/*
 * busy wait for the next interrupt on the console
 */
void wait_cons_dev(void)
	__releases(console_subchannel.lock)
	__acquires(console_subchannel.lock)
{
	unsigned long cr6      __attribute__ ((aligned (8)));
	unsigned long save_cr6 __attribute__ ((aligned (8)));

	/*
	 * before entering the spinlock we may already have
	 * processed the interrupt on a different CPU...
	 */
	if (!console_subchannel_in_use)
		return;

	/* disable all but isc 1 (console device) */
	__ctl_store (save_cr6, 6, 6);
	cr6 = 0x40000000;
	__ctl_load (cr6, 6, 6);

	/* Poll until no activity is left pending on the console. */
	do {
		/* Drop the lock so cio_tpi() can deliver the interrupt. */
		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.cmd.actl != 0);
	/*
	 * restore previous isc value
	 */
	__ctl_load (save_cr6, 6, 6);
}
717 
718 static int
719 cio_test_for_console(struct subchannel_id schid, void *data)
720 {
721 	if (stsch_err(schid, &console_subchannel.schib) != 0)
722 		return -ENXIO;
723 	if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
724 	    console_subchannel.schib.pmcw.dnv &&
725 	    (console_subchannel.schib.pmcw.dev == console_devno)) {
726 		console_irq = schid.sch_no;
727 		return 1; /* found */
728 	}
729 	return 0;
730 }
731 
732 
/*
 * Determine the subchannel number of the console device, either from
 * the irq number provided by VM, or by scanning for the known device
 * number.  Returns the subchannel number or -1 if no console was found.
 */
static int
cio_get_console_sch_no(void)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch(schid, &console_subchannel.schib) != 0 ||
		    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		printk(KERN_WARNING "cio: No ccw console found!\n");
		return -1;
	}
	return console_irq;
}
761 
/*
 * Set up the console subchannel: claim it, validate it, and enable
 * I/O-interruption subclass 1 for it.  Returns the subchannel on
 * success or an ERR_PTR() on failure.
 */
struct subchannel *
cio_probe_console(void)
{
	int sch_no, ret;
	struct subchannel_id schid;

	/* Atomically claim the single console subchannel slot. */
	if (xchg(&console_subchannel_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}
	memset(&console_subchannel, 0, sizeof(struct subchannel));
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	ret = cio_validate_subchannel(&console_subchannel, schid);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}

	/*
	 * enable console I/O-interrupt subclass 1
	 */
	ctl_set_bit(6, 30);
	console_subchannel.schib.pmcw.isc = 1;
	/* Deliver console interrupts to this subchannel structure. */
	console_subchannel.schib.pmcw.intparm =
		(u32)(addr_t)&console_subchannel;
	ret = cio_modify(&console_subchannel);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(ret);
	}
	return &console_subchannel;
}
798 
/* Undo cio_probe_console(): detach intparm, disable isc 1, free the slot. */
void
cio_release_console(void)
{
	console_subchannel.schib.pmcw.intparm = 0;
	cio_modify(&console_subchannel);
	ctl_clear_bit(6, 30);
	console_subchannel_in_use = 0;
}
807 
808 /* Bah... hack to catch console special sausages. */
809 int
810 cio_is_console(struct subchannel_id schid)
811 {
812 	if (!console_subchannel_in_use)
813 		return 0;
814 	return schid_equal(&schid, &console_subchannel.schid);
815 }
816 
817 struct subchannel *
818 cio_get_console_subchannel(void)
819 {
820 	if (!console_subchannel_in_use)
821 		return NULL;
822 	return &console_subchannel;
823 }
824 
825 #endif
826 static int
827 __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
828 {
829 	int retry, cc;
830 
831 	cc = 0;
832 	for (retry=0;retry<3;retry++) {
833 		schib->pmcw.ena = 0;
834 		cc = msch(schid, schib);
835 		if (cc)
836 			return (cc==3?-ENODEV:-EBUSY);
837 		stsch(schid, schib);
838 		if (!schib->pmcw.ena)
839 			return 0;
840 	}
841 	return -EBUSY; /* uhm... */
842 }
843 
/* we can't use the normal udelay here, since it enables external interrupts */

static void udelay_reset(unsigned long usecs)
{
	uint64_t start_cc, end_cc;

	/*
	 * Busy wait on the TOD clock.  STCK units are 1/4096 microsecond,
	 * so delta/4096 yields elapsed microseconds.
	 */
	asm volatile ("STCK %0" : "=m" (start_cc));
	do {
		cpu_relax();
		asm volatile ("STCK %0" : "=m" (end_cc));
	} while (((end_cc - start_cc)/4096) < usecs);
}
856 
/*
 * Issue "Clear Subchannel" and poll for the resulting interrupt,
 * clearing any other pending interrupts encountered along the way.
 * Returns 0 once the clear completed, -ENODEV/-EBUSY on failure.
 */
static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry=0;retry<20;retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			/* Consume the pending interrupt, whoever it is for. */
			tsch(ti.schid, (struct irb *)__LC_IRB);
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_reset(100);
	}
	return -EBUSY;
}
876 
/* Set by cio_reset_pgm_check_handler() when stsch raised a program check. */
static int pgm_check_occured;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}
883 
/*
 * stsch variant for the reset path: temporarily installs a program check
 * handler so an exception from stsch is reported as -EIO instead of
 * crashing.  Returns the stsch condition code or -EIO.
 */
static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}
901 
/* Disable a single subchannel, clearing pending I/O first if necessary. */
static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;	/* already disabled */
	switch(__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		default:
			/* No default clear strategy */
			break;
		}
		/* Retry the disable now that the clear took effect. */
		stsch(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}
930 
/* Outstanding rchp requests; decremented from machine check context. */
static atomic_t chpid_reset_count;

/* Machine check handler: count down one for each rchp response crw. */
static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}
949 
#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
/*
 * Bring the channel subsystem into a defined state: disable all
 * subchannels and reset all channel paths, waiting (bounded) for the
 * machine check confirmations.
 */
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy,  NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	/* << 12 converts microseconds to TOD clock units (4096 per us). */
	timeout = get_clock() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_clock() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}
989 
/* Reset callback so css_reset() runs on reboot / re-IPL. */
static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);
1002 
/* Search state for reipl_find_schid() / __reipl_subchannel_match(). */
struct sch_match_id {
	struct subchannel_id schid;	/* result: matching subchannel id */
	struct ccw_dev_id devid;	/* input: device to look for */
	int rc;				/* 0 if found, -ENODEV otherwise */
};
1008 
1009 static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
1010 {
1011 	struct schib schib;
1012 	struct sch_match_id *match_id = data;
1013 
1014 	if (stsch_reset(schid, &schib))
1015 		return -ENXIO;
1016 	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
1017 	    (schib.pmcw.dev == match_id->devid.devno) &&
1018 	    (schid.ssid == match_id->devid.ssid)) {
1019 		match_id->schid = schid;
1020 		match_id->rc = 0;
1021 		return 1;
1022 	}
1023 	return 0;
1024 }
1025 
1026 static int reipl_find_schid(struct ccw_dev_id *devid,
1027 			    struct subchannel_id *schid)
1028 {
1029 	struct sch_match_id match_id;
1030 
1031 	match_id.devid = *devid;
1032 	match_id.rc = -ENODEV;
1033 	for_each_subchannel(__reipl_subchannel_match, &match_id);
1034 	if (match_id.rc == 0)
1035 		*schid = match_id.schid;
1036 	return match_id.rc;
1037 }
1038 
extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id schid;

	/* Bring subchannels and channel paths into a defined state. */
	s390_reset_system();
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32*)&schid));
}
1051 
1052 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
1053 {
1054 	struct subchannel_id schid;
1055 	struct schib schib;
1056 
1057 	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
1058 	if (!schid.one)
1059 		return -ENODEV;
1060 	if (stsch(schid, &schib))
1061 		return -ENODEV;
1062 	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
1063 		return -ENODEV;
1064 	if (!schib.pmcw.dnv)
1065 		return -ENODEV;
1066 	iplinfo->devno = schib.pmcw.dev;
1067 	iplinfo->is_qdio = schib.pmcw.qf;
1068 	return 0;
1069 }
1070