/*
 *  drivers/s390/cio/cio.c
 *   S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright (C) IBM Corp. 1999,2006
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"
#include "../s390mach.h"

debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;

int cio_show_msg;

static int __init
cio_setup (char *parm)
{
	if (!strcmp (parm, "yes"))
		cio_show_msg = 1;
	else if (!strcmp (parm, "no"))
		cio_show_msg = 0;
	else
		printk(KERN_ERR "cio: cio_setup: "
		       "invalid cio_msg parameter '%s'\n", parm);
	return 1;
}

__setup ("cio_msg=", cio_setup);
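
/*
 * Illustrative note (hedged, not from the source): cio_setup() is wired to
 * the kernel command line by the __setup() above, so booting with
 * "cio_msg=yes" sets cio_show_msg, which other cio code presumably consults
 * before printing informational messages.
 */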

/*
 * Function: cio_debug_init
 * Initializes three debug logs for common I/O:
 * - cio_msg logs generic cio messages
 * - cio_trace logs the calling of different functions
 * - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level(cio_debug_msg_id, 2);
	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level(cio_debug_crw_id, 4);
	return 0;

out_unregister:
	if (cio_debug_msg_id)
		debug_unregister(cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister(cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister(cio_debug_crw_id);
	printk(KERN_WARNING "cio: could not initialize debugging\n");
	return -1;
}

arch_initcall (cio_debug_init);
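
/*
 * Illustrative sketch (hypothetical, not part of the driver): once the
 * buffers above are registered, a formatted message can be logged to the
 * cio_msg area at a given level, assuming the CIO_MSG_EVENT() wrapper from
 * cio_debug.h expands to debug_sprintf_event() on cio_debug_msg_id:
 *
 *	CIO_MSG_EVENT(2, "subchannel 0.%x.%04x probed\n", ssid, sch_no);
 */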

int
cio_set_options (struct subchannel *sch, int flags)
{
	sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
	sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
	sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
	return 0;
}
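
/*
 * Illustrative usage (hypothetical caller): a driver that wants suspendable
 * channel programs but no CCW prefetching could request
 *
 *	cio_set_options(sch, DOIO_ALLOW_SUSPEND | DOIO_DENY_PREFETCH);
 *
 * before starting I/O; cio_start_key() below translates these options into
 * the corresponding ORB fields.
 */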

/* FIXME: who wants to use this? */
int
cio_get_options (struct subchannel *sch)
{
	int flags;

	flags = 0;
	if (sch->options.suspend)
		flags |= DOIO_ALLOW_SUSPEND;
	if (sch->options.prefetch)
		flags |= DOIO_DENY_PREFETCH;
	if (sch->options.inter)
		flags |= DOIO_SUPPRESS_INTER;
	return flags;
}
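
/*
 * Illustrative round trip (hypothetical): the two helpers are symmetric, so
 * a caller can add one option without disturbing the others:
 *
 *	cio_set_options(sch, cio_get_options(sch) | DOIO_SUPPRESS_INTER);
 */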

/*
 * Use tpi to get a pending interrupt and call the interrupt handler of the
 * subchannel it is for. Returns non-zero if an interrupt was pending.
 */
static int
cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	if (tpi (NULL) != 1)
		return 0;
	irb = (struct irb *) __LC_IRB;
	/* Store interrupt response block to lowcore. */
	if (tsch (tpi_info->schid, irb) != 0)
		/* Not status pending or not operational. */
		return 1;
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch)
		return 1;
	local_bh_disable();
	irq_enter ();
	spin_lock(sch->lock);
	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	spin_unlock(sch->lock);
	irq_exit ();
	_local_bh_enable();
	return 1;
}

static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	stsch (sch->schid, &sch->schib);

	CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);
	sprintf(dbf_text, "no%s", sch->dev.bus_id);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}

int
cio_start_key (struct subchannel *sch,	/* subchannel structure */
	       struct ccw1 * cpa,	/* logical channel prog addr */
	       __u8 lpm,		/* logical path mask */
	       __u8 key)                /* storage key */
{
	char dbf_txt[15];
	int ccode;
	struct orb *orb;

	CIO_TRACE_EVENT(4, "stIO");
	CIO_TRACE_EVENT(4, sch->dev.bus_id);

	orb = &to_io_private(sch)->orb;
	/* sch is always under 2G. */
	orb->intparm = (u32)(addr_t)sch;
	orb->fmt = 1;

	orb->pfch = sch->options.prefetch == 0;
	orb->spnd = sch->options.suspend;
	orb->ssic = sch->options.suspend && sch->options.inter;
	orb->lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
	/*
	 * For 64 bit we always support 64 bit IDAWs with 4k page size only.
	 */
	orb->c64 = 1;
	orb->i2k = 0;
#endif
	orb->key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cpa = (__u32) __pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	sprintf(dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT(4, dbf_txt);

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	}
}

int
cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}
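
/*
 * Illustrative sketch (hypothetical caller, not from the source): a minimal
 * single-CCW channel program issuing a Basic Sense into a 32-byte buffer
 * that is assumed to reside below 2G:
 *
 *	struct ccw1 ccw;
 *
 *	ccw.cmd_code = 0x04;			// Basic Sense
 *	ccw.flags = CCW_FLAG_SLI;		// tolerate short transfers
 *	ccw.count = 32;
 *	ccw.cda = (__u32) __pa(sense_buf);	// channel uses absolute addresses
 *	ret = cio_start(sch, &ccw, 0);		// lpm 0: fall back to sch->lpm
 */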

/*
 * resume suspended I/O operation
 */
int
cio_resume (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	CIO_TRACE_EVENT (4, "resIO");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	ccode = rsch (sch->schid);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (4, dbf_txt);

	switch (ccode) {
	case 0:
		sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
		return 0;
	case 1:
		return -EBUSY;
	case 2:
		return -EINVAL;
	default:
		/*
		 * It is useless to wait for request completion
		 * as the device is no longer operational!
		 */
		return -ENODEV;
	}
}

/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT (2, "haltIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	/*
	 * Issue "Halt subchannel" and process condition code
	 */
	ccode = hsch (sch->schid);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:
		sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:		/* device not operational */
		return -ENODEV;
	}
}

/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT (2, "clearIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch (sch->schid);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:
		sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
		return 0;
	default:		/* device not operational */
		return -ENODEV;
	}
}

/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * Only for common I/O internal use for now.
 */
int
cio_cancel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT (2, "cancelIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	ccode = xsch (sch->schid);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		stsch (sch->schid, &sch->schib);
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}

/*
 * Function: cio_modify
 * Issues a "Modify Subchannel" on the specified subchannel
 */
int
cio_modify (struct subchannel *sch)
{
	int ccode, retry, ret;

	ret = 0;
	for (retry = 0; retry < 5; retry++) {
		ccode = msch_err (sch->schid, &sch->schib);
		if (ccode < 0)	/* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			return 0;
		case 1:	/* status pending */
			return -EBUSY;
		case 2:	/* busy */
			udelay (100);	/* allow for recovery */
			ret = -EBUSY;
			break;
		case 3:	/* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
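
/*
 * Usage note (derived from the callers below): cio_modify() writes the
 * in-memory schib back to the subchannel, so the pattern is to edit
 * sch->schib.pmcw first and then call cio_modify(), as
 * cio_enable_subchannel() and cio_disable_subchannel() do.
 */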

/*
 * Enable subchannel.
 */
int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
			  u32 intparm)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = isc;
		sch->schib.pmcw.intparm = intparm;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			stsch (sch->schid, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;
			if (tsch(sch->schid, &irb) != 0)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
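
/*
 * Illustrative usage (hypothetical caller): enable a subchannel on
 * interruption subclass 3 with the subchannel itself as interrupt
 * parameter, mirroring how intparm is set up in cio_start_key():
 *
 *	ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch);
 */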

/*
 * Disable subchannel.
 */
int
cio_disable_subchannel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	if (sch_is_pseudo_sch(sch))
		return 0;
	ccode = stsch (sch->schid, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;

	if (sch->schib.scsw.actl != 0)
		/*
		 * The disable function must not be called while there are
		 * requests pending for completion!
		 */
		return -EBUSY;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 0;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EBUSY)
			/*
			 * The subchannel is busy or status pending.
			 * We'll disable once the next interrupt has been
			 * delivered via the state machine.
			 */
			break;
		if (ret == 0) {
			stsch (sch->schid, &sch->schib);
			if (!sch->schib.pmcw.ena)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}

int cio_create_sch_lock(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}

/*
 * cio_validate_subchannel()
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   SUBCHANNEL_TYPE_IO for a normal io subchannel
 *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
 *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
 *   SUBCHANNEL_TYPE_ADM for an adm(?) subchannel
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for subchannels with invalid device number or blacklisted devices
 */
int
cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf (dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT (4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	sch->schid = schid;
	if (cio_is_console(schid)) {
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);
	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
		  schid.sch_no);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err (schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	/*
	 * ... just being curious we check for non-I/O subchannels
	 */
	if (sch->st != 0) {
		CIO_DEBUG(KERN_INFO, 0,
			  "Subchannel 0.%x.%04x reports "
			  "non-I/O subchannel type %04X\n",
			  sch->schid.ssid, sch->schid.sch_no, sch->st);
		/* We stop here for non-io subchannels. */
		err = sch->st;
		goto out;
	}

	/* Initialization for io subchannels. */
	if (!css_sch_is_valid(&sch->schib)) {
		err = -ENODEV;
		goto out;
	}

	/* Devno is valid. */
	if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(4, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		err = -ENODEV;
		goto out;
	}
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;

	CIO_DEBUG(KERN_INFO, 0,
		  "Detected device %04x on subchannel 0.%x.%04X"
		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
		  sch->schib.pmcw.dev, sch->schid.ssid,
		  sch->schid.sch_no, sch->schib.pmcw.pim,
		  sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	/*
	 * We now have to initially ...
	 *  ... set "interruption subclass"
	 *  ... enable "concurrent sense"
	 *  ... enable "multipath mode" if more than one
	 *	  CHPID is available. This is done regardless of
	 *	  whether multiple paths are available for us.
	 */
	sch->schib.pmcw.isc = 3;	/* could be something else */
	sch->schib.pmcw.csense = 1;	/* concurrent sense */
	sch->schib.pmcw.ena = 0;
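	/*
	 * Note (added for clarity): lpm & (lpm - 1) clears the lowest set
	 * bit, so the result is non-zero exactly when more than one path
	 * bit is set, e.g. 0xc0 & 0xbf = 0x80 (two paths), but
	 * 0x80 & 0x7f = 0 (one path).
	 */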
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */
	/* clean up possible residual cmf stuff */
	sch->schib.pmcw.mme = 0;
	sch->schib.pmcw.mbfc = 0;
	sch->schib.pmcw.mbi = 0;
	sch->schib.mba = 0;
	return 0;
out:
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}

/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		/*
		 * Make sure that the i/o interrupt did not "overtake"
		 * the last HZ timer interrupt.
		 */
		account_ticks(S390_lowcore.int_clock);
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO();
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
			spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch (tpi_info->schid, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
		}
		if (sch)
			spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the SIE which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;

void *cio_get_console_priv(void)
{
	return &console_priv;
}

/*
 * busy wait for the next interrupt on the console
 */
void
wait_cons_dev (void)
{
	unsigned long cr6      __attribute__ ((aligned (8)));
	unsigned long save_cr6 __attribute__ ((aligned (8)));

	/*
	 * before entering the spinlock we may already have
	 * processed the interrupt on a different CPU...
	 */
	if (!console_subchannel_in_use)
		return;

	/* disable all but isc 7 (console device) */
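	/*
	 * Note (added for clarity): in the I/O interruption subclass mask
	 * the bit for isc n is 0x80000000 >> n, so isc 7 corresponds to
	 * 0x01000000 -- the same bit that ctl_set_bit(6, 24) sets in
	 * cio_probe_console() below.
	 */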
	__ctl_store (save_cr6, 6, 6);
	cr6 = 0x01000000;
	__ctl_load (cr6, 6, 6);

	do {
		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.actl != 0);
	/*
	 * restore previous isc value
	 */
	__ctl_load (save_cr6, 6, 6);
}

static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
	if (stsch_err(schid, &console_subchannel.schib) != 0)
		return -ENXIO;
	if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
	    console_subchannel.schib.pmcw.dnv &&
	    (console_subchannel.schib.pmcw.dev == console_devno)) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}

static int
cio_get_console_sch_no(void)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch(schid, &console_subchannel.schib) != 0 ||
		    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
		if (console_irq == -1)
			return -1;
	} else {
		/* Unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over. */
		printk(KERN_WARNING "cio: No ccw console found!\n");
		return -1;
	}
	return console_irq;
}

struct subchannel *
cio_probe_console(void)
{
	int sch_no, ret;
	struct subchannel_id schid;

	if (xchg(&console_subchannel_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}
	memset(&console_subchannel, 0, sizeof(struct subchannel));
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	ret = cio_validate_subchannel(&console_subchannel, schid);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}

	/*
	 * enable console I/O-interrupt subclass 7
	 */
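	/* Bit 24 (counting from the LSB) is the isc-7 mask bit: 1 << 24 == 0x01000000. */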
	ctl_set_bit(6, 24);
	console_subchannel.schib.pmcw.isc = 7;
	console_subchannel.schib.pmcw.intparm =
		(u32)(addr_t)&console_subchannel;
	ret = cio_modify(&console_subchannel);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(ret);
	}
	return &console_subchannel;
}

void
cio_release_console(void)
{
	console_subchannel.schib.pmcw.intparm = 0;
	cio_modify(&console_subchannel);
	ctl_clear_bit(6, 24);
	console_subchannel_in_use = 0;
}

/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
	if (!console_subchannel_in_use)
		return 0;
	return schid_equal(&schid, &console_subchannel.schid);
}

struct subchannel *
cio_get_console_subchannel(void)
{
	if (!console_subchannel_in_use)
		return NULL;
	return &console_subchannel;
}

#endif

static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch(schid, schib);
		if (cc)
			return (cc == 3) ? -ENODEV : -EBUSY;
		stsch(schid, schib);
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}

/* We can't use the normal udelay here, since it enables external interrupts. */

static void udelay_reset(unsigned long usecs)
{
	uint64_t start_cc, end_cc;

	asm volatile ("STCK %0" : "=m" (start_cc));
	do {
		cpu_relax();
		asm volatile ("STCK %0" : "=m" (end_cc));
	} while (((end_cc - start_cc)/4096) < usecs);
}
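
/*
 * Note (added for clarity): STCK stores the 64-bit TOD clock, whose bit 51
 * ticks once per microsecond, so one microsecond equals 4096 units of the
 * full TOD value; dividing the delta by 4096 therefore converts it to
 * microseconds.
 */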

static int
__clear_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, (struct irb *)__LC_IRB);
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_reset(100);
	}
	return -EBUSY;
}

static int pgm_check_occurred;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occurred = 1;
}

static int stsch_reset(struct subchannel_id schid, volatile struct schib *addr)
{
	int rc;

	pgm_check_occurred = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occurred. */
	barrier();

	if (pgm_check_occurred)
		return -EIO;
	else
		return rc;
}

static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch (__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		if (__clear_subchannel_easy(schid))
			break; /* give up... */
		stsch(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
	return 0;
}

static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}

#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
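	/* << 12 converts RCHP_TIMEOUT from microseconds to TOD clock units (4096 per microsecond). */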
	timeout = get_clock() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_clock() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}

static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);

struct sch_match_id {
	struct subchannel_id schid;
	struct ccw_dev_id devid;
	int rc;
};

static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct sch_match_id *match_id = data;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == match_id->devid.devno) &&
	    (schid.ssid == match_id->devid.ssid)) {
		match_id->schid = schid;
		match_id->rc = 0;
		return 1;
	}
	return 0;
}

static int reipl_find_schid(struct ccw_dev_id *devid,
			    struct subchannel_id *schid)
{
	struct sch_match_id match_id;

	match_id.devid = *devid;
	match_id.rc = -ENODEV;
	for_each_subchannel(__reipl_subchannel_match, &match_id);
	if (match_id.rc == 0)
		*schid = match_id.schid;
	return match_id.rc;
}

extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id schid;

	s390_reset_system();
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL device not found\n");
	do_reipl_asm(*((__u32 *)&schid));
}

int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
	if (!schid.one)
		return -ENODEV;
	if (stsch(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}
1087