xref: /openbmc/linux/drivers/s390/cio/device_fsm.c (revision 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2)
1 /*
2  * drivers/s390/cio/device_fsm.c
3  * finite state machine for device handling
4  *
5  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
6  *			 IBM Corporation
7  *    Author(s): Cornelia Huck(cohuck@de.ibm.com)
8  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10 
11 #include <linux/module.h>
12 #include <linux/config.h>
13 #include <linux/init.h>
14 
15 #include <asm/ccwdev.h>
16 #include <asm/qdio.h>
17 
18 #include "cio.h"
19 #include "cio_debug.h"
20 #include "css.h"
21 #include "device.h"
22 #include "chsc.h"
23 #include "ioasm.h"
24 #include "qdio.h"
25 
26 int
27 device_is_online(struct subchannel *sch)
28 {
29 	struct ccw_device *cdev;
30 
31 	if (!sch->dev.driver_data)
32 		return 0;
33 	cdev = sch->dev.driver_data;
34 	return (cdev->private->state == DEV_STATE_ONLINE);
35 }
36 
37 int
38 device_is_disconnected(struct subchannel *sch)
39 {
40 	struct ccw_device *cdev;
41 
42 	if (!sch->dev.driver_data)
43 		return 0;
44 	cdev = sch->dev.driver_data;
45 	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
46 		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
47 }
48 
/*
 * Put the ccw device bound to @sch into the disconnected state: stop any
 * pending timer, drop a pending fake interrupt and set the state. No-op
 * if no device is bound to the subchannel.
 */
void
device_set_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	ccw_device_set_timeout(cdev, 0);
	/* A fake irb must not be delivered once we are disconnected. */
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
}
61 
/*
 * Put the ccw device bound to @sch into the wait-for-I/O state with a
 * 10 second timeout. No-op if no device is bound to the subchannel.
 */
void
device_set_waiting(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	ccw_device_set_timeout(cdev, 10*HZ);
	cdev->private->state = DEV_STATE_WAIT4IO;
}
73 
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 * Runs in timer (softirq) context; the ccwlock serializes it against
 * the rest of the state machine.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
87 
/*
 * Set timeout
 *
 * Arm the per-device timer to fire ccw_device_timeout() in @expires
 * jiffies; @expires == 0 cancels a pending timer instead.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns nonzero if the timer was still
		 * active and could simply be re-armed; otherwise it may
		 * have fired in between and we fall through to set it
		 * up from scratch. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
107 
/* Kill any pending timers after machine check. No-op if no ccw device
 * is bound to the subchannel. */
void
device_kill_pending_timer(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	ccw_device_set_timeout(cdev, 0);
}
119 
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the subchannel status block from the hardware. */
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. Only possible while neither a halt nor a
	 * clear is already pending on the subchannel. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* 0 means the halt was started; expect an irq. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}
168 
/*
 * A formerly disconnected device became operational again. Check whether
 * it is still the same device; if not, schedule de-/re-registration.
 * Returns 1 if it is the same device (caller may bring it online again),
 * 0 if re-registration was scheduled instead.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	/* Same device: notify the driver once we are done. */
	cdev->private->flags.donotify = 1;
	return 1;
}
194 
195 /*
196  * The machine won't give us any notification by machine check if a chpid has
197  * been varied online on the SE so we have to find out by magic (i. e. driving
198  * the channel subsystem to device selection and updating our path masks).
199  */
200 static inline void
201 __recover_lost_chpids(struct subchannel *sch, int old_lpm)
202 {
203 	int mask, i;
204 
205 	for (i = 0; i<8; i++) {
206 		mask = 0x80 >> i;
207 		if (!(sch->lpm & mask))
208 			continue;
209 		if (old_lpm & mask)
210 			continue;
211 		chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
212 	}
213 }
214 
/*
 * Stop device recognition.
 *
 * Called with the recognition result in @state (DEV_STATE_NOT_OPER,
 * DEV_STATE_OFFLINE or DEV_STATE_BOXED). Stops the recognition timer,
 * disables the subchannel, refreshes the path masks and finishes the
 * state transition, notifying waiters and the device layer.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Stay disconnected; recognition is over. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Reappeared after disconnect; verify identity. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type   = cdev->private->senseid.cu_type,
			.cu_model  = cdev->private->senseid.cu_model,
			.dev_type  = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
298 
299 /*
300  * Function called from device_id.c after sense id has completed.
301  */
302 void
303 ccw_device_sense_id_done(struct ccw_device *cdev, int err)
304 {
305 	switch (err) {
306 	case 0:
307 		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
308 		break;
309 	case -ETIME:		/* Sense id stopped by timeout. */
310 		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
311 		break;
312 	default:
313 		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
314 		break;
315 	}
316 }
317 
/*
 * Work function: ask the subchannel driver whether it wants a formerly
 * disconnected device back (CIO_OPER). If not, de- and re-register the
 * device; otherwise just wake up anyone waiting on it.
 */
static void
ccw_device_oper_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_OPER) : 0;
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg((void *)cdev);
	else
		wake_up(&cdev->private->wait_q);
}
335 
/*
 * Finished with online/offline processing.
 *
 * Enter the final @state: disable the subchannel unless going online,
 * reset the accumulated irb, emit a message for boxed devices, schedule
 * a deferred CIO_OPER notification if requested and wake up waiters.
 * Drops the device reference taken by ccw_device_online() when the
 * transition did not end in the online state.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	if (cdev->private->flags.donotify) {
		/* Deferred from ccw_device_handle_oper(). */
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
371 
/*
 * Function called from device_pgid.c after sense path group id has
 * completed. Continues with path verification on success, otherwise
 * finishes the online transition with an appropriate end state.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
401 
/*
 * Start device recognition.
 *
 * Enables the subchannel and kicks off sense id with a 60 second
 * deadline. Only valid from the not-oper or boxed states; returns
 * -EINVAL otherwise, or the cio_enable_subchannel() error code.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
436 
/*
 * Handle timeout in device recognition.
 *
 * Try to terminate the outstanding I/O; if it is stopped, end
 * recognition as boxed (or not-oper when the device vanished);
 * otherwise retry in 3 seconds.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		/* -EBUSY: halt/clear still in flight, poll again. */
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}
457 
458 
/*
 * Work function: all paths to the device are gone. Ask the subchannel
 * driver whether it wants to keep the device (CIO_NO_PATH). If not,
 * schedule unregistration of the subchannel; otherwise disconnect the
 * device so it can come back when a path reappears.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		if (get_device(&sch->dev)) {
			/* Driver doesn't want to keep device. */
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				/* Both references are consumed by the
				 * unregister work function. */
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* Device ref failed; drop subchannel ref. */
				put_device(&sch->dev);
		}
	} else {
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
494 
/*
 * Called when path verification has finished with result @err.
 * On success the device goes online and a fake irb is delivered to the
 * driver if an I/O request was accepted while verification was pending.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			/* Pretend the start function just got accepted. */
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw = (struct scsw) {
				.cc = 1,
				.fctl = SCSW_FCTL_START_FUNC,
				.actl = SCSW_ACTL_START_PEND,
				.stctl = SCSW_STCTL_STATUS_PEND,
			};
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* No usable path left; let the driver decide what to do. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
531 
/*
 * Get device online.
 *
 * Valid from the offline or boxed states. Enables the subchannel and
 * either goes straight to online (no path grouping) or starts with a
 * sense pgid. Takes a device reference that ccw_device_done() releases
 * if the transition fails. Returns 0 on success, -EINVAL for a wrong
 * start state, -ENODEV or the cio_enable_subchannel() error otherwise.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
565 
566 void
567 ccw_device_disband_done(struct ccw_device *cdev, int err)
568 {
569 	switch (err) {
570 	case 0:
571 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
572 		break;
573 	case -ETIME:
574 		ccw_device_done(cdev, DEV_STATE_BOXED);
575 		break;
576 	default:
577 		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
578 		break;
579 	}
580 }
581 
582 /*
583  * Shutdown device.
584  */
585 int
586 ccw_device_offline(struct ccw_device *cdev)
587 {
588 	struct subchannel *sch;
589 
590 	sch = to_subchannel(cdev->dev.parent);
591 	if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
592 		return -ENODEV;
593 	if (cdev->private->state != DEV_STATE_ONLINE) {
594 		if (sch->schib.scsw.actl != 0)
595 			return -EBUSY;
596 		return -EINVAL;
597 	}
598 	if (sch->schib.scsw.actl != 0)
599 		return -EBUSY;
600 	/* Are we doing path grouping? */
601 	if (!cdev->private->options.pgroup) {
602 		/* No, set state offline immediately. */
603 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
604 		return 0;
605 	}
606 	/* Start Set Path Group commands. */
607 	cdev->private->state = DEV_STATE_DISBAND_PGID;
608 	ccw_device_disband_start(cdev);
609 	return 0;
610 }
611 
/*
 * Handle timeout in device online/offline process.
 *
 * Try to terminate the outstanding I/O and finish the transition as
 * boxed (or not-oper if the device is gone); retry in 3 seconds while
 * halt/clear is still pending.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		/* -EBUSY: still cancelling, poll again. */
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}
632 
/*
 * Handle not oper event in device recognition:
 * finish recognition with the not-operational result.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
641 
/*
 * Handle not operational event while offline:
 * mark the device not operational and schedule unregistration of the
 * subchannel (the device reference is consumed by the work function).
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
659 
660 /*
661  * Handle not operational event while online.
662  */
663 static void
664 ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
665 {
666 	struct subchannel *sch;
667 
668 	sch = to_subchannel(cdev->dev.parent);
669 	if (sch->driver->notify &&
670 	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
671 			ccw_device_set_timeout(cdev, 0);
672 			cdev->private->flags.fake_irb = 0;
673 			cdev->private->state = DEV_STATE_DISCONNECTED;
674 			wake_up(&cdev->private->wait_q);
675 			return;
676 	}
677 	cdev->private->state = DEV_STATE_NOT_OPER;
678 	cio_disable_subchannel(sch);
679 	if (sch->schib.scsw.actl != 0) {
680 		// FIXME: not-oper indication to device driver ?
681 		ccw_device_call_handler(cdev);
682 	}
683 	if (get_device(&cdev->dev)) {
684 		PREPARE_WORK(&cdev->private->kick_work,
685 			     ccw_device_call_sch_unregister, (void *)cdev);
686 		queue_work(ccw_device_work, &cdev->private->kick_work);
687 	}
688 	wake_up(&cdev->private->wait_q);
689 }
690 
/*
 * Handle path verification event.
 *
 * Start path verification now if the device is idle; otherwise remember
 * the request in flags.doverify so it is run once the current I/O has
 * finished. No-op when path grouping is disabled.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Waiting for basic sense; defer verification. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->irq, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verfication now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
726 
/*
 * Got an interrupt for a normal io (state online).
 *
 * Unsolicited interrupts are forwarded to the driver directly (after
 * starting a basic sense if a unit check arrived without concurrent
 * sense data). Solicited status is accumulated; a basic sense is
 * started if required, otherwise the driver handler is called and a
 * deferred path verification is kicked off if one is pending.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is delivered in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
768 
/*
 * Got an timeout in online state.
 *
 * Try to cancel the running I/O. While cancellation is in flight, move
 * to the timeout-kill state and retry; if the device is gone, notify
 * (no path left) or raise a not-oper event; otherwise report -ETIMEDOUT
 * to the driver.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
799 
/*
 * Got an interrupt for a basic sense.
 *
 * Accumulates the sense data into the stored irb; restarts basic sense
 * if more data is needed, otherwise returns to the online state and
 * delivers the result to the driver (starting deferred path
 * verification if one is pending).
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	/* The irb is delivered in the lowcore. */
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
836 
/*
 * Interrupt while a cio initiated clear is pending during path
 * verification: accumulate the status and retry verification.
 */
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}
849 
/*
 * Interrupt in the timeout-kill state: the halt/clear issued after an
 * I/O timeout has completed. Report -ETIMEDOUT to the driver, then
 * either notify about lost paths or run deferred path verification.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
870 
/*
 * Timeout in the timeout-kill state: keep trying to cancel the I/O.
 * Retries while halt/clear is pending, handles a vanished device, and
 * otherwise reports -ETIMEDOUT to the driver.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still cancelling; poll again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
900 
/*
 * Interrupt in the wait-for-I/O state (device reappeared while an I/O
 * was outstanding): accumulate status, start basic sense if needed,
 * deliver the result and deal with lost paths or pending verification.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
934 
/*
 * Timeout in the wait-for-I/O state: try to cancel the outstanding I/O.
 * While cancellation is pending, switch to the timeout-kill state;
 * otherwise report -ETIMEDOUT and handle lost paths / deferred
 * verification like the interrupt case.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
970 
/*
 * Verification request while waiting for I/O: just remember it; it is
 * started once the I/O has terminated (and only with path grouping).
 */
static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* When the I/O has terminated, we have to start verification. */
	if (cdev->private->options.pgroup)
		cdev->private->flags.doverify = 1;
}
978 
/*
 * Steal-lock operation finished (interrupt or timeout): accumulate the
 * status unless the interrupt was unsolicited, then wake up the waiter.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1004 
/*
 * Re-start device recognition for a disconnected device: enable the
 * subchannel and begin sense id in the disconnected-sense-id state,
 * with the usual 60 second deadline.
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
1021 
/*
 * A path to a disconnected device may have come back: refresh the
 * subchannel information, restore the pmcw bits that were lost and
 * restart device recognition. No-op unless a device is bound to @sch
 * and it is in the disconnected state.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->irq, &sch->schib))
		return;

	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* Enable multipathing when more than one path is available. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
}
1055 
/*
 * Interrupt while the device is offline.
 */
static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}
1068 
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Retry the pending set-schib operation (presumably a channel
	 * measurement configuration change — TODO confirm against cmf
	 * code), then go back online and re-dispatch the event that got
	 * us here to the regular online handlers. */
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1076 
1077 
1078 static void
1079 ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
1080 {
1081 	ccw_device_set_timeout(cdev, 0);
1082 	if (dev_event == DEV_EVENT_NOTOPER)
1083 		cdev->private->state = DEV_STATE_NOT_OPER;
1084 	else
1085 		cdev->private->state = DEV_STATE_OFFLINE;
1086 	wake_up(&cdev->private->wait_q);
1087 }
1088 
1089 static void
1090 ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
1091 {
1092 	int ret;
1093 
1094 	ret = ccw_device_cancel_halt_clear(cdev);
1095 	switch (ret) {
1096 	case 0:
1097 		cdev->private->state = DEV_STATE_OFFLINE;
1098 		wake_up(&cdev->private->wait_q);
1099 		break;
1100 	case -ENODEV:
1101 		cdev->private->state = DEV_STATE_NOT_OPER;
1102 		wake_up(&cdev->private->wait_q);
1103 		break;
1104 	default:
1105 		ccw_device_set_timeout(cdev, HZ/10);
1106 	}
1107 }
1108 
1109 /*
1110  * No operation action. This is used e.g. to ignore a timeout event in
1111  * state offline.
1112  */
1113 static void
1114 ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
1115 {
1116 }
1117 
1118 /*
1119  * Bug operation action.
1120  */
1121 static void
1122 ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
1123 {
1124 	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
1125 	       cdev->private->state, dev_event);
1126 	BUG();
1127 }
1128 
1129 /*
1130  * device statemachine
1131  */
1132 fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
1133 	[DEV_STATE_NOT_OPER] = {
1134 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1135 		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
1136 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1137 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1138 	},
1139 	[DEV_STATE_SENSE_PGID] = {
1140 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1141 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
1142 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1143 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1144 	},
1145 	[DEV_STATE_SENSE_ID] = {
1146 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1147 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1148 		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1149 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1150 	},
1151 	[DEV_STATE_OFFLINE] = {
1152 		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1153 		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
1154 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1155 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1156 	},
1157 	[DEV_STATE_VERIFY] = {
1158 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1159 		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
1160 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1161 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1162 	},
1163 	[DEV_STATE_ONLINE] = {
1164 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1165 		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
1166 		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
1167 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1168 	},
1169 	[DEV_STATE_W4SENSE] = {
1170 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1171 		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
1172 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1173 		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
1174 	},
1175 	[DEV_STATE_DISBAND_PGID] = {
1176 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1177 		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
1178 		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
1179 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1180 	},
1181 	[DEV_STATE_BOXED] = {
1182 		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
1183 		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
1184 		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
1185 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1186 	},
1187 	/* states to wait for i/o completion before doing something */
1188 	[DEV_STATE_CLEAR_VERIFY] = {
1189 		[DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
1190 		[DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
1191 		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
1192 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1193 	},
1194 	[DEV_STATE_TIMEOUT_KILL] = {
1195 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1196 		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
1197 		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
1198 		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
1199 	},
1200 	[DEV_STATE_WAIT4IO] = {
1201 		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
1202 		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
1203 		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
1204 		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
1205 	},
1206 	[DEV_STATE_QUIESCE] = {
1207 		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
1208 		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
1209 		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
1210 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1211 	},
1212 	/* special states for devices gone not operational */
1213 	[DEV_STATE_DISCONNECTED] = {
1214 		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
1215 		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
1216 		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
1217 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1218 	},
1219 	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
1220 		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
1221 		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
1222 		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
1223 		[DEV_EVENT_VERIFY]	= ccw_device_nop,
1224 	},
1225 	[DEV_STATE_CMFCHANGE] = {
1226 		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
1227 		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
1228 		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
1229 		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
1230 	},
1231 };
1232 
1233 /*
1234  * io_subchannel_irq is called for "real" interrupts or for status
1235  * pending conditions on msch.
1236  */
1237 void
1238 io_subchannel_irq (struct device *pdev)
1239 {
1240 	struct ccw_device *cdev;
1241 
1242 	cdev = to_subchannel(pdev)->dev.driver_data;
1243 
1244 	CIO_TRACE_EVENT (3, "IRQ");
1245 	CIO_TRACE_EVENT (3, pdev->bus_id);
1246 	if (cdev)
1247 		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
1248 }
1249 
/* Exported (GPL-only) for use by other kernel modules. */
EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
1251