/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

int
device_is_online(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_ONLINE);
}

int
device_is_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

void
device_set_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void device_set_intretry(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	cdev->private->flags.intretry = 1;
}

int device_trigger_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev || !cdev->online)
		return -EINVAL;
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return 0;
}

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

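/* Timeout logging is enabled by booting with "ccw_timeout_log" on the
 * kernel command line. */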
__setup("ccw_timeout_log", ccw_timeout_log_setup);

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &private->orb, sizeof(private->orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
	    (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
		printk(KERN_WARNING "cio: last channel program (intern):\n");
	else
		printk(KERN_WARNING "cio: last channel program:\n");

	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       (void *)(addr_t)private->orb.cpa,
		       sizeof(struct ccw1), 0);
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
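		/*
		 * mod_timer() returns nonzero if it re-armed a timer that
		 * was still pending; nothing more to do in that case.
		 */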
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}

/* Kill any pending timers after machine check. */
void
device_kill_pending_timer(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
}

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if the device is now idle, -ENODEV if the device is not
 * operational and -EBUSY if an interrupt is expected (either from
 * halt/clear or from a status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear(sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}
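
/*
 * Typical caller pattern (a sketch based on the timeout handlers later
 * in this file): re-arm a timer and retry while -EBUSY is returned,
 * since halt/clear completion is signaled by an interrupt:
 *
 *	ret = ccw_device_cancel_halt_clear(cdev);
 *	if (ret == -EBUSY)
 *		ccw_device_set_timeout(cdev, 3*HZ);
 */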
235 
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	cdev->private->flags.donotify = 1;
	return 1;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE, so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(2, "SenseID : unknown device %04x on "
			      "subchannel 0.%x.%04x\n",
			      cdev->private->dev_id.devno,
			      sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type   = cdev->private->senseid.cu_type;
		cdev->id.cu_model  = cdev->private->senseid.cu_model;
		cdev->id.dev_type  = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: "
			      "CU  Type/Mod = %04X/%02X, Dev Type/Mod = "
			      "%04X/%02X\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno,
			      cdev->id.cu_type, cdev->id.cu_model,
			      cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "SenseID : boxed device %04x on "
			      " subchannel 0.%x.%04x\n",
			      cdev->private->dev_id.devno,
			      sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

static void
ccw_device_oper_notify(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver && sch->driver->notify) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		ret = sch->driver->notify(sch, CIO_OPER);
		spin_lock_irqsave(cdev->ccwlock, flags);
	} else
		ret = 0;
	if (ret) {
		/* Reenable channel measurements, if needed. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		cmf_reenable(cdev);
		spin_lock_irqsave(cdev->ccwlock, flags);
		wake_up(&cdev->private->wait_q);
	}
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg(work);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	if (state == DEV_STATE_BOXED)
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device(&cdev->dev);
}

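/*
 * Compare two path group ids; the first byte is skipped since it holds
 * the per-path state bits (cf. the SNID_STATE1_* checks below) rather
 * than the id itself.
 */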
static int cmp_pgid(struct pgid *p1, struct pgid *p2)
{
	char *c1;
	char *c2;

	c1 = (char *)p1;
	c2 = (char *)p2;

	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
}

static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects that the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}

/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
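		/* fall through to the success case */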
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online) {
			ccw_device_set_timeout(cdev, 0);
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not be coming from an interrupt from the
	 * subchannel, we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now;
		 * delay it until the final status has been delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

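	/* __LC_IRB is the lowcore area where the I/O interrupt handler
	 * stored the irb. */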
	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in the online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meantime. If so,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		/* Retry Basic Sense if requested. */
		if (cdev->private->flags.intretry) {
			cdev->private->flags.intretry = 0;
			ccw_device_do_sense(cdev, irb);
			return;
		}
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Remember to clear irb to avoid residuals. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

void device_kill_io(struct subchannel *sch)
{
	int ret;
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}

void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	if (dev_event == DEV_EVENT_NOTOPER)
		cdev->private->state = DEV_STATE_NOT_OPER;
	else
		cdev->private->state = DEV_STATE_OFFLINE;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		cdev->private->state = DEV_STATE_OFFLINE;
		wake_up(&cdev->private->wait_q);
		break;
	case -ENODEV:
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
		break;
	default:
		ccw_device_set_timeout(cdev, HZ/10);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	CIO_MSG_EVENT(0, "Internal state [%i][%i] not handled for device "
		      "0.%x.%04x\n", cdev->private->state, dev_event,
		      cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	BUG();
}

/*
 * device statemachine
 */
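/*
 * Events are dispatched through dev_fsm_event() (see device.h), which
 * indexes this table by the device's current state and the incoming
 * event:
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */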
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, /* FIXME */
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);