xref: /openbmc/linux/drivers/s390/cio/device.c (revision 7dd65feb)
1 /*
2  *  drivers/s390/cio/device.c
3  *  bus driver for ccw devices
4  *
5  *    Copyright IBM Corp. 2002,2008
6  *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
7  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
8  *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
9  */
10 
11 #define KMSG_COMPONENT "cio"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/spinlock.h>
17 #include <linux/errno.h>
18 #include <linux/err.h>
19 #include <linux/slab.h>
20 #include <linux/list.h>
21 #include <linux/device.h>
22 #include <linux/workqueue.h>
23 #include <linux/timer.h>
24 
25 #include <asm/ccwdev.h>
26 #include <asm/cio.h>
27 #include <asm/param.h>		/* HZ */
28 #include <asm/cmb.h>
29 #include <asm/isc.h>
30 
31 #include "chp.h"
32 #include "cio.h"
33 #include "cio_debug.h"
34 #include "css.h"
35 #include "device.h"
36 #include "ioasm.h"
37 #include "io_sch.h"
38 #include "blacklist.h"
39 
40 static struct timer_list recovery_timer;
41 static DEFINE_SPINLOCK(recovery_lock);
42 static int recovery_phase;
43 static const unsigned long recovery_delay[] = { 3, 30, 300 };
44 
45 /******************* bus type handling ***********************/
46 
47 /* The Linux driver model distinguishes between a bus type and
48  * the bus itself. Of course we only have one channel
49  * subsystem driver and one channel system per machine, but
50  * we still use the abstraction. T.R. says it's a good idea. */
51 static int
52 ccw_bus_match (struct device * dev, struct device_driver * drv)
53 {
54 	struct ccw_device *cdev = to_ccwdev(dev);
55 	struct ccw_driver *cdrv = to_ccwdrv(drv);
56 	const struct ccw_device_id *ids = cdrv->ids, *found;
57 
58 	if (!ids)
59 		return 0;
60 
61 	found = ccw_device_id_match(ids, &cdev->id);
62 	if (!found)
63 		return 0;
64 
65 	cdev->id.driver_info = found->driver_info;
66 
67 	return 1;
68 }
69 
70 /* Store modalias string delimited by prefix/suffix string into buffer with
71  * specified size. Return length of resulting string (excluding trailing '\0')
72  * even if string doesn't fit buffer (snprintf semantics). */
73 static int snprint_alias(char *buf, size_t size,
74 			 struct ccw_device_id *id, const char *suffix)
75 {
76 	int len;
77 
78 	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
79 	if (len > size)
80 		return len;
81 	buf += len;
82 	size -= len;
83 
84 	if (id->dev_type != 0)
85 		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
86 				id->dev_model, suffix);
87 	else
88 		len += snprintf(buf, size, "dtdm%s", suffix);
89 
90 	return len;
91 }
92 
93 /* Set up environment variables for ccw device uevent. Return 0 on success,
94  * non-zero otherwise. */
95 static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
96 {
97 	struct ccw_device *cdev = to_ccwdev(dev);
98 	struct ccw_device_id *id = &(cdev->id);
99 	int ret;
100 	char modalias_buf[30];
101 
102 	/* CU_TYPE= */
103 	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
104 	if (ret)
105 		return ret;
106 
107 	/* CU_MODEL= */
108 	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
109 	if (ret)
110 		return ret;
111 
112 	/* The next two can be zero, that's ok for us */
113 	/* DEV_TYPE= */
114 	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
115 	if (ret)
116 		return ret;
117 
118 	/* DEV_MODEL= */
119 	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
120 	if (ret)
121 		return ret;
122 
123 	/* MODALIAS=  */
124 	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
125 	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
126 	return ret;
127 }
128 
129 struct bus_type ccw_bus_type;
130 
131 static void io_subchannel_irq(struct subchannel *);
132 static int io_subchannel_probe(struct subchannel *);
133 static int io_subchannel_remove(struct subchannel *);
134 static void io_subchannel_shutdown(struct subchannel *);
135 static int io_subchannel_sch_event(struct subchannel *, int);
136 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
137 				   int);
138 static void recovery_func(unsigned long data);
139 struct workqueue_struct *ccw_device_work;
140 wait_queue_head_t ccw_device_init_wq;
141 atomic_t ccw_device_init_count;
142 
143 static struct css_device_id io_subchannel_ids[] = {
144 	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
145 	{ /* end of list */ },
146 };
147 MODULE_DEVICE_TABLE(css, io_subchannel_ids);
148 
149 static int io_subchannel_prepare(struct subchannel *sch)
150 {
151 	struct ccw_device *cdev;
152 	/*
153 	 * Don't allow suspend while a ccw device registration
154 	 * is still outstanding.
155 	 */
156 	cdev = sch_get_cdev(sch);
157 	if (cdev && !device_is_registered(&cdev->dev))
158 		return -EAGAIN;
159 	return 0;
160 }
161 
/* Wait until all outstanding device recognition has finished and all
 * queued ccw device work has been processed. */
static void io_subchannel_settle(void)
{
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(ccw_device_work);
}
168 
/* css bus driver callbacks for subchannels of type I/O. */
static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = io_subchannel_ids,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};
182 
183 int __init io_subchannel_init(void)
184 {
185 	int ret;
186 
187 	init_waitqueue_head(&ccw_device_init_wq);
188 	atomic_set(&ccw_device_init_count, 0);
189 	setup_timer(&recovery_timer, recovery_func, 0);
190 
191 	ccw_device_work = create_singlethread_workqueue("cio");
192 	if (!ccw_device_work)
193 		return -ENOMEM;
194 	slow_path_wq = create_singlethread_workqueue("kslowcrw");
195 	if (!slow_path_wq) {
196 		ret = -ENOMEM;
197 		goto out_err;
198 	}
199 	if ((ret = bus_register (&ccw_bus_type)))
200 		goto out_err;
201 
202 	ret = css_driver_register(&io_subchannel_driver);
203 	if (ret)
204 		goto out_err;
205 
206 	return 0;
207 out_err:
208 	if (ccw_device_work)
209 		destroy_workqueue(ccw_device_work);
210 	if (slow_path_wq)
211 		destroy_workqueue(slow_path_wq);
212 	return ret;
213 }
214 
215 
216 /************************ device handling **************************/
217 
218 /*
219  * A ccw_device has some interfaces in sysfs in addition to the
220  * standard ones.
221  * The following entries are designed to export the information which
222  * resided in 2.4 in /proc/subchannels. Subchannel and device number
223  * are obvious, so they don't have an entry :)
224  * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
225  */
226 static ssize_t
227 chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
228 {
229 	struct subchannel *sch = to_subchannel(dev);
230 	struct chsc_ssd_info *ssd = &sch->ssd_info;
231 	ssize_t ret = 0;
232 	int chp;
233 	int mask;
234 
235 	for (chp = 0; chp < 8; chp++) {
236 		mask = 0x80 >> chp;
237 		if (ssd->path_mask & mask)
238 			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
239 		else
240 			ret += sprintf(buf + ret, "00 ");
241 	}
242 	ret += sprintf (buf+ret, "\n");
243 	return min((ssize_t)PAGE_SIZE, ret);
244 }
245 
246 static ssize_t
247 pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
248 {
249 	struct subchannel *sch = to_subchannel(dev);
250 	struct pmcw *pmcw = &sch->schib.pmcw;
251 
252 	return sprintf (buf, "%02x %02x %02x\n",
253 			pmcw->pim, pmcw->pam, pmcw->pom);
254 }
255 
256 static ssize_t
257 devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
258 {
259 	struct ccw_device *cdev = to_ccwdev(dev);
260 	struct ccw_device_id *id = &(cdev->id);
261 
262 	if (id->dev_type != 0)
263 		return sprintf(buf, "%04x/%02x\n",
264 				id->dev_type, id->dev_model);
265 	else
266 		return sprintf(buf, "n/a\n");
267 }
268 
269 static ssize_t
270 cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
271 {
272 	struct ccw_device *cdev = to_ccwdev(dev);
273 	struct ccw_device_id *id = &(cdev->id);
274 
275 	return sprintf(buf, "%04x/%02x\n",
276 		       id->cu_type, id->cu_model);
277 }
278 
279 static ssize_t
280 modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
281 {
282 	struct ccw_device *cdev = to_ccwdev(dev);
283 	struct ccw_device_id *id = &(cdev->id);
284 	int len;
285 
286 	len = snprint_alias(buf, PAGE_SIZE, id, "\n");
287 
288 	return len > PAGE_SIZE ? PAGE_SIZE : len;
289 }
290 
291 static ssize_t
292 online_show (struct device *dev, struct device_attribute *attr, char *buf)
293 {
294 	struct ccw_device *cdev = to_ccwdev(dev);
295 
296 	return sprintf(buf, cdev->online ? "1\n" : "0\n");
297 }
298 
/* A ccw device is an orphan iff its parent is the pseudo subchannel. */
int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}
303 
/*
 * Undo ccw_device_register(): remove the device from the driver core
 * (if device_add() had completed) and drop the initial reference that
 * has been held since device_initialize().
 */
static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}
316 
317 static void io_subchannel_quiesce(struct subchannel *);
318 
319 /**
320  * ccw_device_set_offline() - disable a ccw device for I/O
321  * @cdev: target ccw device
322  *
323  * This function calls the driver's set_offline() function for @cdev, if
324  * given, and then disables @cdev.
325  * Returns:
326  *   %0 on success and a negative error value on failure.
327  * Context:
328  *  enabled, ccw device lock not held
329  */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	/* Give the driver a chance to veto or prepare for going offline. */
	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		/* -EBUSY: quiesce the subchannel (lock dropped meanwhile),
		 * restore the FSM state and retry going offline. */
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warning("%s: The device entered boxed state while "
			   "being set offline\n", dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warning("%s: The device stopped operating while "
			   "being set offline\n", dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
394 
395 /**
396  * ccw_device_set_online() - enable a ccw device for I/O
397  * @cdev: target ccw device
398  *
399  * This function first enables @cdev and then calls the driver's set_online()
400  * function for @cdev, if given. If set_online() returns an error, @cdev is
401  * disabled again.
402  * Returns:
403  *   %0 on success and a negative error value on failure.
404  * Context:
405  *  enabled, ccw device lock not held
406  */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		/* NOTE(review): state is re-read after dropping the lock
		 * here; presumed stable once a final state is reached. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warning("%s: Setting the device online failed "
				   "because it is boxed\n",
				   dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warning("%s: Setting the device online failed "
				   "because it is not operational\n",
				   dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Let the driver do its part of onlining; roll back on failure. */
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;
	cdev->online = 1;
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
492 
493 static int online_store_handle_offline(struct ccw_device *cdev)
494 {
495 	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
496 		spin_lock_irq(cdev->ccwlock);
497 		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
498 		spin_unlock_irq(cdev->ccwlock);
499 	} else if (cdev->online && cdev->drv && cdev->drv->set_offline)
500 		return ccw_device_set_offline(cdev);
501 	return 0;
502 }
503 
504 static int online_store_recog_and_online(struct ccw_device *cdev)
505 {
506 	/* Do device recognition, if needed. */
507 	if (cdev->private->state == DEV_STATE_BOXED) {
508 		spin_lock_irq(cdev->ccwlock);
509 		ccw_device_recognition(cdev);
510 		spin_unlock_irq(cdev->ccwlock);
511 		wait_event(cdev->private->wait_q,
512 			   cdev->private->flags.recog_done);
513 		if (cdev->private->state != DEV_STATE_OFFLINE)
514 			/* recognition failed */
515 			return -EAGAIN;
516 	}
517 	if (cdev->drv && cdev->drv->set_online)
518 		ccw_device_set_online(cdev);
519 	return 0;
520 }
521 
/*
 * Handle a "1" or "force" write to the online attribute.  With @force
 * set, a boxed device is additionally subjected to an unconditional
 * reserve (stlck) and, if it never identified itself (cu_type == 0),
 * to re-recognition before a second online attempt.
 */
static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}
541 
/* sysfs store for the "online" attribute: accepts "0", "1" or "force". */
static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Reject requests while the device FSM is in transition. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED)
		return -EAGAIN;
	/* Serialize concurrent online/offline processing via onoff. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	/* Pin the driver module while its callbacks may run. */
	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
585 
586 static ssize_t
587 available_show (struct device *dev, struct device_attribute *attr, char *buf)
588 {
589 	struct ccw_device *cdev = to_ccwdev(dev);
590 	struct subchannel *sch;
591 
592 	if (ccw_device_is_orphan(cdev))
593 		return sprintf(buf, "no device\n");
594 	switch (cdev->private->state) {
595 	case DEV_STATE_BOXED:
596 		return sprintf(buf, "boxed\n");
597 	case DEV_STATE_DISCONNECTED:
598 	case DEV_STATE_DISCONNECTED_SENSE_ID:
599 	case DEV_STATE_NOT_OPER:
600 		sch = to_subchannel(dev->parent);
601 		if (!sch->lpm)
602 			return sprintf(buf, "no path\n");
603 		else
604 			return sprintf(buf, "no device\n");
605 	default:
606 		/* All other states considered fine. */
607 		return sprintf(buf, "good\n");
608 	}
609 }
610 
/* sysfs attribute definitions backed by the _show/_store helpers above. */
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

/* Attributes attached to the subchannel device itself. */
static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

/* Attributes attached to every ccw device. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};
647 
648 /* this is a simple abstraction for device_register that sets the
649  * correct bus type and adds the bus specific files */
650 static int ccw_device_register(struct ccw_device *cdev)
651 {
652 	struct device *dev = &cdev->dev;
653 	int ret;
654 
655 	dev->bus = &ccw_bus_type;
656 	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
657 			   cdev->private->dev_id.devno);
658 	if (ret)
659 		return ret;
660 	return device_add(dev);
661 }
662 
663 static int match_dev_id(struct device *dev, void *data)
664 {
665 	struct ccw_device *cdev = to_ccwdev(dev);
666 	struct ccw_dev_id *dev_id = data;
667 
668 	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
669 }
670 
671 static struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
672 {
673 	struct device *dev;
674 
675 	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);
676 
677 	return dev ? to_ccwdev(dev) : NULL;
678 }
679 
680 static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
681 {
682 	int ret;
683 
684 	if (device_is_registered(&cdev->dev)) {
685 		device_release_driver(&cdev->dev);
686 		ret = device_attach(&cdev->dev);
687 		WARN_ON(ret == -ENODEV);
688 	}
689 }
690 
/*
 * Final release callback for a ccw device: drop the parent subchannel
 * reference taken in io_subchannel_initialize_dev() and free memory.
 */
static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}
702 
703 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
704 {
705 	struct ccw_device *cdev;
706 
707 	cdev  = kzalloc(sizeof(*cdev), GFP_KERNEL);
708 	if (cdev) {
709 		cdev->private = kzalloc(sizeof(struct ccw_device_private),
710 					GFP_KERNEL | GFP_DMA);
711 		if (cdev->private)
712 			return cdev;
713 	}
714 	kfree(cdev);
715 	return ERR_PTR(-ENOMEM);
716 }
717 
718 static void ccw_device_todo(struct work_struct *work);
719 
/*
 * Perform the first half of device registration (device_initialize())
 * for a freshly allocated ccw device and attach it to its parent
 * subchannel, taking a reference on the subchannel.  Returns 0 on
 * success or -ENODEV if the subchannel reference cannot be obtained.
 */
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	cdev->private->flags.initialized = 1;
	return 0;
}
739 
/* Allocate and initialize a ccw device for the given subchannel.
 * Returns the device or an ERR_PTR on failure. */
static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_allocate_dev(sch);
	int rc;

	if (IS_ERR(cdev))
		return cdev;
	rc = io_subchannel_initialize_dev(sch, cdev);
	if (rc)
		return ERR_PTR(rc);
	return cdev;
}
753 
754 static void io_subchannel_recog(struct ccw_device *, struct subchannel *);
755 
/* Allocate a new ccw device for @sch and start recognition; if the
 * allocation fails, the subchannel is of no further use and is
 * unregistered. */
static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev = io_subchannel_create_ccwdev(sch);

	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	io_subchannel_recog(cdev, sch);
}
770 
771 /*
772  * Register recognized device.
773  */
/* Register a recognized ccw device with the driver core (worker
 * context).  Also handles the re-registration case for previously
 * boxed devices. */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	/* Balance the atomic_inc() done in io_subchannel_recog(). */
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
834 
/*
 * Unregister the subchannel that is the parent of @cdev.  A temporary
 * reference on the subchannel keeps it valid across the call.
 */
static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}
847 
848 /*
849  * subchannel recognition done. Called from the state machine.
850  */
/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		/* Early during boot: just mark recognition as finished. */
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
		/* fallthrough */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}
877 
/* Set up the private data of @cdev from @sch and start asynchronous
 * device recognition (sense ID) under the subchannel lock. */
static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	struct ccw_device_private *priv;

	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}
903 
/*
 * Reparent @cdev from its current subchannel to @sch: disable the old
 * subchannel, move the device in the driver model, then hook the device
 * up to the new subchannel.  Rolls back on failure.  Returns 0 on
 * success or a negative error code.
 */
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}
967 
/* Move @cdev to the orphanage, i.e. the css pseudo subchannel. */
static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}
975 
976 static void io_subchannel_irq(struct subchannel *sch)
977 {
978 	struct ccw_device *cdev;
979 
980 	cdev = sch_get_cdev(sch);
981 
982 	CIO_TRACE_EVENT(6, "IRQ");
983 	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
984 	if (cdev)
985 		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
986 }
987 
/* Reset the subchannel configuration to defaults with concurrent
 * sense enabled. */
void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}
993 
/* Initialize path masks, interruption subclass and the default
 * configuration of @sch from its schib; the console subchannel gets
 * all paths in its operational mask unconditionally. */
static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
1011 
1012 /*
1013  * Note: We always return 0 so that we bind to the device even on error.
1014  * This is needed so that our remove function is called on unregister.
1015  */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		cdev->private->flags.initialized = 1;
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected since we
		 * didn't obtain a reference in ccw_device_set_online.
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private)
		goto out_schedule;
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	/* On error, schedule unregistration of the subchannel but still
	 * return 0 so that our remove function gets called (see the
	 * comment above this function). */
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}
1074 
/*
 * Driver-model remove callback: detach and unregister the ccw device
 * attached to @sch (if any) and release the subchannel's private data
 * and sysfs attributes.
 */
static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(sch->private);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}
1095 
1096 static void io_subchannel_verify(struct subchannel *sch)
1097 {
1098 	struct ccw_device *cdev;
1099 
1100 	cdev = sch_get_cdev(sch);
1101 	if (cdev)
1102 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1103 }
1104 
/*
 * Terminate I/O that currently uses the channel path(s) in @mask and
 * trigger path verification; if the subchannel turns out to be invalid,
 * flag the device not operational instead.
 */
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		/* Let the device state machine terminate the request. */
		ccw_device_kill_io(cdev);
		goto out;
	}
	/* Device not online - clear the subchannel directly. */
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}
1131 
/*
 * React to a channel-path event affecting @sch: adjust the operational
 * and logical path masks and terminate or re-verify I/O as needed.
 * Returns -ENODEV if the subchannel is no longer valid, 0 otherwise.
 */
static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		/* Path is logically removed - take it out of both masks. */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		/* Path becomes usable only if it is also operational. */
		sch->lpm |= mask & sch->opm;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}
1165 
/*
 * Quiesce the subchannel: terminate any running I/O and disable the
 * subchannel. May temporarily drop sch->lock while waiting for a
 * cancel/halt/clear to take effect. No-op for the console subchannel
 * and for subchannels that are not enabled.
 */
static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	/* -EBUSY: I/O still in flight - tell the driver it is gone. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			/* Wait (lock released) until the state machine has
			 * left DEV_STATE_QUIESCE, then retry. */
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}
1197 
/* Driver-model shutdown callback: quiesce the subchannel. */
static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}
1202 
1203 static int device_is_disconnected(struct ccw_device *cdev)
1204 {
1205 	if (!cdev)
1206 		return 0;
1207 	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
1208 		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
1209 }
1210 
1211 static int recovery_check(struct device *dev, void *data)
1212 {
1213 	struct ccw_device *cdev = to_ccwdev(dev);
1214 	int *redo = data;
1215 
1216 	spin_lock_irq(cdev->ccwlock);
1217 	switch (cdev->private->state) {
1218 	case DEV_STATE_DISCONNECTED:
1219 		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1220 			      cdev->private->dev_id.ssid,
1221 			      cdev->private->dev_id.devno);
1222 		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
1223 		*redo = 1;
1224 		break;
1225 	case DEV_STATE_DISCONNECTED_SENSE_ID:
1226 		*redo = 1;
1227 		break;
1228 	}
1229 	spin_unlock_irq(cdev->ccwlock);
1230 
1231 	return 0;
1232 }
1233 
1234 static void recovery_work_func(struct work_struct *unused)
1235 {
1236 	int redo = 0;
1237 
1238 	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
1239 	if (redo) {
1240 		spin_lock_irq(&recovery_lock);
1241 		if (!timer_pending(&recovery_timer)) {
1242 			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
1243 				recovery_phase++;
1244 			mod_timer(&recovery_timer, jiffies +
1245 				  recovery_delay[recovery_phase] * HZ);
1246 		}
1247 		spin_unlock_irq(&recovery_lock);
1248 	} else
1249 		CIO_MSG_EVENT(4, "recovery: end\n");
1250 }
1251 
/* Recovery passes run in process context via this work item. */
static DECLARE_WORK(recovery_work, recovery_work_func);

/* Timer callback for the recovery timer. */
static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}
1262 
1263 static void ccw_device_schedule_recovery(void)
1264 {
1265 	unsigned long flags;
1266 
1267 	CIO_MSG_EVENT(4, "recovery: schedule\n");
1268 	spin_lock_irqsave(&recovery_lock, flags);
1269 	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1270 		recovery_phase = 0;
1271 		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
1272 	}
1273 	spin_unlock_irqrestore(&recovery_lock, flags);
1274 }
1275 
/*
 * bus_for_each_dev() callback: schedule unregistration of @dev if it
 * is offline and on the cio blacklist. Returns -EINTR to abort the
 * bus iteration when a signal is pending.
 */
static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}
1295 
1296 /**
1297  * ccw_purge_blacklisted - purge unused, blacklisted devices
1298  *
1299  * Unregister all ccw devices that are offline and on the blacklist.
1300  */
1301 int ccw_purge_blacklisted(void)
1302 {
1303 	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
1304 	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
1305 	return 0;
1306 }
1307 
/*
 * Mark @cdev as disconnected, cancel any pending timeout and, if the
 * device was online, kick off the recovery machinery. Safe to call
 * with a NULL @cdev.
 */
void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}
1318 
/* Mark @cdev not operational and disable its subchannel. */
void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}
1329 
/* Actions determined by sch_get_action() and carried out by
 * io_subchannel_sch_event(). */
enum io_sch_action {
	IO_SCH_UNREG,		/* unregister the subchannel */
	IO_SCH_ORPH_UNREG,	/* move device to orphanage, unregister sch */
	IO_SCH_ATTACH,		/* attach a (new or relocated) ccw device */
	IO_SCH_UNREG_ATTACH,	/* unregister old device, then attach */
	IO_SCH_ORPH_ATTACH,	/* orphan old device, then attach */
	IO_SCH_REPROBE,		/* trigger device recognition */
	IO_SCH_VERIFY,		/* trigger path verification */
	IO_SCH_DISC,		/* mark device disconnected */
	IO_SCH_NOP,		/* nothing to do */
};
1341 
/*
 * Inspect the current state of the subchannel and the attached ccw
 * device and decide which action io_subchannel_sch_event() should take.
 */
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (!ccw_device_notify(cdev, CIO_GONE))
			return IO_SCH_UNREG;
		/* Driver accepted the event - keep device in orphanage. */
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		/* A different device number now answers on this subchannel. */
		if (!ccw_device_notify(cdev, CIO_GONE))
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		/* No usable path left. */
		if (!ccw_device_notify(cdev, CIO_NO_PATH))
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	return IO_SCH_NOP;
}
1374 
1375 /**
1376  * io_subchannel_sch_event - process subchannel event
1377  * @sch: subchannel
1378  * @process: non-zero if function is called in process context
1379  *
1380  * An unspecified event occurred for this subchannel. Adjust data according
1381  * to the current operational state of the subchannel and device. Return
1382  * zero when the event has been handled sufficiently or -EAGAIN when this
1383  * function should be called again in process context.
1384  */
1385 static int io_subchannel_sch_event(struct subchannel *sch, int process)
1386 {
1387 	unsigned long flags;
1388 	struct ccw_device *cdev;
1389 	struct ccw_dev_id dev_id;
1390 	enum io_sch_action action;
1391 	int rc = -EAGAIN;
1392 
1393 	spin_lock_irqsave(sch->lock, flags);
1394 	if (!device_is_registered(&sch->dev))
1395 		goto out_unlock;
1396 	if (work_pending(&sch->todo_work))
1397 		goto out_unlock;
1398 	cdev = sch_get_cdev(sch);
1399 	if (cdev && work_pending(&cdev->private->todo_work))
1400 		goto out_unlock;
1401 	action = sch_get_action(sch);
1402 	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1403 		      sch->schid.ssid, sch->schid.sch_no, process,
1404 		      action);
1405 	/* Perform immediate actions while holding the lock. */
1406 	switch (action) {
1407 	case IO_SCH_REPROBE:
1408 		/* Trigger device recognition. */
1409 		ccw_device_trigger_reprobe(cdev);
1410 		rc = 0;
1411 		goto out_unlock;
1412 	case IO_SCH_VERIFY:
1413 		/* Trigger path verification. */
1414 		io_subchannel_verify(sch);
1415 		rc = 0;
1416 		goto out_unlock;
1417 	case IO_SCH_DISC:
1418 		ccw_device_set_disconnected(cdev);
1419 		rc = 0;
1420 		goto out_unlock;
1421 	case IO_SCH_ORPH_UNREG:
1422 	case IO_SCH_ORPH_ATTACH:
1423 		ccw_device_set_disconnected(cdev);
1424 		break;
1425 	case IO_SCH_UNREG_ATTACH:
1426 	case IO_SCH_UNREG:
1427 		if (cdev)
1428 			ccw_device_set_notoper(cdev);
1429 		break;
1430 	case IO_SCH_NOP:
1431 		rc = 0;
1432 		goto out_unlock;
1433 	default:
1434 		break;
1435 	}
1436 	spin_unlock_irqrestore(sch->lock, flags);
1437 	/* All other actions require process context. */
1438 	if (!process)
1439 		goto out;
1440 	/* Handle attached ccw device. */
1441 	switch (action) {
1442 	case IO_SCH_ORPH_UNREG:
1443 	case IO_SCH_ORPH_ATTACH:
1444 		/* Move ccw device to orphanage. */
1445 		rc = ccw_device_move_to_orph(cdev);
1446 		if (rc)
1447 			goto out;
1448 		break;
1449 	case IO_SCH_UNREG_ATTACH:
1450 		/* Unregister ccw device. */
1451 		ccw_device_unregister(cdev);
1452 		break;
1453 	default:
1454 		break;
1455 	}
1456 	/* Handle subchannel. */
1457 	switch (action) {
1458 	case IO_SCH_ORPH_UNREG:
1459 	case IO_SCH_UNREG:
1460 		css_sch_device_unregister(sch);
1461 		break;
1462 	case IO_SCH_ORPH_ATTACH:
1463 	case IO_SCH_UNREG_ATTACH:
1464 	case IO_SCH_ATTACH:
1465 		dev_id.ssid = sch->schid.ssid;
1466 		dev_id.devno = sch->schib.pmcw.dev;
1467 		cdev = get_ccwdev_by_dev_id(&dev_id);
1468 		if (!cdev) {
1469 			sch_create_and_recog_new_device(sch);
1470 			break;
1471 		}
1472 		rc = ccw_device_move_to_sch(cdev, sch);
1473 		if (rc) {
1474 			/* Release reference from get_ccwdev_by_dev_id() */
1475 			put_device(&cdev->dev);
1476 			goto out;
1477 		}
1478 		spin_lock_irqsave(sch->lock, flags);
1479 		ccw_device_trigger_reprobe(cdev);
1480 		spin_unlock_irqrestore(sch->lock, flags);
1481 		/* Release reference from get_ccwdev_by_dev_id() */
1482 		put_device(&cdev->dev);
1483 		break;
1484 	default:
1485 		break;
1486 	}
1487 	return 0;
1488 
1489 out_unlock:
1490 	spin_unlock_irqrestore(sch->lock, flags);
1491 out:
1492 	return rc;
1493 }
1494 
#ifdef CONFIG_CCW_CONSOLE
/* The single, statically allocated console ccw device and its private
 * data. console_cdev_in_use guards against a second probe (set via xchg). */
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

/* Return the lock serializing access to the console ccw device. */
spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}
1506 
/*
 * Enable the console ccw device: attach private data to the console
 * subchannel, run device recognition synchronously and bring the
 * device online. Returns 0 on success, a negative error code otherwise.
 */
static int ccw_device_console_enable(struct ccw_device *cdev,
				     struct subchannel *sch)
{
	int rc;

	/* Attach subchannel private data. */
	sch->private = cio_get_console_priv();
	memset(sch->private, 0, sizeof(struct io_subchannel_private));
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	/* Initialize the ccw_device structure. */
	cdev->dev.parent= &sch->dev;
	sch_set_cdev(sch, cdev);
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	/* Recognition succeeded - now bring the device online and wait
	 * for that to finish as well. */
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}
1541 
/*
 * Probe and enable the console ccw device. Only a single console
 * device is supported (guarded by console_cdev_in_use). Returns the
 * console ccw device on success or an ERR_PTR value.
 */
struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	/* Atomically claim the console slot. */
	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
1568 
static int ccw_device_pm_restore(struct device *dev);

/* Force the console device back into a usable state by running the
 * PM restore path for it. Fails if no console device is set up. */
int ccw_device_force_console(void)
{
	if (!console_cdev_in_use)
		return -ENODEV;
	return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
1578 #endif
1579 
1580 /*
1581  * get ccw_device matching the busid, but only if owned by cdrv
1582  */
1583 static int
1584 __ccwdev_check_busid(struct device *dev, void *id)
1585 {
1586 	char *bus_id;
1587 
1588 	bus_id = id;
1589 
1590 	return (strcmp(bus_id, dev_name(dev)) == 0);
1591 }
1592 
1593 
1594 /**
1595  * get_ccwdev_by_busid() - obtain device from a bus id
1596  * @cdrv: driver the device is owned by
1597  * @bus_id: bus id of the device to be searched
1598  *
1599  * This function searches all devices owned by @cdrv for a device with a bus
1600  * id matching @bus_id.
1601  * Returns:
1602  *  If a match is found, its reference count of the found device is increased
1603  *  and it is returned; else %NULL is returned.
1604  */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;
	struct device_driver *drv;

	/* Pin the driver while we iterate over its devices. */
	drv = get_driver(&cdrv->driver);
	if (!drv)
		return NULL;

	/* driver_find_device() takes a reference on the matched device;
	 * the caller is responsible for dropping it. */
	dev = driver_find_device(drv, NULL, (void *)bus_id,
				 __ccwdev_check_busid);
	put_driver(drv);

	return dev ? to_ccwdev(dev) : NULL;
}
1621 
1622 /************************** device driver handling ************************/
1623 
1624 /* This is the implementation of the ccw_driver class. The probe, remove
1625  * and release methods are initially very similar to the device_driver
1626  * implementations, with the difference that they have ccw_device
1627  * arguments.
1628  *
1629  * A ccw driver also contains the information that is needed for
1630  * device matching.
1631  */
1632 static int
1633 ccw_device_probe (struct device *dev)
1634 {
1635 	struct ccw_device *cdev = to_ccwdev(dev);
1636 	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
1637 	int ret;
1638 
1639 	cdev->drv = cdrv; /* to let the driver call _set_online */
1640 
1641 	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
1642 
1643 	if (ret) {
1644 		cdev->drv = NULL;
1645 		return ret;
1646 	}
1647 
1648 	return 0;
1649 }
1650 
/*
 * Bus remove callback: detach @dev from its ccw driver, taking the
 * device offline first if it is still online.
 */
static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		/* Wait until the offline processing has reached a final
		 * state; log (but otherwise ignore) failures. */
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}
1680 
1681 static void ccw_device_shutdown(struct device *dev)
1682 {
1683 	struct ccw_device *cdev;
1684 
1685 	cdev = to_ccwdev(dev);
1686 	if (cdev->drv && cdev->drv->shutdown)
1687 		cdev->drv->shutdown(cdev);
1688 	disable_cmf(cdev);
1689 }
1690 
1691 static int ccw_device_pm_prepare(struct device *dev)
1692 {
1693 	struct ccw_device *cdev = to_ccwdev(dev);
1694 
1695 	if (work_pending(&cdev->private->todo_work))
1696 		return -EAGAIN;
1697 	/* Fail while device is being set online/offline. */
1698 	if (atomic_read(&cdev->private->onoff))
1699 		return -EAGAIN;
1700 
1701 	if (cdev->online && cdev->drv && cdev->drv->prepare)
1702 		return cdev->drv->prepare(cdev);
1703 
1704 	return 0;
1705 }
1706 
1707 static void ccw_device_pm_complete(struct device *dev)
1708 {
1709 	struct ccw_device *cdev = to_ccwdev(dev);
1710 
1711 	if (cdev->online && cdev->drv && cdev->drv->complete)
1712 		cdev->drv->complete(cdev);
1713 }
1714 
/*
 * PM freeze callback: let the driver freeze, stop channel measurement
 * and finally disable the subchannel to cut off driver I/O.
 */
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	/* Sample the channel-measurement state under the lock. */
	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}
1748 
/*
 * PM thaw callback - counterpart to ccw_device_pm_freeze(): re-enable
 * the subchannel, channel measurement and driver operation.
 */
static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}
1777 
/*
 * Re-run device recognition after resume and wait for it to reach a
 * final (or disconnected) state. No-op for the console subchannel.
 */
static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cio_is_console(sch->schid))
		goto out;
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	spin_lock_irq(sch->lock);
	cdev->private->flags.resuming = 1;
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
out:
	cdev->private->flags.resuming = 0;
}
1797 
1798 static int resume_handle_boxed(struct ccw_device *cdev)
1799 {
1800 	cdev->private->state = DEV_STATE_BOXED;
1801 	if (ccw_device_notify(cdev, CIO_BOXED))
1802 		return 0;
1803 	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1804 	return -ENODEV;
1805 }
1806 
1807 static int resume_handle_disc(struct ccw_device *cdev)
1808 {
1809 	cdev->private->state = DEV_STATE_DISCONNECTED;
1810 	if (ccw_device_notify(cdev, CIO_GONE))
1811 		return 0;
1812 	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1813 	return -ENODEV;
1814 }
1815 
/*
 * PM restore callback: re-run device recognition after resume, handle
 * devices that changed or disappeared while suspended and bring
 * previously online devices back online (re-enabling channel
 * measurement if it was active).
 */
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret = 0, cm_enabled;

	__ccw_device_pm_restore(cdev);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		/* Console: just re-enable the subchannel. */
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		spin_unlock_irq(sch->lock);
		goto out_restore;
	}
	cdev->private->flags.donotify = 0;
	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		spin_unlock_irq(sch->lock);
		if (ret)
			goto out;
		goto out_restore;
	case DEV_STATE_DISCONNECTED:
		goto out_disc_unlock;
	default:
		goto out_unreg_unlock;
	}
	/* check if the device id has changed */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
			      "changed from %04x to %04x)\n",
			      sch->schid.ssid, sch->schid.sch_no,
			      cdev->private->dev_id.devno,
			      sch->schib.pmcw.dev);
		goto out_unreg_unlock;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online) {
		ret = 0;
		goto out_unlock;
	}
	ret = ccw_device_online(cdev);
	if (ret)
		goto out_disc_unlock;

	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);

	/* Wait (unlocked) for the online processing to complete. */
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	if (cdev->private->state != DEV_STATE_ONLINE) {
		spin_lock_irq(sch->lock);
		goto out_disc_unlock;
	}
	if (cm_enabled) {
		/* A cmf failure is logged but does not fail the restore. */
		ret = ccw_set_cmf(cdev, 1);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
out:
	return ret;

out_disc_unlock:
	ret = resume_handle_disc(cdev);
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;
	goto out_restore;

out_unreg_unlock:
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
	ret = -ENODEV;
out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}
1907 
/* Power management operations for the ccw bus. */
static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};
1915 
/* The ccw bus type - matches ccw devices against ccw drivers. */
struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};
1925 
1926 /**
1927  * ccw_driver_register() - register a ccw driver
1928  * @cdriver: driver to be registered
1929  *
1930  * This function is mainly a wrapper around driver_register().
1931  * Returns:
1932  *   %0 on success and a negative error value on failure.
1933  */
1934 int ccw_driver_register(struct ccw_driver *cdriver)
1935 {
1936 	struct device_driver *drv = &cdriver->driver;
1937 
1938 	drv->bus = &ccw_bus_type;
1939 	drv->name = cdriver->name;
1940 	drv->owner = cdriver->owner;
1941 
1942 	return driver_register(drv);
1943 }
1944 
1945 /**
1946  * ccw_driver_unregister() - deregister a ccw driver
1947  * @cdriver: driver to be deregistered
1948  *
1949  * This function is mainly a wrapper around driver_unregister().
1950  */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	/* Simply forward to the driver core. */
	driver_unregister(&cdriver->driver);
}
1955 
1956 /* Helper func for qdio. */
1957 struct subchannel_id
1958 ccw_device_get_subchannel_id(struct ccw_device *cdev)
1959 {
1960 	struct subchannel *sch;
1961 
1962 	sch = to_subchannel(cdev->dev.parent);
1963 	return sch->schid;
1964 }
1965 
/*
 * Work function executing the operation scheduled via
 * ccw_device_sched_todo(). Runs in process context and releases the
 * device reference taken when the work was queued.
 */
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}
2010 
2011 /**
2012  * ccw_device_sched_todo - schedule ccw device operation
2013  * @cdev: ccw device
2014  * @todo: todo
2015  *
2016  * Schedule the operation identified by @todo to be performed on the slow path
2017  * workqueue. Do nothing if another operation with higher priority is already
2018  * scheduled. Needs to be called with ccwdev lock held.
2019  */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	/* A numerically larger todo value takes priority; keep it. */
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
2036 
2037 MODULE_LICENSE("GPL");
2038 EXPORT_SYMBOL(ccw_device_set_online);
2039 EXPORT_SYMBOL(ccw_device_set_offline);
2040 EXPORT_SYMBOL(ccw_driver_register);
2041 EXPORT_SYMBOL(ccw_driver_unregister);
2042 EXPORT_SYMBOL(get_ccwdev_by_busid);
2043 EXPORT_SYMBOL(ccw_bus_type);
2044 EXPORT_SYMBOL(ccw_device_work);
2045 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
2046