1 /*
2  * File...........: linux/drivers/s390/block/dasd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  */
10 
11 #define KMSG_COMPONENT "dasd"
12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
13 
14 #include <linux/kmod.h>
15 #include <linux/init.h>
16 #include <linux/interrupt.h>
17 #include <linux/ctype.h>
18 #include <linux/major.h>
19 #include <linux/slab.h>
20 #include <linux/buffer_head.h>
21 #include <linux/hdreg.h>
22 #include <linux/async.h>
23 #include <linux/mutex.h>
24 
25 #include <asm/ccwdev.h>
26 #include <asm/ebcdic.h>
27 #include <asm/idals.h>
28 #include <asm/itcw.h>
29 #include <asm/diag.h>
30 
31 /* This is ugly... */
32 #define PRINTK_HEADER "dasd:"
33 
34 #include "dasd_int.h"
35 /*
36  * SECTION: Constant definitions to be used within this file
37  */
38 #define DASD_CHANQ_MAX_SIZE 4
39 
40 #define DASD_SLEEPON_START_TAG	(void *) 1
41 #define DASD_SLEEPON_END_TAG	(void *) 2
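/*
 * The two tags above are stored in cqr->callback_data by the sleep_on
 * helpers: START_TAG marks a request that has been queued, END_TAG is set
 * by dasd_wakeup_cb() once the request has completed.
 */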
42 
43 /*
44  * SECTION: exported variables of dasd.c
45  */
46 debug_info_t *dasd_debug_area;
47 struct dasd_discipline *dasd_diag_discipline_pointer;
48 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
49 
50 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
51 MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
52 		   " Copyright 2000 IBM Corporation");
53 MODULE_SUPPORTED_DEVICE("dasd");
54 MODULE_LICENSE("GPL");
55 
56 /*
57  * SECTION: prototypes for static functions of dasd.c
58  */
59 static int  dasd_alloc_queue(struct dasd_block *);
60 static void dasd_setup_queue(struct dasd_block *);
61 static void dasd_free_queue(struct dasd_block *);
62 static void dasd_flush_request_queue(struct dasd_block *);
63 static int dasd_flush_block_queue(struct dasd_block *);
64 static void dasd_device_tasklet(struct dasd_device *);
65 static void dasd_block_tasklet(struct dasd_block *);
66 static void do_kick_device(struct work_struct *);
67 static void do_restore_device(struct work_struct *);
68 static void do_reload_device(struct work_struct *);
69 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
70 static void dasd_device_timeout(unsigned long);
71 static void dasd_block_timeout(unsigned long);
72 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
73 
74 /*
75  * SECTION: Operations on the device structure.
76  */
77 static wait_queue_head_t dasd_init_waitq;
78 static wait_queue_head_t dasd_flush_wq;
79 static wait_queue_head_t generic_waitq;
80 
81 /*
82  * Allocate memory for a new device structure.
83  */
84 struct dasd_device *dasd_alloc_device(void)
85 {
86 	struct dasd_device *device;
87 
88 	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
89 	if (!device)
90 		return ERR_PTR(-ENOMEM);
91 
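	/*
	 * Note: GFP_DMA is used for the ccw and erp areas below because
	 * channel programs must be 31-bit addressable (ZONE_DMA on s390 is
	 * the storage below 2 GB); GFP_ATOMIC keeps the allocation from
	 * sleeping.
	 */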
92 	/* Get two pages for normal block device operations. */
93 	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
94 	if (!device->ccw_mem) {
95 		kfree(device);
96 		return ERR_PTR(-ENOMEM);
97 	}
98 	/* Get one page for error recovery. */
99 	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
100 	if (!device->erp_mem) {
101 		free_pages((unsigned long) device->ccw_mem, 1);
102 		kfree(device);
103 		return ERR_PTR(-ENOMEM);
104 	}
105 
106 	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
107 	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
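	/*
	 * The two-page ccw chunk list backs dasd_smalloc_request(); the
	 * one-page erp chunk list is kept separate so that error recovery
	 * can still build requests when the normal pool is exhausted.
	 */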
108 	spin_lock_init(&device->mem_lock);
109 	atomic_set(&device->tasklet_scheduled, 0);
110 	tasklet_init(&device->tasklet,
111 		     (void (*)(unsigned long)) dasd_device_tasklet,
112 		     (unsigned long) device);
113 	INIT_LIST_HEAD(&device->ccw_queue);
114 	init_timer(&device->timer);
115 	device->timer.function = dasd_device_timeout;
116 	device->timer.data = (unsigned long) device;
117 	INIT_WORK(&device->kick_work, do_kick_device);
118 	INIT_WORK(&device->restore_device, do_restore_device);
119 	INIT_WORK(&device->reload_device, do_reload_device);
120 	device->state = DASD_STATE_NEW;
121 	device->target = DASD_STATE_NEW;
122 	mutex_init(&device->state_mutex);
123 
124 	return device;
125 }
126 
127 /*
128  * Free memory of a device structure.
129  */
130 void dasd_free_device(struct dasd_device *device)
131 {
132 	kfree(device->private);
133 	free_page((unsigned long) device->erp_mem);
134 	free_pages((unsigned long) device->ccw_mem, 1);
135 	kfree(device);
136 }
137 
138 /*
139  * Allocate memory for a new block structure.
140  */
141 struct dasd_block *dasd_alloc_block(void)
142 {
143 	struct dasd_block *block;
144 
145 	block = kzalloc(sizeof(*block), GFP_ATOMIC);
146 	if (!block)
147 		return ERR_PTR(-ENOMEM);
148 	/* open_count = 0 means device online but not in use */
149 	atomic_set(&block->open_count, -1);
150 
151 	spin_lock_init(&block->request_queue_lock);
152 	atomic_set(&block->tasklet_scheduled, 0);
153 	tasklet_init(&block->tasklet,
154 		     (void (*)(unsigned long)) dasd_block_tasklet,
155 		     (unsigned long) block);
156 	INIT_LIST_HEAD(&block->ccw_queue);
157 	spin_lock_init(&block->queue_lock);
158 	init_timer(&block->timer);
159 	block->timer.function = dasd_block_timeout;
160 	block->timer.data = (unsigned long) block;
161 
162 	return block;
163 }
164 
165 /*
166  * Free memory of a block structure.
167  */
168 void dasd_free_block(struct dasd_block *block)
169 {
170 	kfree(block);
171 }
172 
173 /*
174  * Make a new device known to the system.
175  */
176 static int dasd_state_new_to_known(struct dasd_device *device)
177 {
178 	int rc;
179 
180 	/*
181 	 * As long as the device is not in state DASD_STATE_NEW we want to
182 	 * keep the reference count > 0.
183 	 */
184 	dasd_get_device(device);
185 
186 	if (device->block) {
187 		rc = dasd_alloc_queue(device->block);
188 		if (rc) {
189 			dasd_put_device(device);
190 			return rc;
191 		}
192 	}
193 	device->state = DASD_STATE_KNOWN;
194 	return 0;
195 }
196 
197 /*
198  * Let the system forget about a device.
199  */
200 static int dasd_state_known_to_new(struct dasd_device *device)
201 {
202 	/* Disable extended error reporting for this device. */
203 	dasd_eer_disable(device);
204 	/* Forget the discipline information. */
205 	if (device->discipline) {
206 		if (device->discipline->uncheck_device)
207 			device->discipline->uncheck_device(device);
208 		module_put(device->discipline->owner);
209 	}
210 	device->discipline = NULL;
211 	if (device->base_discipline)
212 		module_put(device->base_discipline->owner);
213 	device->base_discipline = NULL;
214 	device->state = DASD_STATE_NEW;
215 
216 	if (device->block)
217 		dasd_free_queue(device->block);
218 
219 	/* Give up reference we took in dasd_state_new_to_known. */
220 	dasd_put_device(device);
221 	return 0;
222 }
223 
224 /*
225  * Request the irq line for the device.
226  */
227 static int dasd_state_known_to_basic(struct dasd_device *device)
228 {
229 	int rc;
230 
231 	/* Allocate and register gendisk structure. */
232 	if (device->block) {
233 		rc = dasd_gendisk_alloc(device->block);
234 		if (rc)
235 			return rc;
236 	}
237 	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
238 	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
239 					    8 * sizeof(long));
240 	debug_register_view(device->debug_area, &debug_sprintf_view);
241 	debug_set_level(device->debug_area, DBF_WARNING);
242 	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");
243 
244 	device->state = DASD_STATE_BASIC;
245 	return 0;
246 }
247 
248 /*
249  * Release the irq line for the device. Terminate any running i/o.
250  */
251 static int dasd_state_basic_to_known(struct dasd_device *device)
252 {
253 	int rc;
254 	if (device->block) {
255 		dasd_gendisk_free(device->block);
256 		dasd_block_clear_timer(device->block);
257 	}
258 	rc = dasd_flush_device_queue(device);
259 	if (rc)
260 		return rc;
261 	dasd_device_clear_timer(device);
262 
263 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
264 	if (device->debug_area != NULL) {
265 		debug_unregister(device->debug_area);
266 		device->debug_area = NULL;
267 	}
268 	device->state = DASD_STATE_KNOWN;
269 	return 0;
270 }
271 
272 /*
273  * Do the initial analysis. The do_analysis function may return
274  * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
275  * until the discipline decides to continue the startup sequence
276  * by calling the function dasd_change_state. The eckd discipline
277  * uses this to start a ccw that detects the format. The completion
278  * interrupt for this detection ccw uses the kernel event daemon to
279  * trigger the call to dasd_change_state. All this is done in the
280  * discipline code, see dasd_eckd.c.
281  * After the analysis ccw is done (do_analysis returned 0) the block
282  * device is set up.
283  * In case the analysis returns an error, the device setup is stopped
284  * (a fake disk was already added to allow formatting).
285  */
286 static int dasd_state_basic_to_ready(struct dasd_device *device)
287 {
288 	int rc;
289 	struct dasd_block *block;
290 
291 	rc = 0;
292 	block = device->block;
293 	/* make disk known with correct capacity */
294 	if (block) {
295 		if (block->base->discipline->do_analysis != NULL)
296 			rc = block->base->discipline->do_analysis(block);
297 		if (rc) {
298 			if (rc != -EAGAIN)
299 				device->state = DASD_STATE_UNFMT;
300 			return rc;
301 		}
302 		dasd_setup_queue(block);
303 		set_capacity(block->gdp,
304 			     block->blocks << block->s2b_shift);
305 		device->state = DASD_STATE_READY;
306 		rc = dasd_scan_partitions(block);
307 		if (rc)
308 			device->state = DASD_STATE_BASIC;
309 	} else {
310 		device->state = DASD_STATE_READY;
311 	}
312 	return rc;
313 }
314 
315 /*
316  * Remove device from block device layer. Destroy dirty buffers.
317  * Forget format information. Check if the target level is basic
318  * and if it is create fake disk for formatting.
319  */
320 static int dasd_state_ready_to_basic(struct dasd_device *device)
321 {
322 	int rc;
323 
324 	device->state = DASD_STATE_BASIC;
325 	if (device->block) {
326 		struct dasd_block *block = device->block;
327 		rc = dasd_flush_block_queue(block);
328 		if (rc) {
329 			device->state = DASD_STATE_READY;
330 			return rc;
331 		}
332 		dasd_flush_request_queue(block);
333 		dasd_destroy_partitions(block);
334 		block->blocks = 0;
335 		block->bp_block = 0;
336 		block->s2b_shift = 0;
337 	}
338 	return 0;
339 }
340 
341 /*
342  * Back to basic.
343  */
344 static int dasd_state_unfmt_to_basic(struct dasd_device *device)
345 {
346 	device->state = DASD_STATE_BASIC;
347 	return 0;
348 }
349 
350 /*
351  * Make the device online and schedule the bottom half to start
352  * the requeueing of requests from the linux request queue to the
353  * ccw queue.
354  */
355 static int
356 dasd_state_ready_to_online(struct dasd_device * device)
357 {
358 	int rc;
359 	struct gendisk *disk;
360 	struct disk_part_iter piter;
361 	struct hd_struct *part;
362 
363 	if (device->discipline->ready_to_online) {
364 		rc = device->discipline->ready_to_online(device);
365 		if (rc)
366 			return rc;
367 	}
368 	device->state = DASD_STATE_ONLINE;
369 	if (device->block) {
370 		dasd_schedule_block_bh(device->block);
371 		disk = device->block->bdev->bd_disk;
372 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
373 		while ((part = disk_part_iter_next(&piter)))
374 			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
375 		disk_part_iter_exit(&piter);
376 	}
377 	return 0;
378 }
379 
380 /*
381  * Stop the requeueing of requests again.
382  */
383 static int dasd_state_online_to_ready(struct dasd_device *device)
384 {
385 	int rc;
386 	struct gendisk *disk;
387 	struct disk_part_iter piter;
388 	struct hd_struct *part;
389 
390 	if (device->discipline->online_to_ready) {
391 		rc = device->discipline->online_to_ready(device);
392 		if (rc)
393 			return rc;
394 	}
395 	device->state = DASD_STATE_READY;
396 	if (device->block) {
397 		disk = device->block->bdev->bd_disk;
398 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
399 		while ((part = disk_part_iter_next(&piter)))
400 			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
401 		disk_part_iter_exit(&piter);
402 	}
403 	return 0;
404 }
405 
406 /*
407  * Device startup state changes.
408  */
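/*
 * The states form an ordered ladder (see dasd_int.h):
 * NEW < KNOWN < BASIC < UNFMT < READY < ONLINE.
 * dasd_increase_state() climbs this ladder step by step until an error
 * occurs or device->target is reached.
 */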
409 static int dasd_increase_state(struct dasd_device *device)
410 {
411 	int rc;
412 
413 	rc = 0;
414 	if (device->state == DASD_STATE_NEW &&
415 	    device->target >= DASD_STATE_KNOWN)
416 		rc = dasd_state_new_to_known(device);
417 
418 	if (!rc &&
419 	    device->state == DASD_STATE_KNOWN &&
420 	    device->target >= DASD_STATE_BASIC)
421 		rc = dasd_state_known_to_basic(device);
422 
423 	if (!rc &&
424 	    device->state == DASD_STATE_BASIC &&
425 	    device->target >= DASD_STATE_READY)
426 		rc = dasd_state_basic_to_ready(device);
427 
428 	if (!rc &&
429 	    device->state == DASD_STATE_UNFMT &&
430 	    device->target > DASD_STATE_UNFMT)
431 		rc = -EPERM;
432 
433 	if (!rc &&
434 	    device->state == DASD_STATE_READY &&
435 	    device->target >= DASD_STATE_ONLINE)
436 		rc = dasd_state_ready_to_online(device);
437 
438 	return rc;
439 }
440 
441 /*
442  * Device shutdown state changes.
443  */
444 static int dasd_decrease_state(struct dasd_device *device)
445 {
446 	int rc;
447 
448 	rc = 0;
449 	if (device->state == DASD_STATE_ONLINE &&
450 	    device->target <= DASD_STATE_READY)
451 		rc = dasd_state_online_to_ready(device);
452 
453 	if (!rc &&
454 	    device->state == DASD_STATE_READY &&
455 	    device->target <= DASD_STATE_BASIC)
456 		rc = dasd_state_ready_to_basic(device);
457 
458 	if (!rc &&
459 	    device->state == DASD_STATE_UNFMT &&
460 	    device->target <= DASD_STATE_BASIC)
461 		rc = dasd_state_unfmt_to_basic(device);
462 
463 	if (!rc &&
464 	    device->state == DASD_STATE_BASIC &&
465 	    device->target <= DASD_STATE_KNOWN)
466 		rc = dasd_state_basic_to_known(device);
467 
468 	if (!rc &&
469 	    device->state == DASD_STATE_KNOWN &&
470 	    device->target <= DASD_STATE_NEW)
471 		rc = dasd_state_known_to_new(device);
472 
473 	return rc;
474 }
475 
476 /*
477  * This is the main startup/shutdown routine.
478  */
479 static void dasd_change_state(struct dasd_device *device)
480 {
481 	int rc;
482 
483 	if (device->state == device->target)
484 		/* Already where we want to go today... */
485 		return;
486 	if (device->state < device->target)
487 		rc = dasd_increase_state(device);
488 	else
489 		rc = dasd_decrease_state(device);
490 	if (rc == -EAGAIN)
491 		return;
492 	if (rc)
493 		device->target = device->state;
494 
495 	if (device->state == device->target)
496 		wake_up(&dasd_init_waitq);
497 
498 	/* let user-space know that the device status changed */
499 	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
500 }
501 
502 /*
503  * Kick starter for devices that did not complete the startup/shutdown
504  * procedure or were sleeping because of a pending state.
505  * dasd_kick_device will schedule a call to do_kick_device to the kernel
506  * event daemon.
507  */
508 static void do_kick_device(struct work_struct *work)
509 {
510 	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
511 	mutex_lock(&device->state_mutex);
512 	dasd_change_state(device);
513 	mutex_unlock(&device->state_mutex);
514 	dasd_schedule_device_bh(device);
515 	dasd_put_device(device);
516 }
517 
518 void dasd_kick_device(struct dasd_device *device)
519 {
520 	dasd_get_device(device);
521 	/* queue call to dasd_kick_device to the kernel event daemon. */
522 	schedule_work(&device->kick_work);
523 }
524 
525 /*
526  * dasd_reload_device will schedule a call to do_reload_device to the kernel
527  * event daemon.
528  */
529 static void do_reload_device(struct work_struct *work)
530 {
531 	struct dasd_device *device = container_of(work, struct dasd_device,
532 						  reload_device);
533 	device->discipline->reload(device);
534 	dasd_put_device(device);
535 }
536 
537 void dasd_reload_device(struct dasd_device *device)
538 {
539 	dasd_get_device(device);
540 	/* queue call to dasd_reload_device to the kernel event daemon. */
541 	schedule_work(&device->reload_device);
542 }
543 EXPORT_SYMBOL(dasd_reload_device);
544 
545 /*
546  * dasd_restore_device will schedule a call to do_restore_device to the kernel
547  * event daemon.
548  */
549 static void do_restore_device(struct work_struct *work)
550 {
551 	struct dasd_device *device = container_of(work, struct dasd_device,
552 						  restore_device);
553 	device->cdev->drv->restore(device->cdev);
554 	dasd_put_device(device);
555 }
556 
557 void dasd_restore_device(struct dasd_device *device)
558 {
559 	dasd_get_device(device);
560 	/* queue call to dasd_restore_device to the kernel event daemon. */
561 	schedule_work(&device->restore_device);
562 }
563 
564 /*
565  * Set the target state for a device and start the state change.
566  */
567 void dasd_set_target_state(struct dasd_device *device, int target)
568 {
569 	dasd_get_device(device);
570 	mutex_lock(&device->state_mutex);
571 	/* If we are in probeonly mode stop at DASD_STATE_READY. */
572 	if (dasd_probeonly && target > DASD_STATE_READY)
573 		target = DASD_STATE_READY;
574 	if (device->target != target) {
575 		if (device->state == target)
576 			wake_up(&dasd_init_waitq);
577 		device->target = target;
578 	}
579 	if (device->state != device->target)
580 		dasd_change_state(device);
581 	mutex_unlock(&device->state_mutex);
582 	dasd_put_device(device);
583 }
584 
585 /*
586  * Enable a device and wait until it has reached its target state.
587  */
588 static inline int _wait_for_device(struct dasd_device *device)
589 {
590 	return (device->state == device->target);
591 }
592 
593 void dasd_enable_device(struct dasd_device *device)
594 {
595 	dasd_set_target_state(device, DASD_STATE_ONLINE);
596 	if (device->state <= DASD_STATE_KNOWN)
597 		/* No discipline for device found. */
598 		dasd_set_target_state(device, DASD_STATE_NEW);
599 	/* Now wait for the devices to come up. */
600 	wait_event(dasd_init_waitq, _wait_for_device(device));
601 }
602 
603 /*
604  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
605  */
606 #ifdef CONFIG_DASD_PROFILE
607 
608 struct dasd_profile_info_t dasd_global_profile;
609 unsigned int dasd_profile_level = DASD_PROFILE_OFF;
610 
611 /*
612  * Increment the matching counter in the global and per-block profiling structures.
613  */
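/*
 * The index loop below implements a coarse logarithmic histogram: bucket 0
 * collects values below 4, every further bucket doubles the covered range,
 * and anything too large is capped in bucket 31.
 */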
614 #define dasd_profile_counter(value, counter, block) \
615 { \
616 	int index; \
617 	for (index = 0; index < 31 && value >> (2+index); index++); \
618 	dasd_global_profile.counter[index]++; \
619 	block->profile.counter[index]++; \
620 }
621 
622 /*
623  * Add profiling information for cqr before execution.
624  */
625 static void dasd_profile_start(struct dasd_block *block,
626 			       struct dasd_ccw_req *cqr,
627 			       struct request *req)
628 {
629 	struct list_head *l;
630 	unsigned int counter;
631 
632 	if (dasd_profile_level != DASD_PROFILE_ON)
633 		return;
634 
635 	/* count the length of the chanq for statistics */
636 	counter = 0;
637 	list_for_each(l, &block->ccw_queue)
638 		if (++counter >= 31)
639 			break;
640 	dasd_global_profile.dasd_io_nr_req[counter]++;
641 	block->profile.dasd_io_nr_req[counter]++;
642 }
643 
644 /*
645  * Add profiling information for cqr after execution.
646  */
647 static void dasd_profile_end(struct dasd_block *block,
648 			     struct dasd_ccw_req *cqr,
649 			     struct request *req)
650 {
651 	long strtime, irqtime, endtime, tottime;	/* in microseconds */
652 	long tottimeps, sectors;
653 
654 	if (dasd_profile_level != DASD_PROFILE_ON)
655 		return;
656 
657 	sectors = blk_rq_sectors(req);
658 	if (!cqr->buildclk || !cqr->startclk ||
659 	    !cqr->stopclk || !cqr->endclk ||
660 	    !sectors)
661 		return;
662 
663 	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
664 	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
665 	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
666 	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
667 	tottimeps = tottime / sectors;
668 
669 	if (!dasd_global_profile.dasd_io_reqs)
670 		memset(&dasd_global_profile, 0,
671 		       sizeof(struct dasd_profile_info_t));
672 	dasd_global_profile.dasd_io_reqs++;
673 	dasd_global_profile.dasd_io_sects += sectors;
674 
675 	if (!block->profile.dasd_io_reqs)
676 		memset(&block->profile, 0,
677 		       sizeof(struct dasd_profile_info_t));
678 	block->profile.dasd_io_reqs++;
679 	block->profile.dasd_io_sects += sectors;
680 
681 	dasd_profile_counter(sectors, dasd_io_secs, block);
682 	dasd_profile_counter(tottime, dasd_io_times, block);
683 	dasd_profile_counter(tottimeps, dasd_io_timps, block);
684 	dasd_profile_counter(strtime, dasd_io_time1, block);
685 	dasd_profile_counter(irqtime, dasd_io_time2, block);
686 	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
687 	dasd_profile_counter(endtime, dasd_io_time3, block);
688 }
689 #else
690 #define dasd_profile_start(block, cqr, req) do {} while (0)
691 #define dasd_profile_end(block, cqr, req) do {} while (0)
692 #endif				/* CONFIG_DASD_PROFILE */
693 
694 /*
695  * Allocate memory for a channel program with 'cplength' channel
696  * command words and 'datasize' additional space. There are two
697  * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
698  * memory and 2) dasd_smalloc_request uses the static ccw memory
699  * that gets allocated for each device.
700  */
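/*
 * Usage sketch (illustrative values only): a discipline that needs a single
 * CCW plus 32 bytes of payload would call
 *	cqr = dasd_smalloc_request(magic, 1, 32, device);
 * and later release the request with dasd_sfree_request(cqr, device).
 * Requests obtained from dasd_kmalloc_request() are freed with
 * dasd_kfree_request() instead.
 */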
701 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
702 					  int datasize,
703 					  struct dasd_device *device)
704 {
705 	struct dasd_ccw_req *cqr;
706 
707 	/* Sanity checks */
708 	BUG_ON(datasize > PAGE_SIZE ||
709 	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
710 
711 	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
712 	if (cqr == NULL)
713 		return ERR_PTR(-ENOMEM);
714 	cqr->cpaddr = NULL;
715 	if (cplength > 0) {
716 		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
717 				      GFP_ATOMIC | GFP_DMA);
718 		if (cqr->cpaddr == NULL) {
719 			kfree(cqr);
720 			return ERR_PTR(-ENOMEM);
721 		}
722 	}
723 	cqr->data = NULL;
724 	if (datasize > 0) {
725 		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
726 		if (cqr->data == NULL) {
727 			kfree(cqr->cpaddr);
728 			kfree(cqr);
729 			return ERR_PTR(-ENOMEM);
730 		}
731 	}
732 	cqr->magic =  magic;
733 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
734 	dasd_get_device(device);
735 	return cqr;
736 }
737 
738 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
739 					  int datasize,
740 					  struct dasd_device *device)
741 {
742 	unsigned long flags;
743 	struct dasd_ccw_req *cqr;
744 	char *data;
745 	int size;
746 
747 	/* Sanity checks */
748 	BUG_ON(datasize > PAGE_SIZE ||
749 	     (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
750 
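	/*
	 * Round the request header up to a multiple of 8 bytes so that the
	 * channel program placed directly behind it is doubleword aligned,
	 * as required for CCWs.
	 */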
751 	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
752 	if (cplength > 0)
753 		size += cplength * sizeof(struct ccw1);
754 	if (datasize > 0)
755 		size += datasize;
756 	spin_lock_irqsave(&device->mem_lock, flags);
757 	cqr = (struct dasd_ccw_req *)
758 		dasd_alloc_chunk(&device->ccw_chunks, size);
759 	spin_unlock_irqrestore(&device->mem_lock, flags);
760 	if (cqr == NULL)
761 		return ERR_PTR(-ENOMEM);
762 	memset(cqr, 0, sizeof(struct dasd_ccw_req));
763 	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
764 	cqr->cpaddr = NULL;
765 	if (cplength > 0) {
766 		cqr->cpaddr = (struct ccw1 *) data;
767 		data += cplength*sizeof(struct ccw1);
768 		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
769 	}
770 	cqr->data = NULL;
771 	if (datasize > 0) {
772 		cqr->data = data;
773  		memset(cqr->data, 0, datasize);
774 	}
775 	cqr->magic = magic;
776 	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
777 	dasd_get_device(device);
778 	return cqr;
779 }
780 
781 /*
782  * Free memory of a channel program. This function needs to free all the
783  * idal lists that might have been created by dasd_set_cda and the
784  * struct dasd_ccw_req itself.
785  */
786 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
787 {
788 #ifdef CONFIG_64BIT
789 	struct ccw1 *ccw;
790 
791 	/* Clear any idals used for the request. */
792 	ccw = cqr->cpaddr;
793 	do {
794 		clear_normalized_cda(ccw);
795 	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
796 #endif
797 	kfree(cqr->cpaddr);
798 	kfree(cqr->data);
799 	kfree(cqr);
800 	dasd_put_device(device);
801 }
802 
803 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
804 {
805 	unsigned long flags;
806 
807 	spin_lock_irqsave(&device->mem_lock, flags);
808 	dasd_free_chunk(&device->ccw_chunks, cqr);
809 	spin_unlock_irqrestore(&device->mem_lock, flags);
810 	dasd_put_device(device);
811 }
812 
813 /*
814  * Check discipline magic in cqr.
815  */
816 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
817 {
818 	struct dasd_device *device;
819 
820 	if (cqr == NULL)
821 		return -EINVAL;
822 	device = cqr->startdev;
823 	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
824 		DBF_DEV_EVENT(DBF_WARNING, device,
825 			    " dasd_ccw_req 0x%08x magic doesn't match"
826 			    " discipline 0x%08x",
827 			    cqr->magic,
828 			    *(unsigned int *) device->discipline->name);
829 		return -EINVAL;
830 	}
831 	return 0;
832 }
833 
834 /*
835  * Terminate the current i/o and set the request to clear_pending.
836  * Timer keeps the device running.
837  * ccw_device_clear can fail if the i/o subsystem
838  * is in a bad mood.
839  */
840 int dasd_term_IO(struct dasd_ccw_req *cqr)
841 {
842 	struct dasd_device *device;
843 	int retries, rc;
844 	char errorstring[ERRORLENGTH];
845 
846 	/* Check the cqr */
847 	rc = dasd_check_cqr(cqr);
848 	if (rc)
849 		return rc;
850 	retries = 0;
851 	device = (struct dasd_device *) cqr->startdev;
852 	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
853 		rc = ccw_device_clear(device->cdev, (long) cqr);
854 		switch (rc) {
855 		case 0:	/* termination successful */
856 			cqr->retries--;
857 			cqr->status = DASD_CQR_CLEAR_PENDING;
858 			cqr->stopclk = get_clock();
859 			cqr->starttime = 0;
860 			DBF_DEV_EVENT(DBF_DEBUG, device,
861 				      "terminate cqr %p successful",
862 				      cqr);
863 			break;
864 		case -ENODEV:
865 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
866 				      "device gone, retry");
867 			break;
868 		case -EIO:
869 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
870 				      "I/O error, retry");
871 			break;
872 		case -EINVAL:
873 		case -EBUSY:
874 			DBF_DEV_EVENT(DBF_ERR, device, "%s",
875 				      "device busy, retry later");
876 			break;
877 		default:
878 			/* internal error 10 - unknown rc*/
879 			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
880 			dev_err(&device->cdev->dev, "An error occurred in the "
881 				"DASD device driver, reason=%s\n", errorstring);
882 			BUG();
883 			break;
884 		}
885 		retries++;
886 	}
887 	dasd_schedule_device_bh(device);
888 	return rc;
889 }
890 
891 /*
892  * Start the i/o. This start_IO can fail if the channel is really busy.
893  * In that case set up a timer to start the request later.
894  */
895 int dasd_start_IO(struct dasd_ccw_req *cqr)
896 {
897 	struct dasd_device *device;
898 	int rc;
899 	char errorstring[ERRORLENGTH];
900 
901 	/* Check the cqr */
902 	rc = dasd_check_cqr(cqr);
903 	if (rc) {
904 		cqr->intrc = rc;
905 		return rc;
906 	}
907 	device = (struct dasd_device *) cqr->startdev;
908 	if (cqr->retries < 0) {
909 		/* internal error 14 - start_IO run out of retries */
910 		sprintf(errorstring, "14 %p", cqr);
911 		dev_err(&device->cdev->dev, "An error occurred in the DASD "
912 			"device driver, reason=%s\n", errorstring);
913 		cqr->status = DASD_CQR_ERROR;
914 		return -EIO;
915 	}
916 	cqr->startclk = get_clock();
917 	cqr->starttime = jiffies;
918 	cqr->retries--;
919 	if (cqr->cpmode == 1) {
920 		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
921 					 (long) cqr, cqr->lpm);
922 	} else {
923 		rc = ccw_device_start(device->cdev, cqr->cpaddr,
924 				      (long) cqr, cqr->lpm, 0);
925 	}
926 	switch (rc) {
927 	case 0:
928 		cqr->status = DASD_CQR_IN_IO;
929 		break;
930 	case -EBUSY:
931 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
932 			      "start_IO: device busy, retry later");
933 		break;
934 	case -ETIMEDOUT:
935 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
936 			      "start_IO: request timeout, retry later");
937 		break;
938 	case -EACCES:
939 		/* -EACCES indicates that the request used only a
940 		 * subset of the available pathes and all these
941 		 * subset of the available paths and all these
942 		 * paths are gone.
943 		 * Do a retry with all available paths.
944 		cqr->lpm = LPM_ANYPATH;
945 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
946 			      "start_IO: selected pathes gone,"
947 			      " retry on all pathes");
948 		break;
949 	case -ENODEV:
950 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
951 			      "start_IO: -ENODEV device gone, retry");
952 		break;
953 	case -EIO:
954 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
955 			      "start_IO: -EIO device gone, retry");
956 		break;
957 	case -EINVAL:
958 		/* most likely caused in power management context */
959 		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
960 			      "start_IO: -EINVAL device currently "
961 			      "not accessible");
962 		break;
963 	default:
964 		/* internal error 11 - unknown rc */
965 		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
966 		dev_err(&device->cdev->dev,
967 			"An error occurred in the DASD device driver, "
968 			"reason=%s\n", errorstring);
969 		BUG();
970 		break;
971 	}
972 	cqr->intrc = rc;
973 	return rc;
974 }
975 
976 /*
977  * Timeout function for dasd devices. This is used for different purposes
978  *  1) missing interrupt handler for normal operation
979  *  2) delayed start of request where start_IO failed with -EBUSY
980  *  3) timeout for missing state change interrupts
981  * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
982  * DASD_CQR_QUEUED for 2) and 3).
983  */
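/*
 * The timer handler itself only clears DASD_STOPPED_PENDING and schedules
 * the bottom half; the actual expiry and retry handling is done by
 * __dasd_device_check_expire() and __dasd_device_start_head() once the
 * tasklet runs.
 */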
984 static void dasd_device_timeout(unsigned long ptr)
985 {
986 	unsigned long flags;
987 	struct dasd_device *device;
988 
989 	device = (struct dasd_device *) ptr;
990 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
991 	/* re-activate request queue */
992 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
993 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
994 	dasd_schedule_device_bh(device);
995 }
996 
997 /*
998  * Setup timeout for a device in jiffies.
999  */
1000 void dasd_device_set_timer(struct dasd_device *device, int expires)
1001 {
1002 	if (expires == 0)
1003 		del_timer(&device->timer);
1004 	else
1005 		mod_timer(&device->timer, jiffies + expires);
1006 }
1007 
1008 /*
1009  * Clear timeout for a device.
1010  */
1011 void dasd_device_clear_timer(struct dasd_device *device)
1012 {
1013 	del_timer(&device->timer);
1014 }
1015 
1016 static void dasd_handle_killed_request(struct ccw_device *cdev,
1017 				       unsigned long intparm)
1018 {
1019 	struct dasd_ccw_req *cqr;
1020 	struct dasd_device *device;
1021 
1022 	if (!intparm)
1023 		return;
1024 	cqr = (struct dasd_ccw_req *) intparm;
1025 	if (cqr->status != DASD_CQR_IN_IO) {
1026 		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
1027 				"invalid status in handle_killed_request: "
1028 				"%02x", cqr->status);
1029 		return;
1030 	}
1031 
1032 	device = dasd_device_from_cdev_locked(cdev);
1033 	if (IS_ERR(device)) {
1034 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1035 				"unable to get device from cdev");
1036 		return;
1037 	}
1038 
1039 	if (!cqr->startdev ||
1040 	    device != cqr->startdev ||
1041 	    strncmp(cqr->startdev->discipline->ebcname,
1042 		    (char *) &cqr->magic, 4)) {
1043 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1044 				"invalid device in request");
1045 		dasd_put_device(device);
1046 		return;
1047 	}
1048 
1049 	/* Schedule request to be retried. */
1050 	cqr->status = DASD_CQR_QUEUED;
1051 
1052 	dasd_device_clear_timer(device);
1053 	dasd_schedule_device_bh(device);
1054 	dasd_put_device(device);
1055 }
1056 
1057 void dasd_generic_handle_state_change(struct dasd_device *device)
1058 {
1059 	/* First of all start sense subsystem status request. */
1060 	dasd_eer_snss(device);
1061 
1062 	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
1063 	dasd_schedule_device_bh(device);
1064 	if (device->block)
1065 		dasd_schedule_block_bh(device->block);
1066 }
1067 
1068 /*
1069  * Interrupt handler for "normal" ssch-io based dasd devices.
1070  */
1071 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1072 		      struct irb *irb)
1073 {
1074 	struct dasd_ccw_req *cqr, *next;
1075 	struct dasd_device *device;
1076 	unsigned long long now;
1077 	int expires;
1078 
1079 	if (IS_ERR(irb)) {
1080 		switch (PTR_ERR(irb)) {
1081 		case -EIO:
1082 			break;
1083 		case -ETIMEDOUT:
1084 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1085 					"request timed out\n", __func__);
1086 			break;
1087 		default:
1088 			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
1089 					"unknown error %ld\n", __func__,
1090 					PTR_ERR(irb));
1091 		}
1092 		dasd_handle_killed_request(cdev, intparm);
1093 		return;
1094 	}
1095 
1096 	now = get_clock();
1097 
1098 	/* check for unsolicited interrupts */
1099 	cqr = (struct dasd_ccw_req *) intparm;
1100 	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
1101 		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1102 		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
1103 		if (cqr && cqr->status == DASD_CQR_IN_IO)
1104 			cqr->status = DASD_CQR_QUEUED;
1105 		device = dasd_device_from_cdev_locked(cdev);
1106 		if (!IS_ERR(device)) {
1107 			dasd_device_clear_timer(device);
1108 			device->discipline->handle_unsolicited_interrupt(device,
1109 									 irb);
1110 			dasd_put_device(device);
1111 		}
1112 		return;
1113 	}
1114 
1115 	device = (struct dasd_device *) cqr->startdev;
1116 	if (!device ||
1117 	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
1118 		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
1119 				"invalid device in request");
1120 		return;
1121 	}
1122 
1123 	/* Check for clear pending */
1124 	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1125 	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1126 		cqr->status = DASD_CQR_CLEARED;
1127 		dasd_device_clear_timer(device);
1128 		wake_up(&dasd_flush_wq);
1129 		dasd_schedule_device_bh(device);
1130 		return;
1131 	}
1132 
1133 	/* check status - the request might have been killed by dyn detach */
1134 	if (cqr->status != DASD_CQR_IN_IO) {
1135 		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
1136 			      "status %02x", dev_name(&cdev->dev), cqr->status);
1137 		return;
1138 	}
1139 
1140 	next = NULL;
1141 	expires = 0;
1142 	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
1143 	    scsw_cstat(&irb->scsw) == 0) {
1144 		/* request was completed successfully */
1145 		cqr->status = DASD_CQR_SUCCESS;
1146 		cqr->stopclk = now;
1147 		/* Start first request on queue if possible -> fast_io. */
1148 		if (cqr->devlist.next != &device->ccw_queue) {
1149 			next = list_entry(cqr->devlist.next,
1150 					  struct dasd_ccw_req, devlist);
1151 		}
1152 	} else {  /* error */
1153 		memcpy(&cqr->irb, irb, sizeof(struct irb));
1154 		/* log sense for every failed I/O to s390 debugfeature */
1155 		dasd_log_sense_dbf(cqr, irb);
1156 		if (device->features & DASD_FEATURE_ERPLOG) {
1157 			dasd_log_sense(cqr, irb);
1158 		}
1159 
1160 		/*
1161 		 * If we don't want complex ERP for this request, then just
1162 		 * reset this and retry it in the fastpath
1163 		 */
1164 		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
1165 		    cqr->retries > 0) {
1166 			if (cqr->lpm == LPM_ANYPATH)
1167 				DBF_DEV_EVENT(DBF_DEBUG, device,
1168 					      "default ERP in fastpath "
1169 					      "(%i retries left)",
1170 					      cqr->retries);
1171 			cqr->lpm    = LPM_ANYPATH;
1172 			cqr->status = DASD_CQR_QUEUED;
1173 			next = cqr;
1174 		} else
1175 			cqr->status = DASD_CQR_ERROR;
1176 	}
1177 	if (next && (next->status == DASD_CQR_QUEUED) &&
1178 	    (!device->stopped)) {
1179 		if (device->discipline->start_IO(next) == 0)
1180 			expires = next->expires;
1181 	}
1182 	if (expires != 0)
1183 		dasd_device_set_timer(device, expires);
1184 	else
1185 		dasd_device_clear_timer(device);
1186 	dasd_schedule_device_bh(device);
1187 }
1188 
1189 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1190 {
1191 	struct dasd_device *device;
1192 
1193 	device = dasd_device_from_cdev_locked(cdev);
1194 
1195 	if (IS_ERR(device))
1196 		goto out;
1197 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1198 	   device->state != device->target ||
1199 	   !device->discipline->handle_unsolicited_interrupt){
1200 		dasd_put_device(device);
1201 		goto out;
1202 	}
1203 
1204 	dasd_device_clear_timer(device);
1205 	device->discipline->handle_unsolicited_interrupt(device, irb);
1206 	dasd_put_device(device);
1207 out:
1208 	return UC_TODO_RETRY;
1209 }
1210 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);
1211 
1212 /*
1213  * If we have an error on a dasd_block layer request then we cancel
1214  * and return all further requests from the same dasd_block as well.
1215  */
1216 static void __dasd_device_recovery(struct dasd_device *device,
1217 				   struct dasd_ccw_req *ref_cqr)
1218 {
1219 	struct list_head *l, *n;
1220 	struct dasd_ccw_req *cqr;
1221 
1222 	/*
1223 	 * only requeue requests that came from the dasd_block layer
1224 	 */
1225 	if (!ref_cqr->block)
1226 		return;
1227 
1228 	list_for_each_safe(l, n, &device->ccw_queue) {
1229 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1230 		if (cqr->status == DASD_CQR_QUEUED &&
1231 		    ref_cqr->block == cqr->block) {
1232 			cqr->status = DASD_CQR_CLEARED;
1233 		}
1234 	}
1235 }
1236 
1237 /*
1238  * Remove those ccw requests from the queue that need to be returned
1239  * to the upper layer.
1240  */
1241 static void __dasd_device_process_ccw_queue(struct dasd_device *device,
1242 					    struct list_head *final_queue)
1243 {
1244 	struct list_head *l, *n;
1245 	struct dasd_ccw_req *cqr;
1246 
1247 	/* Process request with final status. */
1248 	list_for_each_safe(l, n, &device->ccw_queue) {
1249 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1250 
1251 		/* Stop list processing at the first non-final request. */
1252 		if (cqr->status == DASD_CQR_QUEUED ||
1253 		    cqr->status == DASD_CQR_IN_IO ||
1254 		    cqr->status == DASD_CQR_CLEAR_PENDING)
1255 			break;
1256 		if (cqr->status == DASD_CQR_ERROR) {
1257 			__dasd_device_recovery(device, cqr);
1258 		}
1259 		/* Rechain finished requests to final queue */
1260 		list_move_tail(&cqr->devlist, final_queue);
1261 	}
1262 }
1263 
1264 /*
1265  * The cqrs from the final queue are returned to the upper layer
1266  * by setting their final status and calling the callback function.
1267  */
1268 static void __dasd_device_process_final_queue(struct dasd_device *device,
1269 					      struct list_head *final_queue)
1270 {
1271 	struct list_head *l, *n;
1272 	struct dasd_ccw_req *cqr;
1273 	struct dasd_block *block;
1274 	void (*callback)(struct dasd_ccw_req *, void *data);
1275 	void *callback_data;
1276 	char errorstring[ERRORLENGTH];
1277 
1278 	list_for_each_safe(l, n, final_queue) {
1279 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
1280 		list_del_init(&cqr->devlist);
1281 		block = cqr->block;
1282 		callback = cqr->callback;
1283 		callback_data = cqr->callback_data;
1284 		if (block)
1285 			spin_lock_bh(&block->queue_lock);
1286 		switch (cqr->status) {
1287 		case DASD_CQR_SUCCESS:
1288 			cqr->status = DASD_CQR_DONE;
1289 			break;
1290 		case DASD_CQR_ERROR:
1291 			cqr->status = DASD_CQR_NEED_ERP;
1292 			break;
1293 		case DASD_CQR_CLEARED:
1294 			cqr->status = DASD_CQR_TERMINATED;
1295 			break;
1296 		default:
1297 			/* internal error 12 - wrong cqr status*/
1298 			snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1299 			dev_err(&device->cdev->dev,
1300 				"An error occurred in the DASD device driver, "
1301 				"reason=%s\n", errorstring);
1302 			BUG();
1303 		}
1304 		if (cqr->callback != NULL)
1305 			(callback)(cqr, callback_data);
1306 		if (block)
1307 			spin_unlock_bh(&block->queue_lock);
1308 	}
1309 }
1310 
1311 /*
1312  * Take a look at the first request on the ccw queue and check
1313  * if it reached its expire time. If so, terminate the IO.
1314  */
1315 static void __dasd_device_check_expire(struct dasd_device *device)
1316 {
1317 	struct dasd_ccw_req *cqr;
1318 
1319 	if (list_empty(&device->ccw_queue))
1320 		return;
1321 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1322 	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
1323 	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
1324 		if (device->discipline->term_IO(cqr) != 0) {
1325 			/* Hmpf, try again in 5 sec */
1326 			dev_err(&device->cdev->dev,
1327 				"cqr %p timed out (%is) but cannot be "
1328 				"ended, retrying in 5 s\n",
1329 				cqr, (cqr->expires/HZ));
1330 			cqr->expires += 5*HZ;
1331 			dasd_device_set_timer(device, 5*HZ);
1332 		} else {
1333 			dev_err(&device->cdev->dev,
1334 				"cqr %p timed out (%is), %i retries "
1335 				"remaining\n", cqr, (cqr->expires/HZ),
1336 				cqr->retries);
1337 		}
1338 	}
1339 }
1340 
1341 /*
1342  * Take a look at the first request on the ccw queue and check
1343  * if it needs to be started.
1344  */
1345 static void __dasd_device_start_head(struct dasd_device *device)
1346 {
1347 	struct dasd_ccw_req *cqr;
1348 	int rc;
1349 
1350 	if (list_empty(&device->ccw_queue))
1351 		return;
1352 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1353 	if (cqr->status != DASD_CQR_QUEUED)
1354 		return;
1355 	/* when device is stopped, return request to previous layer */
1356 	if (device->stopped) {
1357 		cqr->status = DASD_CQR_CLEARED;
1358 		dasd_schedule_device_bh(device);
1359 		return;
1360 	}
1361 
1362 	rc = device->discipline->start_IO(cqr);
1363 	if (rc == 0)
1364 		dasd_device_set_timer(device, cqr->expires);
1365 	else if (rc == -EACCES) {
1366 		dasd_schedule_device_bh(device);
1367 	} else
1368 		/* Hmpf, try again in 1/2 sec */
1369 		dasd_device_set_timer(device, 50);
1370 }
1371 
1372 /*
1373  * Go through all requests on the dasd_device request queue,
1374  * terminate them on the cdev if necessary, and return them to the
1375  * submitting layer via callback.
1376  * Note:
1377  * Make sure that all 'submitting layers' still exist when
1378  * this function is called! In other words, when 'device' is a base
1379  * device then all block layer requests must already have been removed
1380  * via dasd_flush_block_queue.
1381  */
1382 int dasd_flush_device_queue(struct dasd_device *device)
1383 {
1384 	struct dasd_ccw_req *cqr, *n;
1385 	int rc;
1386 	struct list_head flush_queue;
1387 
1388 	INIT_LIST_HEAD(&flush_queue);
1389 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1390 	rc = 0;
1391 	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
1392 		/* Check status and move request to flush_queue */
1393 		switch (cqr->status) {
1394 		case DASD_CQR_IN_IO:
1395 			rc = device->discipline->term_IO(cqr);
1396 			if (rc) {
1397 				/* unable to terminate request */
1398 				dev_err(&device->cdev->dev,
1399 					"Flushing the DASD request queue "
1400 					"failed for request %p\n", cqr);
1401 				/* stop flush processing */
1402 				goto finished;
1403 			}
1404 			break;
1405 		case DASD_CQR_QUEUED:
1406 			cqr->stopclk = get_clock();
1407 			cqr->status = DASD_CQR_CLEARED;
1408 			break;
1409 		default: /* no need to modify the others */
1410 			break;
1411 		}
1412 		list_move_tail(&cqr->devlist, &flush_queue);
1413 	}
1414 finished:
1415 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1416 	/*
1417 	 * After this point all requests must be in state CLEAR_PENDING,
1418 	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
1419 	 * one of the others.
1420 	 */
1421 	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
1422 		wait_event(dasd_flush_wq,
1423 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
1424 	/*
1425 	 * Now set each request back to TERMINATED, DONE or NEED_ERP
1426 	 * and call the callback function of flushed requests
1427 	 */
1428 	__dasd_device_process_final_queue(device, &flush_queue);
1429 	return rc;
1430 }
1431 
1432 /*
1433  * Acquire the device lock and process queues for the device.
1434  */
1435 static void dasd_device_tasklet(struct dasd_device *device)
1436 {
1437 	struct list_head final_queue;
1438 
1439 	atomic_set (&device->tasklet_scheduled, 0);
1440 	INIT_LIST_HEAD(&final_queue);
1441 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1442 	/* Check expire time of first request on the ccw queue. */
1443 	__dasd_device_check_expire(device);
1444 	/* find final requests on ccw queue */
1445 	__dasd_device_process_ccw_queue(device, &final_queue);
1446 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1447 	/* Now call the callback function of requests with final status */
1448 	__dasd_device_process_final_queue(device, &final_queue);
1449 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1450 	/* Now check if the head of the ccw queue needs to be started. */
1451 	__dasd_device_start_head(device);
1452 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1453 	dasd_put_device(device);
1454 }
1455 
1456 /*
1457  * Schedule a call to dasd_device_tasklet via the device tasklet.
1458  */
1459 void dasd_schedule_device_bh(struct dasd_device *device)
1460 {
1461 	/* Protect against rescheduling. */
1462 	if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0)
1463 		return;
1464 	dasd_get_device(device);
1465 	tasklet_hi_schedule(&device->tasklet);
1466 }
1467 
1468 void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
1469 {
1470 	device->stopped |= bits;
1471 }
1472 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);
1473 
1474 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
1475 {
1476 	device->stopped &= ~bits;
1477 	if (!device->stopped)
1478 		wake_up(&generic_waitq);
1479 }
1480 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);
1481 
1482 /*
1483  * Queue a request to the head of the device ccw_queue.
1484  * Start the I/O if possible.
1485  */
1486 void dasd_add_request_head(struct dasd_ccw_req *cqr)
1487 {
1488 	struct dasd_device *device;
1489 	unsigned long flags;
1490 
1491 	device = cqr->startdev;
1492 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1493 	cqr->status = DASD_CQR_QUEUED;
1494 	list_add(&cqr->devlist, &device->ccw_queue);
1495 	/* let the bh start the request to keep them in order */
1496 	dasd_schedule_device_bh(device);
1497 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1498 }
1499 
1500 /*
1501  * Queue a request to the tail of the device ccw_queue.
1502  * Start the I/O if possible.
1503  */
1504 void dasd_add_request_tail(struct dasd_ccw_req *cqr)
1505 {
1506 	struct dasd_device *device;
1507 	unsigned long flags;
1508 
1509 	device = cqr->startdev;
1510 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1511 	cqr->status = DASD_CQR_QUEUED;
1512 	list_add_tail(&cqr->devlist, &device->ccw_queue);
1513 	/* let the bh start the request to keep them in order */
1514 	dasd_schedule_device_bh(device);
1515 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1516 }
1517 
1518 /*
1519  * Wakeup helper for the 'sleep_on' functions.
1520  */
1521 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
1522 {
1523 	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1524 	cqr->callback_data = DASD_SLEEPON_END_TAG;
1525 	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
1526 	wake_up(&generic_waitq);
1527 }
1528 
1529 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
1530 {
1531 	struct dasd_device *device;
1532 	int rc;
1533 
1534 	device = cqr->startdev;
1535 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1536 	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
1537 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1538 	return rc;
1539 }
1540 
1541 /*
1542  * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
1543  */
1544 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
1545 {
1546 	struct dasd_device *device;
1547 	dasd_erp_fn_t erp_fn;
1548 
1549 	if (cqr->status == DASD_CQR_FILLED)
1550 		return 0;
1551 	device = cqr->startdev;
1552 	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1553 		if (cqr->status == DASD_CQR_TERMINATED) {
1554 			device->discipline->handle_terminated_request(cqr);
1555 			return 1;
1556 		}
1557 		if (cqr->status == DASD_CQR_NEED_ERP) {
1558 			erp_fn = device->discipline->erp_action(cqr);
1559 			erp_fn(cqr);
1560 			return 1;
1561 		}
1562 		if (cqr->status == DASD_CQR_FAILED)
1563 			dasd_log_sense(cqr, &cqr->irb);
1564 		if (cqr->refers) {
1565 			__dasd_process_erp(device, cqr);
1566 			return 1;
1567 		}
1568 	}
1569 	return 0;
1570 }
1571 
1572 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
1573 {
1574 	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
1575 		if (cqr->refers) /* erp is not done yet */
1576 			return 1;
1577 		return ((cqr->status != DASD_CQR_DONE) &&
1578 			(cqr->status != DASD_CQR_FAILED));
1579 	} else
1580 		return (cqr->status == DASD_CQR_FILLED);
1581 }
1582 
1583 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
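/*
 * Common code for the sleep_on variants below. A request travels through
 * the states FILLED -> QUEUED -> IN_IO and ends up as DONE, NEED_ERP,
 * TERMINATED or FAILED; the loop keeps iterating while ERP requests
 * (cqr->refers) are still outstanding.
 */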
1584 {
1585 	struct dasd_device *device;
1586 	int rc;
1587 	struct list_head ccw_queue;
1588 	struct dasd_ccw_req *cqr;
1589 
1590 	INIT_LIST_HEAD(&ccw_queue);
1591 	maincqr->status = DASD_CQR_FILLED;
1592 	device = maincqr->startdev;
1593 	list_add(&maincqr->blocklist, &ccw_queue);
1594 	for (cqr = maincqr;  __dasd_sleep_on_loop_condition(cqr);
1595 	     cqr = list_first_entry(&ccw_queue,
1596 				    struct dasd_ccw_req, blocklist)) {
1597 
1598 		if (__dasd_sleep_on_erp(cqr))
1599 			continue;
1600 		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
1601 			continue;
1602 
1603 		/* Non-temporary stop condition will trigger fail fast */
1604 		if (device->stopped & ~DASD_STOPPED_PENDING &&
1605 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
1606 		    (!dasd_eer_enabled(device))) {
1607 			cqr->status = DASD_CQR_FAILED;
1608 			continue;
1609 		}
1610 
1611 		/* Don't try to start requests if device is stopped */
1612 		if (interruptible) {
1613 			rc = wait_event_interruptible(
1614 				generic_waitq, !(device->stopped));
1615 			if (rc == -ERESTARTSYS) {
1616 				cqr->status = DASD_CQR_FAILED;
1617 				maincqr->intrc = rc;
1618 				continue;
1619 			}
1620 		} else
1621 			wait_event(generic_waitq, !(device->stopped));
1622 
1623 		cqr->callback = dasd_wakeup_cb;
1624 		cqr->callback_data = DASD_SLEEPON_START_TAG;
1625 		dasd_add_request_tail(cqr);
1626 		if (interruptible) {
1627 			rc = wait_event_interruptible(
1628 				generic_waitq, _wait_for_wakeup(cqr));
1629 			if (rc == -ERESTARTSYS) {
1630 				dasd_cancel_req(cqr);
1631 				/* wait (non-interruptible) for final status */
1632 				wait_event(generic_waitq,
1633 					   _wait_for_wakeup(cqr));
1634 				cqr->status = DASD_CQR_FAILED;
1635 				maincqr->intrc = rc;
1636 				continue;
1637 			}
1638 		} else
1639 			wait_event(generic_waitq, _wait_for_wakeup(cqr));
1640 	}
1641 
1642 	maincqr->endclk = get_clock();
1643 	if ((maincqr->status != DASD_CQR_DONE) &&
1644 	    (maincqr->intrc != -ERESTARTSYS))
1645 		dasd_log_sense(maincqr, &maincqr->irb);
1646 	if (maincqr->status == DASD_CQR_DONE)
1647 		rc = 0;
1648 	else if (maincqr->intrc)
1649 		rc = maincqr->intrc;
1650 	else
1651 		rc = -EIO;
1652 	return rc;
1653 }
1654 
1655 /*
1656  * Queue a request to the tail of the device ccw_queue and wait for
1657  * its completion.
1658  */
1659 int dasd_sleep_on(struct dasd_ccw_req *cqr)
1660 {
1661 	return _dasd_sleep_on(cqr, 0);
1662 }
1663 
1664 /*
1665  * Queue a request to the tail of the device ccw_queue and wait
1666  * interruptibly for its completion.
1667  */
1668 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
1669 {
1670 	return _dasd_sleep_on(cqr, 1);
1671 }
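/*
 * Illustrative call sequence (details depend on the discipline): allocate a
 * request with dasd_smalloc_request(), fill in at least cqr->startdev,
 * cqr->retries and cqr->expires, submit it with dasd_sleep_on() or
 * dasd_sleep_on_interruptible(), and free it with dasd_sfree_request()
 * after evaluating the return code.
 */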
1672 
1673 /*
1674  * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
1675  * for eckd devices) the currently running request has to be terminated
1676  * and be put back to status queued, before the special request is added
1677  * to the head of the queue. Then the special request is waited on normally.
1678  */
1679 static inline int _dasd_term_running_cqr(struct dasd_device *device)
1680 {
1681 	struct dasd_ccw_req *cqr;
1682 
1683 	if (list_empty(&device->ccw_queue))
1684 		return 0;
1685 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
1686 	return device->discipline->term_IO(cqr);
1687 }
1688 
1689 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
1690 {
1691 	struct dasd_device *device;
1692 	int rc;
1693 
1694 	device = cqr->startdev;
1695 	spin_lock_irq(get_ccwdev_lock(device->cdev));
1696 	rc = _dasd_term_running_cqr(device);
1697 	if (rc) {
1698 		spin_unlock_irq(get_ccwdev_lock(device->cdev));
1699 		return rc;
1700 	}
1701 
1702 	cqr->callback = dasd_wakeup_cb;
1703 	cqr->callback_data = DASD_SLEEPON_START_TAG;
1704 	cqr->status = DASD_CQR_QUEUED;
1705 	list_add(&cqr->devlist, &device->ccw_queue);
1706 
1707 	/* let the bh start the request to keep them in order */
1708 	dasd_schedule_device_bh(device);
1709 
1710 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
1711 
1712 	wait_event(generic_waitq, _wait_for_wakeup(cqr));
1713 
1714 	if (cqr->status == DASD_CQR_DONE)
1715 		rc = 0;
1716 	else if (cqr->intrc)
1717 		rc = cqr->intrc;
1718 	else
1719 		rc = -EIO;
1720 	return rc;
1721 }
1722 
1723 /*
1724  * Cancels a request that was started with dasd_sleep_on_req.
1725  * This is useful to timeout requests. The request will be
1726  * terminated if it is currently in i/o.
1727  * Returns 1 if the request has been terminated.
1728  *	   0 if there was no need to terminate the request (not started yet)
1729  *	   negative error code if termination failed
1730  * Cancellation of a request is an asynchronous operation! The calling
1731  * function has to wait until the request is properly returned via callback.
1732  */
1733 int dasd_cancel_req(struct dasd_ccw_req *cqr)
1734 {
1735 	struct dasd_device *device = cqr->startdev;
1736 	unsigned long flags;
1737 	int rc;
1738 
1739 	rc = 0;
1740 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1741 	switch (cqr->status) {
1742 	case DASD_CQR_QUEUED:
1743 		/* request was not started - just set to cleared */
1744 		cqr->status = DASD_CQR_CLEARED;
1745 		break;
1746 	case DASD_CQR_IN_IO:
1747 		/* request in IO - terminate IO and release again */
1748 		rc = device->discipline->term_IO(cqr);
1749 		if (rc) {
1750 			dev_err(&device->cdev->dev,
1751 				"Cancelling request %p failed with rc=%d\n",
1752 				cqr, rc);
1753 		} else {
1754 			cqr->stopclk = get_clock();
1755 		}
1756 		break;
1757 	default: /* already finished or clear pending - do nothing */
1758 		break;
1759 	}
1760 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1761 	dasd_schedule_device_bh(device);
1762 	return rc;
1763 }
1764 
1765 
1766 /*
1767  * SECTION: Operations of the dasd_block layer.
1768  */
1769 
1770 /*
1771  * Timeout function for dasd_block. This is used when the block layer
1772  * is waiting for something that may not come reliably (e.g. a state
1773  * change interrupt)
1774  */
1775 static void dasd_block_timeout(unsigned long ptr)
1776 {
1777 	unsigned long flags;
1778 	struct dasd_block *block;
1779 
1780 	block = (struct dasd_block *) ptr;
1781 	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
1782 	/* re-activate request queue */
1783 	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
1784 	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
1785 	dasd_schedule_block_bh(block);
1786 }
1787 
1788 /*
1789  * Setup timeout for a dasd_block in jiffies.
1790  */
1791 void dasd_block_set_timer(struct dasd_block *block, int expires)
1792 {
1793 	if (expires == 0)
1794 		del_timer(&block->timer);
1795 	else
1796 		mod_timer(&block->timer, jiffies + expires);
1797 }
1798 
1799 /*
1800  * Clear timeout for a dasd_block.
1801  */
1802 void dasd_block_clear_timer(struct dasd_block *block)
1803 {
1804 	del_timer(&block->timer);
1805 }
1806 
1807 /*
1808  * Process finished error recovery ccw.
1809  */
1810 static void __dasd_process_erp(struct dasd_device *device,
1811 			       struct dasd_ccw_req *cqr)
1812 {
1813 	dasd_erp_fn_t erp_fn;
1814 
1815 	if (cqr->status == DASD_CQR_DONE)
1816 		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
1817 	else
1818 		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
1819 	erp_fn = device->discipline->erp_postaction(cqr);
1820 	erp_fn(cqr);
1821 }
1822 
1823 /*
1824  * Fetch requests from the block device queue.
1825  */
1826 static void __dasd_process_request_queue(struct dasd_block *block)
1827 {
1828 	struct request_queue *queue;
1829 	struct request *req;
1830 	struct dasd_ccw_req *cqr;
1831 	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue? Then there is nothing to do. */
	if (queue == NULL)
		return;
1838 
	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
1846 	if (basedev->state < DASD_STATE_READY) {
1847 		while ((req = blk_fetch_request(block->request_queue)))
1848 			__blk_end_request_all(req, -EIO);
1849 		return;
1850 	}
1851 	/* Now we try to fetch requests from the request queue */
1852 	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
1853 		if (basedev->features & DASD_FEATURE_READONLY &&
1854 		    rq_data_dir(req) == WRITE) {
1855 			DBF_DEV_EVENT(DBF_ERR, basedev,
1856 				      "Rejecting write request %p",
1857 				      req);
1858 			blk_start_request(req);
1859 			__blk_end_request_all(req, -EIO);
1860 			continue;
1861 		}
1862 		cqr = basedev->discipline->build_cp(basedev, block, req);
1863 		if (IS_ERR(cqr)) {
1864 			if (PTR_ERR(cqr) == -EBUSY)
1865 				break;	/* normal end condition */
1866 			if (PTR_ERR(cqr) == -ENOMEM)
1867 				break;	/* terminate request queue loop */
1868 			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now; we have to try again later. If this
				 * request is the head of the queue we stop
				 * the device for half a second.
				 */
1875 				if (!list_empty(&block->ccw_queue))
1876 					break;
1877 				spin_lock_irqsave(
1878 					get_ccwdev_lock(basedev->cdev), flags);
1879 				dasd_device_set_stop_bits(basedev,
1880 							  DASD_STOPPED_PENDING);
1881 				spin_unlock_irqrestore(
1882 					get_ccwdev_lock(basedev->cdev), flags);
1883 				dasd_block_set_timer(block, HZ/2);
1884 				break;
1885 			}
1886 			DBF_DEV_EVENT(DBF_ERR, basedev,
1887 				      "CCW creation failed (rc=%ld) "
1888 				      "on request %p",
1889 				      PTR_ERR(cqr), req);
1890 			blk_start_request(req);
1891 			__blk_end_request_all(req, -EIO);
1892 			continue;
1893 		}
1894 		/*
		 * Note: callback is set to dasd_return_cqr_cb in
1896 		 * __dasd_block_start_head to cover erp requests as well
1897 		 */
1898 		cqr->callback_data = (void *) req;
1899 		cqr->status = DASD_CQR_FILLED;
1900 		blk_start_request(req);
1901 		list_add_tail(&cqr->blocklist, &block->ccw_queue);
1902 		dasd_profile_start(block, cqr, req);
1903 	}
1904 }
1905 
1906 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1907 {
1908 	struct request *req;
1909 	int status;
1910 	int error = 0;
1911 
1912 	req = (struct request *) cqr->callback_data;
1913 	dasd_profile_end(cqr->block, cqr, req);
1914 	status = cqr->block->base->discipline->free_cp(cqr, req);
1915 	if (status <= 0)
1916 		error = status ? status : -EIO;
1917 	__blk_end_request_all(req, error);
1918 }
1919 
1920 /*
1921  * Process ccw request queue.
1922  */
1923 static void __dasd_process_block_ccw_queue(struct dasd_block *block,
1924 					   struct list_head *final_queue)
1925 {
1926 	struct list_head *l, *n;
1927 	struct dasd_ccw_req *cqr;
1928 	dasd_erp_fn_t erp_fn;
1929 	unsigned long flags;
1930 	struct dasd_device *base = block->base;
1931 
1932 restart:
	/* Process requests with final status. */
1934 	list_for_each_safe(l, n, &block->ccw_queue) {
1935 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
1936 		if (cqr->status != DASD_CQR_DONE &&
1937 		    cqr->status != DASD_CQR_FAILED &&
1938 		    cqr->status != DASD_CQR_NEED_ERP &&
1939 		    cqr->status != DASD_CQR_TERMINATED)
1940 			continue;
1941 
1942 		if (cqr->status == DASD_CQR_TERMINATED) {
1943 			base->discipline->handle_terminated_request(cqr);
1944 			goto restart;
1945 		}
1946 
1947 		/*  Process requests that may be recovered */
1948 		if (cqr->status == DASD_CQR_NEED_ERP) {
1949 			erp_fn = base->discipline->erp_action(cqr);
1950 			if (IS_ERR(erp_fn(cqr)))
1951 				continue;
1952 			goto restart;
1953 		}
1954 
1955 		/* log sense for fatal error */
1956 		if (cqr->status == DASD_CQR_FAILED) {
1957 			dasd_log_sense(cqr, &cqr->irb);
1958 		}
1959 
1960 		/* First of all call extended error reporting. */
1961 		if (dasd_eer_enabled(base) &&
1962 		    cqr->status == DASD_CQR_FAILED) {
1963 			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
1964 
1965 			/* restart request  */
1966 			cqr->status = DASD_CQR_FILLED;
1967 			cqr->retries = 255;
1968 			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
1969 			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
1970 			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
1971 					       flags);
1972 			goto restart;
1973 		}
1974 
1975 		/* Process finished ERP request. */
1976 		if (cqr->refers) {
1977 			__dasd_process_erp(base, cqr);
1978 			goto restart;
1979 		}
1980 
1981 		/* Rechain finished requests to final queue */
1982 		cqr->endclk = get_clock();
1983 		list_move_tail(&cqr->blocklist, final_queue);
1984 	}
1985 }
1986 
1987 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
1988 {
1989 	dasd_schedule_block_bh(cqr->block);
1990 }
1991 
1992 static void __dasd_block_start_head(struct dasd_block *block)
1993 {
1994 	struct dasd_ccw_req *cqr;
1995 
1996 	if (list_empty(&block->ccw_queue))
1997 		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
2002 	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
2003 		if (cqr->status != DASD_CQR_FILLED)
2004 			continue;
2005 		/* Non-temporary stop condition will trigger fail fast */
2006 		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
2007 		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
2008 		    (!dasd_eer_enabled(block->base))) {
2009 			cqr->status = DASD_CQR_FAILED;
2010 			dasd_schedule_block_bh(block);
2011 			continue;
2012 		}
2013 		/* Don't try to start requests if device is stopped */
2014 		if (block->base->stopped)
2015 			return;
2016 
		/* just a fail-safe check; should not happen */
2018 		if (!cqr->startdev)
2019 			cqr->startdev = block->base;
2020 
2021 		/* make sure that the requests we submit find their way back */
2022 		cqr->callback = dasd_return_cqr_cb;
2023 
2024 		dasd_add_request_tail(cqr);
2025 	}
2026 }
2027 
2028 /*
2029  * Central dasd_block layer routine. Takes requests from the generic
2030  * block layer request queue, creates ccw requests, enqueues them on
2031  * a dasd_device and processes ccw requests that have been returned.
2032  */
2033 static void dasd_block_tasklet(struct dasd_block *block)
2034 {
2035 	struct list_head final_queue;
2036 	struct list_head *l, *n;
2037 	struct dasd_ccw_req *cqr;
2038 
2039 	atomic_set(&block->tasklet_scheduled, 0);
2040 	INIT_LIST_HEAD(&final_queue);
2041 	spin_lock(&block->queue_lock);
2042 	/* Finish off requests on ccw queue */
2043 	__dasd_process_block_ccw_queue(block, &final_queue);
2044 	spin_unlock(&block->queue_lock);
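	/*
	 * Lock nesting note: request_queue_lock is taken with interrupts
	 * disabled below and queue_lock is then acquired inside it; the
	 * reverse order must never be used.
	 */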
2045 	/* Now call the callback function of requests with final status */
2046 	spin_lock_irq(&block->request_queue_lock);
2047 	list_for_each_safe(l, n, &final_queue) {
2048 		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
2049 		list_del_init(&cqr->blocklist);
2050 		__dasd_cleanup_cqr(cqr);
2051 	}
2052 	spin_lock(&block->queue_lock);
2053 	/* Get new request from the block device request queue */
2054 	__dasd_process_request_queue(block);
2055 	/* Now check if the head of the ccw queue needs to be started. */
2056 	__dasd_block_start_head(block);
2057 	spin_unlock(&block->queue_lock);
2058 	spin_unlock_irq(&block->request_queue_lock);
2059 	dasd_put_device(block->base);
2060 }
2061 
2062 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
2063 {
2064 	wake_up(&dasd_flush_wq);
2065 }
2066 
/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
2072 static int dasd_flush_block_queue(struct dasd_block *block)
2073 {
2074 	struct dasd_ccw_req *cqr, *n;
2075 	int rc, i;
2076 	struct list_head flush_queue;
2077 
2078 	INIT_LIST_HEAD(&flush_queue);
2079 	spin_lock_bh(&block->queue_lock);
2080 	rc = 0;
2081 restart:
2082 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if a dasd_device currently owns this request, cancel it */
2084 		if (cqr->status >= DASD_CQR_QUEUED)
2085 			rc = dasd_cancel_req(cqr);
2086 		if (rc < 0)
2087 			break;
2088 		/* Rechain request (including erp chain) so it won't be
2089 		 * touched by the dasd_block_tasklet anymore.
2090 		 * Replace the callback so we notice when the request
2091 		 * is returned from the dasd_device layer.
2092 		 */
2093 		cqr->callback = _dasd_wake_block_flush_cb;
2094 		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
2095 			list_move_tail(&cqr->blocklist, &flush_queue);
2096 		if (i > 1)
2097 			/* moved more than one request - need to restart */
2098 			goto restart;
2099 	}
2100 	spin_unlock_bh(&block->queue_lock);
2101 	/* Now call the callback function of flushed requests */
2102 restart_cb:
2103 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
2104 		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
2105 		/* Process finished ERP request. */
2106 		if (cqr->refers) {
2107 			spin_lock_bh(&block->queue_lock);
2108 			__dasd_process_erp(block->base, cqr);
2109 			spin_unlock_bh(&block->queue_lock);
			/* restart the list_for_each loop since
			 * __dasd_process_erp might remove multiple elements */
2112 			goto restart_cb;
2113 		}
2114 		/* call the callback function */
2115 		spin_lock_irq(&block->request_queue_lock);
2116 		cqr->endclk = get_clock();
2117 		list_del_init(&cqr->blocklist);
2118 		__dasd_cleanup_cqr(cqr);
2119 		spin_unlock_irq(&block->request_queue_lock);
2120 	}
2121 	return rc;
2122 }
2123 
/*
 * Schedules a call to dasd_block_tasklet via the block's tasklet.
 */
2127 void dasd_schedule_block_bh(struct dasd_block *block)
2128 {
2129 	/* Protect against rescheduling. */
2130 	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
2131 		return;
	/* life cycle of block is bound to its base device */
2133 	dasd_get_device(block->base);
2134 	tasklet_hi_schedule(&block->tasklet);
2135 }
2136 
2137 
2138 /*
2139  * SECTION: external block device operations
2140  * (request queue handling, open, release, etc.)
2141  */
2142 
/*
 * DASD request queue function. Called from the block layer.
 */
2146 static void do_dasd_request(struct request_queue *queue)
2147 {
2148 	struct dasd_block *block;
2149 
2150 	block = queue->queuedata;
2151 	spin_lock(&block->queue_lock);
2152 	/* Get new request from the block device request queue */
2153 	__dasd_process_request_queue(block);
2154 	/* Now check if the head of the ccw queue needs to be started. */
2155 	__dasd_block_start_head(block);
2156 	spin_unlock(&block->queue_lock);
2157 }
2158 
2159 /*
2160  * Allocate and initialize request queue and default I/O scheduler.
2161  */
2162 static int dasd_alloc_queue(struct dasd_block *block)
2163 {
2164 	int rc;
2165 
2166 	block->request_queue = blk_init_queue(do_dasd_request,
2167 					       &block->request_queue_lock);
2168 	if (block->request_queue == NULL)
2169 		return -ENOMEM;
2170 
2171 	block->request_queue->queuedata = block;
2172 
2173 	elevator_exit(block->request_queue->elevator);
2174 	block->request_queue->elevator = NULL;
2175 	rc = elevator_init(block->request_queue, "deadline");
2176 	if (rc) {
2177 		blk_cleanup_queue(block->request_queue);
2178 		return rc;
2179 	}
2180 	return 0;
2181 }
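
/*
 * The default elevator is replaced with "deadline" above; presumably
 * because DASD devices have no seek penalty that would justify the
 * idling heuristics of other schedulers, a simple deadline policy
 * gives better throughput here.
 */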
2182 
/*
 * Set request queue parameters (block size, limits, ordering).
 */
2186 static void dasd_setup_queue(struct dasd_block *block)
2187 {
2188 	int max;
2189 
2190 	blk_queue_logical_block_size(block->request_queue, block->bp_block);
2191 	max = block->base->discipline->max_blocks << block->s2b_shift;
2192 	blk_queue_max_hw_sectors(block->request_queue, max);
2193 	blk_queue_max_segments(block->request_queue, -1L);
	/* with page-sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
2197 	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
2198 	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
2199 	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
2200 }
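
/*
 * Worked example with hypothetical numbers: for a 4 KiB block size,
 * s2b_shift is 3 (eight 512-byte sectors per block). A discipline
 * reporting max_blocks = 96 then yields
 * max = 96 << 3 = 768 sectors, i.e. 384 KiB per request.
 */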
2201 
2202 /*
2203  * Deactivate and free request queue.
2204  */
2205 static void dasd_free_queue(struct dasd_block *block)
2206 {
2207 	if (block->request_queue) {
2208 		blk_cleanup_queue(block->request_queue);
2209 		block->request_queue = NULL;
2210 	}
2211 }
2212 
/*
 * Flush requests from the request queue.
 */
2216 static void dasd_flush_request_queue(struct dasd_block *block)
2217 {
2218 	struct request *req;
2219 
2220 	if (!block->request_queue)
2221 		return;
2222 
2223 	spin_lock_irq(&block->request_queue_lock);
2224 	while ((req = blk_fetch_request(block->request_queue)))
2225 		__blk_end_request_all(req, -EIO);
2226 	spin_unlock_irq(&block->request_queue_lock);
2227 }
2228 
2229 static int dasd_open(struct block_device *bdev, fmode_t mode)
2230 {
2231 	struct dasd_block *block = bdev->bd_disk->private_data;
2232 	struct dasd_device *base;
2233 	int rc;
2234 
2235 	if (!block)
2236 		return -ENODEV;
2237 
2238 	base = block->base;
2239 	atomic_inc(&block->open_count);
2240 	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
2241 		rc = -ENODEV;
2242 		goto unlock;
2243 	}
2244 
2245 	if (!try_module_get(base->discipline->owner)) {
2246 		rc = -EINVAL;
2247 		goto unlock;
2248 	}
2249 
2250 	if (dasd_probeonly) {
2251 		dev_info(&base->cdev->dev,
2252 			 "Accessing the DASD failed because it is in "
2253 			 "probeonly mode\n");
2254 		rc = -EPERM;
2255 		goto out;
2256 	}
2257 
2258 	if (base->state <= DASD_STATE_BASIC) {
2259 		DBF_DEV_EVENT(DBF_ERR, base, " %s",
2260 			      " Cannot open unrecognized device");
2261 		rc = -ENODEV;
2262 		goto out;
2263 	}
2264 
2265 	if ((mode & FMODE_WRITE) &&
2266 	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
2267 	     (base->features & DASD_FEATURE_READONLY))) {
2268 		rc = -EROFS;
2269 		goto out;
2270 	}
2271 
2272 	return 0;
2273 
2274 out:
2275 	module_put(base->discipline->owner);
2276 unlock:
2277 	atomic_dec(&block->open_count);
2278 	return rc;
2279 }
2280 
2281 static int dasd_release(struct gendisk *disk, fmode_t mode)
2282 {
2283 	struct dasd_block *block = disk->private_data;
2284 
2285 	atomic_dec(&block->open_count);
2286 	module_put(block->base->discipline->owner);
2287 	return 0;
2288 }
2289 
2290 /*
2291  * Return disk geometry.
2292  */
2293 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
2294 {
2295 	struct dasd_block *block;
2296 	struct dasd_device *base;
2297 
2298 	block = bdev->bd_disk->private_data;
2299 	if (!block)
2300 		return -ENODEV;
2301 	base = block->base;
2302 
2303 	if (!base->discipline ||
2304 	    !base->discipline->fill_geometry)
2305 		return -EINVAL;
2306 
2307 	base->discipline->fill_geometry(block, geo);
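	/*
	 * get_start_sect returns the partition offset in 512-byte
	 * sectors; the shift converts it to device blocks
	 * (s2b_shift = log2(blocksize / 512)).
	 */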
2308 	geo->start = get_start_sect(bdev) >> block->s2b_shift;
2309 	return 0;
2310 }
2311 
2312 const struct block_device_operations
2313 dasd_device_operations = {
2314 	.owner		= THIS_MODULE,
2315 	.open		= dasd_open,
2316 	.release	= dasd_release,
2317 	.ioctl		= dasd_ioctl,
2318 	.compat_ioctl	= dasd_ioctl,
2319 	.getgeo		= dasd_getgeo,
2320 };
2321 
2322 /*******************************************************************************
2323  * end of block device operations
2324  */
2325 
2326 static void
2327 dasd_exit(void)
2328 {
2329 #ifdef CONFIG_PROC_FS
2330 	dasd_proc_exit();
2331 #endif
2332 	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
2334 		kmem_cache_destroy(dasd_page_cache);
2335 		dasd_page_cache = NULL;
2336 	}
2337 	dasd_gendisk_exit();
2338 	dasd_devmap_exit();
2339 	if (dasd_debug_area != NULL) {
2340 		debug_unregister(dasd_debug_area);
2341 		dasd_debug_area = NULL;
2342 	}
2343 }
2344 
2345 /*
2346  * SECTION: common functions for ccw_driver use
2347  */
2348 
2349 /*
2350  * Is the device read-only?
2351  * Note that this function does not report the setting of the
2352  * readonly device attribute, but how it is configured in z/VM.
2353  */
2354 int dasd_device_is_ro(struct dasd_device *device)
2355 {
2356 	struct ccw_dev_id dev_id;
2357 	struct diag210 diag_data;
2358 	int rc;
2359 
2360 	if (!MACHINE_IS_VM)
2361 		return 0;
2362 	ccw_device_get_id(device->cdev, &dev_id);
2363 	memset(&diag_data, 0, sizeof(diag_data));
2364 	diag_data.vrdcdvno = dev_id.devno;
2365 	diag_data.vrdclen = sizeof(diag_data);
2366 	rc = diag210(&diag_data);
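	/*
	 * rc 0 means the buffer was filled completely; rc 2 apparently
	 * indicates a truncated answer that still contains the flag
	 * byte. Bit 0x80 in vrdcvfla is set when z/VM has attached the
	 * virtual device read-only.
	 */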
2367 	if (rc == 0 || rc == 2) {
2368 		return diag_data.vrdcvfla & 0x80;
2369 	} else {
2370 		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
2371 			  dev_id.devno, rc);
2372 		return 0;
2373 	}
2374 }
2375 EXPORT_SYMBOL_GPL(dasd_device_is_ro);
2376 
2377 static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
2378 {
2379 	struct ccw_device *cdev = data;
2380 	int ret;
2381 
2382 	ret = ccw_device_set_online(cdev);
2383 	if (ret)
2384 		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
2385 			   dev_name(&cdev->dev), ret);
2386 }
2387 
2388 /*
 * Initial attempt at a probe function. This can be simplified once
2390  * the other detection code is gone.
2391  */
2392 int dasd_generic_probe(struct ccw_device *cdev,
2393 		       struct dasd_discipline *discipline)
2394 {
2395 	int ret;
2396 
2397 	ret = dasd_add_sysfs_files(cdev);
2398 	if (ret) {
2399 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
2400 				"dasd_generic_probe: could not add "
2401 				"sysfs entries");
2402 		return ret;
2403 	}
2404 	cdev->handler = &dasd_int_handler;
2405 
2406 	/*
2407 	 * Automatically online either all dasd devices (dasd_autodetect)
2408 	 * or all devices specified with dasd= parameters during
2409 	 * initial probe.
2410 	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
2412 	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
2413 		async_schedule(dasd_generic_auto_online, cdev);
2414 	return 0;
2415 }
2416 
2417 /*
2418  * This will one day be called from a global not_oper handler.
2419  * It is also used by driver_unregister during module unload.
2420  */
2421 void dasd_generic_remove(struct ccw_device *cdev)
2422 {
2423 	struct dasd_device *device;
2424 	struct dasd_block *block;
2425 
2426 	cdev->handler = NULL;
2427 
2428 	dasd_remove_sysfs_files(cdev);
2429 	device = dasd_device_from_cdev(cdev);
2430 	if (IS_ERR(device))
2431 		return;
2432 	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2433 		/* Already doing offline processing */
2434 		dasd_put_device(device);
2435 		return;
2436 	}
2437 	/*
2438 	 * This device is removed unconditionally. Set offline
2439 	 * flag to prevent dasd_open from opening it while it is
	 * not quite shut down yet.
2441 	 */
2442 	dasd_set_target_state(device, DASD_STATE_NEW);
2443 	/* dasd_delete_device destroys the device reference. */
2444 	block = device->block;
2445 	device->block = NULL;
2446 	dasd_delete_device(device);
2447 	/*
2448 	 * life cycle of block is bound to device, so delete it after
2449 	 * device was safely removed
2450 	 */
2451 	if (block)
2452 		dasd_free_block(block);
2453 }
2454 
2455 /*
2456  * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
2457  * the device is detected for the first time and is supposed to be used
2458  * or the user has started activation through sysfs.
2459  */
2460 int dasd_generic_set_online(struct ccw_device *cdev,
2461 			    struct dasd_discipline *base_discipline)
2462 {
2463 	struct dasd_discipline *discipline;
2464 	struct dasd_device *device;
2465 	int rc;
2466 
2467 	/* first online clears initial online feature flag */
2468 	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
2469 	device = dasd_create_device(cdev);
2470 	if (IS_ERR(device))
2471 		return PTR_ERR(device);
2472 
2473 	discipline = base_discipline;
2474 	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
2476 			pr_warning("%s Setting the DASD online failed because "
2477 				   "of missing DIAG discipline\n",
2478 				   dev_name(&cdev->dev));
2479 			dasd_delete_device(device);
2480 			return -ENODEV;
2481 		}
2482 		discipline = dasd_diag_discipline_pointer;
2483 	}
2484 	if (!try_module_get(base_discipline->owner)) {
2485 		dasd_delete_device(device);
2486 		return -EINVAL;
2487 	}
2488 	if (!try_module_get(discipline->owner)) {
2489 		module_put(base_discipline->owner);
2490 		dasd_delete_device(device);
2491 		return -EINVAL;
2492 	}
2493 	device->base_discipline = base_discipline;
2494 	device->discipline = discipline;
2495 
2496 	/* check_device will allocate block device if necessary */
2497 	rc = discipline->check_device(device);
2498 	if (rc) {
2499 		pr_warning("%s Setting the DASD online with discipline %s "
2500 			   "failed with rc=%i\n",
2501 			   dev_name(&cdev->dev), discipline->name, rc);
2502 		module_put(discipline->owner);
2503 		module_put(base_discipline->owner);
2504 		dasd_delete_device(device);
2505 		return rc;
2506 	}
2507 
2508 	dasd_set_target_state(device, DASD_STATE_ONLINE);
2509 	if (device->state <= DASD_STATE_KNOWN) {
2510 		pr_warning("%s Setting the DASD online failed because of a "
2511 			   "missing discipline\n", dev_name(&cdev->dev));
2512 		rc = -ENODEV;
2513 		dasd_set_target_state(device, DASD_STATE_NEW);
2514 		if (device->block)
2515 			dasd_free_block(device->block);
2516 		dasd_delete_device(device);
2517 	} else
2518 		pr_debug("dasd_generic device %s found\n",
2519 				dev_name(&cdev->dev));
2520 
2521 	wait_event(dasd_init_waitq, _wait_for_device(device));
2522 
2523 	dasd_put_device(device);
2524 	return rc;
2525 }
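
/*
 * Hypothetical usage sketch (not from this file): a discipline driver
 * typically calls this from its ccw_driver set_online callback, e.g.:
 *
 *	static int dasd_xxx_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_xxx_discipline);
 *	}
 */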
2526 
2527 int dasd_generic_set_offline(struct ccw_device *cdev)
2528 {
2529 	struct dasd_device *device;
2530 	struct dasd_block *block;
2531 	int max_count, open_count;
2532 
2533 	device = dasd_device_from_cdev(cdev);
2534 	if (IS_ERR(device))
2535 		return PTR_ERR(device);
2536 	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
2537 		/* Already doing offline processing */
2538 		dasd_put_device(device);
2539 		return 0;
2540 	}
2541 	/*
2542 	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, which includes
2544 	 * the blkdev_get in dasd_scan_partitions. We are only interested
2545 	 * in the other openers.
2546 	 */
2547 	if (device->block) {
2548 		max_count = device->block->bdev ? 0 : -1;
2549 		open_count = atomic_read(&device->block->open_count);
2550 		if (open_count > max_count) {
2551 			if (open_count > 0)
2552 				pr_warning("%s: The DASD cannot be set offline "
2553 					   "with open count %i\n",
2554 					   dev_name(&cdev->dev), open_count);
2555 			else
2556 				pr_warning("%s: The DASD cannot be set offline "
2557 					   "while it is in use\n",
2558 					   dev_name(&cdev->dev));
2559 			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
2560 			dasd_put_device(device);
2561 			return -EBUSY;
2562 		}
2563 	}
2564 	dasd_set_target_state(device, DASD_STATE_NEW);
2565 	/* dasd_delete_device destroys the device reference. */
2566 	block = device->block;
2567 	device->block = NULL;
2568 	dasd_delete_device(device);
2569 	/*
2570 	 * life cycle of block is bound to device, so delete it after
2571 	 * device was safely removed
2572 	 */
2573 	if (block)
2574 		dasd_free_block(block);
2575 	return 0;
2576 }
2577 
2578 int dasd_generic_notify(struct ccw_device *cdev, int event)
2579 {
2580 	struct dasd_device *device;
2581 	struct dasd_ccw_req *cqr;
2582 	int ret;
2583 
2584 	device = dasd_device_from_cdev_locked(cdev);
2585 	if (IS_ERR(device))
2586 		return 0;
2587 	ret = 0;
2588 	switch (event) {
2589 	case CIO_GONE:
2590 	case CIO_BOXED:
2591 	case CIO_NO_PATH:
2592 		/* First of all call extended error reporting. */
2593 		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
2594 
2595 		if (device->state < DASD_STATE_BASIC)
2596 			break;
2597 		/* Device is active. We want to keep it. */
2598 		list_for_each_entry(cqr, &device->ccw_queue, devlist)
2599 			if (cqr->status == DASD_CQR_IN_IO) {
2600 				cqr->status = DASD_CQR_QUEUED;
2601 				cqr->retries++;
2602 			}
2603 		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
2604 		dasd_device_clear_timer(device);
2605 		dasd_schedule_device_bh(device);
2606 		ret = 1;
2607 		break;
2608 	case CIO_OPER:
2609 		/* FIXME: add a sanity check. */
2610 		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
2611 		if (device->stopped & DASD_UNRESUMED_PM) {
2612 			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
2613 			dasd_restore_device(device);
2614 			ret = 1;
2615 			break;
2616 		}
2617 		dasd_schedule_device_bh(device);
2618 		if (device->block)
2619 			dasd_schedule_block_bh(device->block);
2620 		ret = 1;
2621 		break;
2622 	}
2623 	dasd_put_device(device);
2624 	return ret;
2625 }
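
/*
 * Note on the return value: for the path events handled above,
 * returning 1 tells the common I/O layer that this driver wants to
 * keep the (disconnected) device, while 0 - returned for unknown
 * devices - lets the common I/O layer dispose of it.
 */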
2626 
2627 int dasd_generic_pm_freeze(struct ccw_device *cdev)
2628 {
2629 	struct dasd_ccw_req *cqr, *n;
2630 	int rc;
2631 	struct list_head freeze_queue;
2632 	struct dasd_device *device = dasd_device_from_cdev(cdev);
2633 
2634 	if (IS_ERR(device))
2635 		return PTR_ERR(device);
	/* disallow new I/O */
2637 	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
2638 	/* clear active requests */
2639 	INIT_LIST_HEAD(&freeze_queue);
2640 	spin_lock_irq(get_ccwdev_lock(cdev));
2641 	rc = 0;
2642 	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to freeze_queue */
2644 		if (cqr->status == DASD_CQR_IN_IO) {
2645 			rc = device->discipline->term_IO(cqr);
2646 			if (rc) {
				/* unable to terminate request */
2648 				dev_err(&device->cdev->dev,
2649 					"Unable to terminate request %p "
2650 					"on suspend\n", cqr);
2651 				spin_unlock_irq(get_ccwdev_lock(cdev));
2652 				dasd_put_device(device);
2653 				return rc;
2654 			}
2655 		}
2656 		list_move_tail(&cqr->devlist, &freeze_queue);
2657 	}
2658 
2659 	spin_unlock_irq(get_ccwdev_lock(cdev));
2660 
2661 	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
2662 		wait_event(dasd_flush_wq,
2663 			   (cqr->status != DASD_CQR_CLEAR_PENDING));
2664 		if (cqr->status == DASD_CQR_CLEARED)
2665 			cqr->status = DASD_CQR_QUEUED;
2666 	}
	/* requeue the frozen requests on the ccw_queue */
2668 	spin_lock_irq(get_ccwdev_lock(cdev));
2669 	list_splice_tail(&freeze_queue, &device->ccw_queue);
2670 	spin_unlock_irq(get_ccwdev_lock(cdev));
2671 
2672 	if (device->discipline->freeze)
2673 		rc = device->discipline->freeze(device);
2674 
2675 	dasd_put_device(device);
2676 	return rc;
2677 }
2678 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);
2679 
2680 int dasd_generic_restore_device(struct ccw_device *cdev)
2681 {
2682 	struct dasd_device *device = dasd_device_from_cdev(cdev);
2683 	int rc = 0;
2684 
2685 	if (IS_ERR(device))
2686 		return PTR_ERR(device);
2687 
2688 	/* allow new IO again */
2689 	dasd_device_remove_stop_bits(device,
2690 				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));
2691 
2692 	dasd_schedule_device_bh(device);
2693 
2694 	/*
	 * Call the discipline's restore function, but do nothing if the
	 * device is stopped, e.g. for disconnected devices.
2697 	 */
2698 	if (device->discipline->restore && !(device->stopped))
2699 		rc = device->discipline->restore(device);
2700 	if (rc || device->stopped)
2701 		/*
2702 		 * if the resume failed for the DASD we put it in
2703 		 * an UNRESUMED stop state
2704 		 */
2705 		device->stopped |= DASD_UNRESUMED_PM;
2706 
2707 	if (device->block)
2708 		dasd_schedule_block_bh(device->block);
2709 
2710 	dasd_put_device(device);
2711 	return 0;
2712 }
2713 EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
2714 
2715 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
2716 						   void *rdc_buffer,
2717 						   int rdc_buffer_size,
2718 						   int magic)
2719 {
2720 	struct dasd_ccw_req *cqr;
2721 	struct ccw1 *ccw;
2722 	unsigned long *idaw;
2723 
2724 	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
2725 
2726 	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
2728 		dev_err(&device->cdev->dev,
2729 			 "An error occurred in the DASD device driver, "
2730 			 "reason=%s\n", "13");
2731 		return cqr;
2732 	}
2733 
2734 	ccw = cqr->cpaddr;
2735 	ccw->cmd_code = CCW_CMD_RDC;
2736 	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
2737 		idaw = (unsigned long *) (cqr->data);
2738 		ccw->cda = (__u32)(addr_t) idaw;
2739 		ccw->flags = CCW_FLAG_IDA;
2740 		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
2741 	} else {
2742 		ccw->cda = (__u32)(addr_t) rdc_buffer;
2743 		ccw->flags = 0;
2744 	}
2745 
2746 	ccw->count = rdc_buffer_size;
2747 	cqr->startdev = device;
2748 	cqr->memdev = device;
2749 	cqr->expires = 10*HZ;
2750 	cqr->retries = 256;
2751 	cqr->buildclk = get_clock();
2752 	cqr->status = DASD_CQR_FILLED;
2753 	return cqr;
2754 }
2755 
2756 
2757 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
2758 				void *rdc_buffer, int rdc_buffer_size)
2759 {
2760 	int ret;
2761 	struct dasd_ccw_req *cqr;
2762 
2763 	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
2764 				     magic);
2765 	if (IS_ERR(cqr))
2766 		return PTR_ERR(cqr);
2767 
2768 	ret = dasd_sleep_on(cqr);
2769 	dasd_sfree_request(cqr, cqr->memdev);
2770 	return ret;
2771 }
2772 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
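
/*
 * Hypothetical usage sketch (names are illustrative): a discipline
 * reads the device characteristics into its private data during
 * device validation, e.g.:
 *
 *	rc = dasd_generic_read_dev_chars(device, MY_MAGIC,
 *					 &private->rdc_data,
 *					 sizeof(private->rdc_data));
 *	if (rc)
 *		return rc;
 */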
2773 
2774 /*
2775  *   In command mode and transport mode we need to look for sense
 *   data in different places. The sense data itself is always
2777  *   an array of 32 bytes, so we can unify the sense data access
2778  *   for both modes.
2779  */
2780 char *dasd_get_sense(struct irb *irb)
2781 {
2782 	struct tsb *tsb = NULL;
2783 	char *sense = NULL;
2784 
2785 	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
2786 		if (irb->scsw.tm.tcw)
2787 			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
2788 					  irb->scsw.tm.tcw);
2789 		if (tsb && tsb->length == 64 && tsb->flags)
2790 			switch (tsb->flags & 0x07) {
2791 			case 1:	/* tsa_iostat */
2792 				sense = tsb->tsa.iostat.sense;
2793 				break;
2794 			case 2: /* tsa_ddpc */
2795 				sense = tsb->tsa.ddpc.sense;
2796 				break;
2797 			default:
2798 				/* currently we don't use interrogate data */
2799 				break;
2800 			}
2801 	} else if (irb->esw.esw0.erw.cons) {
2802 		sense = irb->ecw;
2803 	}
2804 	return sense;
2805 }
2806 EXPORT_SYMBOL_GPL(dasd_get_sense);
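
/*
 * Typical (illustrative) use in an interrupt handler or ERP function:
 *
 *	sense = dasd_get_sense(irb);
 *	if (sense) {
 *		... inspect the 32 sense bytes ...
 *	}
 *
 * Callers must check for NULL since not every irb carries sense data.
 */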
2807 
2808 static int __init dasd_init(void)
2809 {
2810 	int rc;
2811 
2812 	init_waitqueue_head(&dasd_init_waitq);
2813 	init_waitqueue_head(&dasd_flush_wq);
2814 	init_waitqueue_head(&generic_waitq);
2815 
2816 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
2817 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
2818 	if (dasd_debug_area == NULL) {
2819 		rc = -ENOMEM;
2820 		goto failed;
2821 	}
2822 	debug_register_view(dasd_debug_area, &debug_sprintf_view);
2823 	debug_set_level(dasd_debug_area, DBF_WARNING);
2824 
2825 	DBF_EVENT(DBF_EMERG, "%s", "debug area created");
2826 
2827 	dasd_diag_discipline_pointer = NULL;
2828 
2829 	rc = dasd_devmap_init();
2830 	if (rc)
2831 		goto failed;
2832 	rc = dasd_gendisk_init();
2833 	if (rc)
2834 		goto failed;
2835 	rc = dasd_parse();
2836 	if (rc)
2837 		goto failed;
2838 	rc = dasd_eer_init();
2839 	if (rc)
2840 		goto failed;
2841 #ifdef CONFIG_PROC_FS
2842 	rc = dasd_proc_init();
2843 	if (rc)
2844 		goto failed;
2845 #endif
2846 
2847 	return 0;
2848 failed:
2849 	pr_info("The DASD device driver could not be initialized\n");
2850 	dasd_exit();
2851 	return rc;
2852 }
2853 
2854 module_init(dasd_init);
2855 module_exit(dasd_exit);
2856 
2857 EXPORT_SYMBOL(dasd_debug_area);
2858 EXPORT_SYMBOL(dasd_diag_discipline_pointer);
2859 
2860 EXPORT_SYMBOL(dasd_add_request_head);
2861 EXPORT_SYMBOL(dasd_add_request_tail);
2862 EXPORT_SYMBOL(dasd_cancel_req);
2863 EXPORT_SYMBOL(dasd_device_clear_timer);
2864 EXPORT_SYMBOL(dasd_block_clear_timer);
2865 EXPORT_SYMBOL(dasd_enable_device);
2866 EXPORT_SYMBOL(dasd_int_handler);
2867 EXPORT_SYMBOL(dasd_kfree_request);
2868 EXPORT_SYMBOL(dasd_kick_device);
2869 EXPORT_SYMBOL(dasd_kmalloc_request);
2870 EXPORT_SYMBOL(dasd_schedule_device_bh);
2871 EXPORT_SYMBOL(dasd_schedule_block_bh);
2872 EXPORT_SYMBOL(dasd_set_target_state);
2873 EXPORT_SYMBOL(dasd_device_set_timer);
2874 EXPORT_SYMBOL(dasd_block_set_timer);
2875 EXPORT_SYMBOL(dasd_sfree_request);
2876 EXPORT_SYMBOL(dasd_sleep_on);
2877 EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2878 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2879 EXPORT_SYMBOL(dasd_smalloc_request);
2880 EXPORT_SYMBOL(dasd_start_IO);
2881 EXPORT_SYMBOL(dasd_term_IO);
2882 
2883 EXPORT_SYMBOL_GPL(dasd_generic_probe);
2884 EXPORT_SYMBOL_GPL(dasd_generic_remove);
2885 EXPORT_SYMBOL_GPL(dasd_generic_notify);
2886 EXPORT_SYMBOL_GPL(dasd_generic_set_online);
2887 EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
2888 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
2889 EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
2890 EXPORT_SYMBOL_GPL(dasd_alloc_block);
2891 EXPORT_SYMBOL_GPL(dasd_free_block);
2892