1 /*
2  * File...........: linux/drivers/s390/block/dasd_eckd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 
23 #include <asm/debug.h>
24 #include <asm/idals.h>
25 #include <asm/ebcdic.h>
26 #include <asm/compat.h>
27 #include <asm/io.h>
28 #include <asm/uaccess.h>
29 #include <asm/cio.h>
30 #include <asm/ccwdev.h>
31 #include <asm/itcw.h>
32 
33 #include "dasd_int.h"
34 #include "dasd_eckd.h"
35 #include "../cio/chsc.h"
36 
37 
38 #ifdef PRINTK_HEADER
39 #undef PRINTK_HEADER
40 #endif				/* PRINTK_HEADER */
41 #define PRINTK_HEADER "dasd(eckd):"
42 
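/*
 * Shortcuts into the track-capacity factors of the read device
 * characteristics data. Which union member holds f1-f3, and whether
 * f4/f5 exist at all, depends on the formula indicator (0x01 or 0x02).
 */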
43 #define ECKD_C0(i) (i->home_bytes)
44 #define ECKD_F(i) (i->formula)
45 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
46 		    (i->factors.f_0x02.f1))
47 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
48 		    (i->factors.f_0x02.f2))
49 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
50 		    (i->factors.f_0x02.f3))
51 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
52 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
53 #define ECKD_F6(i) (i->factor6)
54 #define ECKD_F7(i) (i->factor7)
55 #define ECKD_F8(i) (i->factor8)
56 
57 MODULE_LICENSE("GPL");
58 
59 static struct dasd_discipline dasd_eckd_discipline;
60 
61 /* The ccw bus type uses this table to find devices that it sends to
62  * dasd_eckd_probe */
63 static struct ccw_device_id dasd_eckd_ids[] = {
64 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
65 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
66 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
67 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
68 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
69 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
70 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
71 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
72 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
73 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
74 	{ /* end of list */ },
75 };
76 
77 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
78 
79 static struct ccw_driver dasd_eckd_driver; /* see below */
80 
81 #define INIT_CQR_OK 0
82 #define INIT_CQR_UNFORMATTED 1
83 #define INIT_CQR_ERROR 2
84 
85 
86 /* initial attempt at a probe function. this can be simplified once
87  * the other detection code is gone */
88 static int
89 dasd_eckd_probe (struct ccw_device *cdev)
90 {
91 	int ret;
92 
93 	/* set ECKD specific ccw-device options */
94 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
95 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
96 	if (ret) {
97 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
98 				"dasd_eckd_probe: could not set "
99 				"ccw-device options");
100 		return ret;
101 	}
102 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
103 	return ret;
104 }
105 
106 static int
107 dasd_eckd_set_online(struct ccw_device *cdev)
108 {
109 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
110 }
111 
112 static const int sizes_trk0[] = { 28, 148, 84 };
113 #define LABEL_SIZE 140
114 
115 static inline unsigned int
116 round_up_multiple(unsigned int no, unsigned int mult)
117 {
118 	int rem = no % mult;
119 	return (rem ? no - rem + mult : no);
120 }
121 
122 static inline unsigned int
123 ceil_quot(unsigned int d1, unsigned int d2)
124 {
125 	return (d1 + (d2 - 1)) / d2;
126 }
127 
128 static unsigned int
129 recs_per_track(struct dasd_eckd_characteristics * rdc,
130 	       unsigned int kl, unsigned int dl)
131 {
132 	int dn, kn;
133 
134 	switch (rdc->dev_type) {
135 	case 0x3380:
136 		if (kl)
137 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
138 				       ceil_quot(dl + 12, 32));
139 		else
140 			return 1499 / (15 + ceil_quot(dl + 12, 32));
141 	case 0x3390:
142 		dn = ceil_quot(dl + 6, 232) + 1;
143 		if (kl) {
144 			kn = ceil_quot(kl + 6, 232) + 1;
145 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
146 				       9 + ceil_quot(dl + 6 * dn, 34));
147 		} else
148 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
149 	case 0x9345:
150 		dn = ceil_quot(dl + 6, 232) + 1;
151 		if (kl) {
152 			kn = ceil_quot(kl + 6, 232) + 1;
153 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
154 				       ceil_quot(dl + 6 * dn, 34));
155 		} else
156 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
157 	}
158 	return 0;
159 }
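/*
 * Worked example for the 3390 branch above: 4096-byte data blocks and
 * no key field give dn = ceil_quot(4096 + 6, 232) + 1 = 19, hence
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12
 * records per track.
 */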
160 
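/*
 * Pack a cylinder/head pair into a struct ch_t. The cyl field is only
 * 16 bits wide, so for large volumes the cylinder bits above 16 are
 * stored in the upper 12 bits of the head field, while the real head
 * number (at most 14 for ECKD) stays in the low 4 bits.
 */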
161 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
162 {
163 	geo->cyl = (__u16) cyl;
164 	geo->head = cyl >> 16;
165 	geo->head <<= 4;
166 	geo->head |= head;
167 }
168 
169 static int
170 check_XRC (struct ccw1         *de_ccw,
171            struct DE_eckd_data *data,
172            struct dasd_device  *device)
173 {
174         struct dasd_eckd_private *private;
175 	int rc;
176 
177         private = (struct dasd_eckd_private *) device->private;
178 	if (!private->rdc_data.facilities.XRC_supported)
179 		return 0;
180 
181         /* switch on System Time Stamp - needed for XRC Support */
182 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
183 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
184 
185 	rc = get_sync_clock(&data->ep_sys_time);
186 	/* Ignore return code if sync clock is switched off. */
187 	if (rc == -ENOSYS || rc == -EACCES)
188 		rc = 0;
189 
190 	de_ccw->count = sizeof(struct DE_eckd_data);
191 	de_ccw->flags |= CCW_FLAG_SLI;
192 	return rc;
193 }
194 
195 static int
196 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
197 	      unsigned int totrk, int cmd, struct dasd_device *device)
198 {
199 	struct dasd_eckd_private *private;
200 	u32 begcyl, endcyl;
201 	u16 heads, beghead, endhead;
202 	int rc = 0;
203 
204 	private = (struct dasd_eckd_private *) device->private;
205 
206 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
207 	ccw->flags = 0;
208 	ccw->count = 16;
209 	ccw->cda = (__u32) __pa(data);
210 
211 	memset(data, 0, sizeof(struct DE_eckd_data));
212 	switch (cmd) {
213 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
214 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
215 	case DASD_ECKD_CCW_READ:
216 	case DASD_ECKD_CCW_READ_MT:
217 	case DASD_ECKD_CCW_READ_CKD:
218 	case DASD_ECKD_CCW_READ_CKD_MT:
219 	case DASD_ECKD_CCW_READ_KD:
220 	case DASD_ECKD_CCW_READ_KD_MT:
221 	case DASD_ECKD_CCW_READ_COUNT:
222 		data->mask.perm = 0x1;
223 		data->attributes.operation = private->attrib.operation;
224 		break;
225 	case DASD_ECKD_CCW_WRITE:
226 	case DASD_ECKD_CCW_WRITE_MT:
227 	case DASD_ECKD_CCW_WRITE_KD:
228 	case DASD_ECKD_CCW_WRITE_KD_MT:
229 		data->mask.perm = 0x02;
230 		data->attributes.operation = private->attrib.operation;
231 		rc = check_XRC (ccw, data, device);
232 		break;
233 	case DASD_ECKD_CCW_WRITE_CKD:
234 	case DASD_ECKD_CCW_WRITE_CKD_MT:
235 		data->attributes.operation = DASD_BYPASS_CACHE;
236 		rc = check_XRC (ccw, data, device);
237 		break;
238 	case DASD_ECKD_CCW_ERASE:
239 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
240 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
241 		data->mask.perm = 0x3;
242 		data->mask.auth = 0x1;
243 		data->attributes.operation = DASD_BYPASS_CACHE;
244 		rc = check_XRC (ccw, data, device);
245 		break;
246 	default:
247 		dev_err(&device->cdev->dev,
248 			"0x%x is not a known command\n", cmd);
249 		break;
250 	}
251 
252 	data->attributes.mode = 0x3;	/* ECKD */
253 
254 	if ((private->rdc_data.cu_type == 0x2105 ||
255 	     private->rdc_data.cu_type == 0x2107 ||
256 	     private->rdc_data.cu_type == 0x1750)
257 	    && !(private->uses_cdl && trk < 2))
258 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
259 
260 	heads = private->rdc_data.trk_per_cyl;
261 	begcyl = trk / heads;
262 	beghead = trk % heads;
263 	endcyl = totrk / heads;
264 	endhead = totrk % heads;
265 
266 	/* check for sequential prestage - enhance cylinder range */
267 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
268 	    data->attributes.operation == DASD_SEQ_ACCESS) {
269 
270 		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
271 			endcyl += private->attrib.nr_cyl;
272 		else
273 			endcyl = (private->real_cyl - 1);
274 	}
275 
276 	set_ch_t(&data->beg_ext, begcyl, beghead);
277 	set_ch_t(&data->end_ext, endcyl, endhead);
278 	return rc;
279 }
280 
281 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
282 			       struct dasd_device  *device)
283 {
284 	struct dasd_eckd_private *private;
285 	int rc;
286 
287 	private = (struct dasd_eckd_private *) device->private;
288 	if (!private->rdc_data.facilities.XRC_supported)
289 		return 0;
290 
291 	/* switch on System Time Stamp - needed for XRC Support */
292 	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
293 	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
294 	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
295 
296 	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
297 	/* Ignore return code if sync clock is switched off. */
298 	if (rc == -ENOSYS || rc == -EACCES)
299 		rc = 0;
300 	return rc;
301 }
302 
303 static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
304 			  unsigned int rec_on_trk, int count, int cmd,
305 			  struct dasd_device *device, unsigned int reclen,
306 			  unsigned int tlf)
307 {
308 	struct dasd_eckd_private *private;
309 	int sector;
310 	int dn, d;
311 
312 	private = (struct dasd_eckd_private *) device->private;
313 
314 	memset(data, 0, sizeof(*data));
315 	sector = 0;
316 	if (rec_on_trk) {
317 		switch (private->rdc_data.dev_type) {
318 		case 0x3390:
319 			dn = ceil_quot(reclen + 6, 232);
320 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
321 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
322 			break;
323 		case 0x3380:
324 			d = 7 + ceil_quot(reclen + 12, 32);
325 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
326 			break;
327 		}
328 	}
329 	data->sector = sector;
330 	/* note: the meaning of count depends on the operation:
331 	 *	 for record based I/O it's the number of records, but for
332 	 *	 track based I/O it's the number of tracks
333 	 */
334 	data->count = count;
335 	switch (cmd) {
336 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
337 		data->operation.orientation = 0x3;
338 		data->operation.operation = 0x03;
339 		break;
340 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
341 		data->operation.orientation = 0x3;
342 		data->operation.operation = 0x16;
343 		break;
344 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
345 		data->operation.orientation = 0x1;
346 		data->operation.operation = 0x03;
347 		data->count++;
348 		break;
349 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
350 		data->operation.orientation = 0x3;
351 		data->operation.operation = 0x16;
352 		data->count++;
353 		break;
354 	case DASD_ECKD_CCW_WRITE:
355 	case DASD_ECKD_CCW_WRITE_MT:
356 	case DASD_ECKD_CCW_WRITE_KD:
357 	case DASD_ECKD_CCW_WRITE_KD_MT:
358 		data->auxiliary.length_valid = 0x1;
359 		data->length = reclen;
360 		data->operation.operation = 0x01;
361 		break;
362 	case DASD_ECKD_CCW_WRITE_CKD:
363 	case DASD_ECKD_CCW_WRITE_CKD_MT:
364 		data->auxiliary.length_valid = 0x1;
365 		data->length = reclen;
366 		data->operation.operation = 0x03;
367 		break;
368 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
369 		data->auxiliary.length_valid = 0x1;
370 		data->length = reclen;	/* not tlf, as one might think */
371 		data->operation.operation = 0x3F;
372 		data->extended_operation = 0x23;
373 		break;
374 	case DASD_ECKD_CCW_READ:
375 	case DASD_ECKD_CCW_READ_MT:
376 	case DASD_ECKD_CCW_READ_KD:
377 	case DASD_ECKD_CCW_READ_KD_MT:
378 		data->auxiliary.length_valid = 0x1;
379 		data->length = reclen;
380 		data->operation.operation = 0x06;
381 		break;
382 	case DASD_ECKD_CCW_READ_CKD:
383 	case DASD_ECKD_CCW_READ_CKD_MT:
384 		data->auxiliary.length_valid = 0x1;
385 		data->length = reclen;
386 		data->operation.operation = 0x16;
387 		break;
388 	case DASD_ECKD_CCW_READ_COUNT:
389 		data->operation.operation = 0x06;
390 		break;
391 	case DASD_ECKD_CCW_READ_TRACK_DATA:
392 		data->auxiliary.length_valid = 0x1;
393 		data->length = tlf;
394 		data->operation.operation = 0x0C;
395 		break;
396 	case DASD_ECKD_CCW_ERASE:
397 		data->length = reclen;
398 		data->auxiliary.length_valid = 0x1;
399 		data->operation.operation = 0x0b;
400 		break;
401 	default:
402 		DBF_DEV_EVENT(DBF_ERR, device,
403 			    "fill LRE unknown opcode 0x%x", cmd);
404 		BUG();
405 	}
406 	set_ch_t(&data->seek_addr,
407 		 trk / private->rdc_data.trk_per_cyl,
408 		 trk % private->rdc_data.trk_per_cyl);
409 	data->search_arg.cyl = data->seek_addr.cyl;
410 	data->search_arg.head = data->seek_addr.head;
411 	data->search_arg.record = rec_on_trk;
412 }
413 
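/*
 * Build a Prefix (PFX) CCW. The prefix data embeds the Define Extent
 * parameters and, for format 1, a Locate Record Extended, so a single
 * CCW replaces the separate DE/LRE pair. If the start device is an
 * alias, the validity bits ask the storage server to verify (and, for
 * HyperPAV, resolve) the base device address.
 */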
414 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
415 		      unsigned int trk, unsigned int totrk, int cmd,
416 		      struct dasd_device *basedev, struct dasd_device *startdev,
417 		      unsigned char format, unsigned int rec_on_trk, int count,
418 		      unsigned int blksize, unsigned int tlf)
419 {
420 	struct dasd_eckd_private *basepriv, *startpriv;
421 	struct DE_eckd_data *dedata;
422 	struct LRE_eckd_data *lredata;
423 	u32 begcyl, endcyl;
424 	u16 heads, beghead, endhead;
425 	int rc = 0;
426 
427 	basepriv = (struct dasd_eckd_private *) basedev->private;
428 	startpriv = (struct dasd_eckd_private *) startdev->private;
429 	dedata = &pfxdata->define_extent;
430 	lredata = &pfxdata->locate_record;
431 
432 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
433 	ccw->flags = 0;
434 	ccw->count = sizeof(*pfxdata);
435 	ccw->cda = (__u32) __pa(pfxdata);
436 
437 	memset(pfxdata, 0, sizeof(*pfxdata));
438 	/* prefix data */
439 	if (format > 1) {
440 		DBF_DEV_EVENT(DBF_ERR, basedev,
441 			      "PFX LRE unknown format 0x%x", format);
442 		BUG();
443 		return -EINVAL;
444 	}
445 	pfxdata->format = format;
446 	pfxdata->base_address = basepriv->ned->unit_addr;
447 	pfxdata->base_lss = basepriv->ned->ID;
448 	pfxdata->validity.define_extent = 1;
449 
450 	/* private uid is kept up to date, conf_data may be outdated */
451 	if (startpriv->uid.type != UA_BASE_DEVICE) {
452 		pfxdata->validity.verify_base = 1;
453 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
454 			pfxdata->validity.hyper_pav = 1;
455 	}
456 
457 	/* define extent data (mostly) */
458 	switch (cmd) {
459 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
460 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
461 	case DASD_ECKD_CCW_READ:
462 	case DASD_ECKD_CCW_READ_MT:
463 	case DASD_ECKD_CCW_READ_CKD:
464 	case DASD_ECKD_CCW_READ_CKD_MT:
465 	case DASD_ECKD_CCW_READ_KD:
466 	case DASD_ECKD_CCW_READ_KD_MT:
467 	case DASD_ECKD_CCW_READ_COUNT:
468 		dedata->mask.perm = 0x1;
469 		dedata->attributes.operation = basepriv->attrib.operation;
470 		break;
471 	case DASD_ECKD_CCW_READ_TRACK_DATA:
472 		dedata->mask.perm = 0x1;
473 		dedata->attributes.operation = basepriv->attrib.operation;
474 		dedata->blk_size = 0;
475 		break;
476 	case DASD_ECKD_CCW_WRITE:
477 	case DASD_ECKD_CCW_WRITE_MT:
478 	case DASD_ECKD_CCW_WRITE_KD:
479 	case DASD_ECKD_CCW_WRITE_KD_MT:
480 		dedata->mask.perm = 0x02;
481 		dedata->attributes.operation = basepriv->attrib.operation;
482 		rc = check_XRC_on_prefix(pfxdata, basedev);
483 		break;
484 	case DASD_ECKD_CCW_WRITE_CKD:
485 	case DASD_ECKD_CCW_WRITE_CKD_MT:
486 		dedata->attributes.operation = DASD_BYPASS_CACHE;
487 		rc = check_XRC_on_prefix(pfxdata, basedev);
488 		break;
489 	case DASD_ECKD_CCW_ERASE:
490 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
491 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
492 		dedata->mask.perm = 0x3;
493 		dedata->mask.auth = 0x1;
494 		dedata->attributes.operation = DASD_BYPASS_CACHE;
495 		rc = check_XRC_on_prefix(pfxdata, basedev);
496 		break;
497 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
498 		dedata->mask.perm = 0x02;
499 		dedata->attributes.operation = basepriv->attrib.operation;
500 		dedata->blk_size = blksize;
501 		rc = check_XRC_on_prefix(pfxdata, basedev);
502 		break;
503 	default:
504 		DBF_DEV_EVENT(DBF_ERR, basedev,
505 			    "PFX LRE unknown opcode 0x%x", cmd);
506 		BUG();
507 		return -EINVAL;
508 	}
509 
510 	dedata->attributes.mode = 0x3;	/* ECKD */
511 
512 	if ((basepriv->rdc_data.cu_type == 0x2105 ||
513 	     basepriv->rdc_data.cu_type == 0x2107 ||
514 	     basepriv->rdc_data.cu_type == 0x1750)
515 	    && !(basepriv->uses_cdl && trk < 2))
516 		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
517 
518 	heads = basepriv->rdc_data.trk_per_cyl;
519 	begcyl = trk / heads;
520 	beghead = trk % heads;
521 	endcyl = totrk / heads;
522 	endhead = totrk % heads;
523 
524 	/* check for sequential prestage - enhance cylinder range */
525 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
526 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
527 
528 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
529 			endcyl += basepriv->attrib.nr_cyl;
530 		else
531 			endcyl = (basepriv->real_cyl - 1);
532 	}
533 
534 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
535 	set_ch_t(&dedata->end_ext, endcyl, endhead);
536 
537 	if (format == 1) {
538 		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
539 			      basedev, blksize, tlf);
540 	}
541 
542 	return rc;
543 }
544 
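/*
 * Convenience wrapper: build a format-0 prefix, i.e. Define Extent
 * data only, without an embedded Locate Record.
 */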
545 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
546 		  unsigned int trk, unsigned int totrk, int cmd,
547 		  struct dasd_device *basedev, struct dasd_device *startdev)
548 {
549 	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
550 			  0, 0, 0, 0, 0);
551 }
552 
553 static void
554 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
555 	      unsigned int rec_on_trk, int no_rec, int cmd,
556 	      struct dasd_device * device, int reclen)
557 {
558 	struct dasd_eckd_private *private;
559 	int sector;
560 	int dn, d;
561 
562 	private = (struct dasd_eckd_private *) device->private;
563 
564 	DBF_DEV_EVENT(DBF_INFO, device,
565 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
566 		  trk, rec_on_trk, no_rec, cmd, reclen);
567 
568 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
569 	ccw->flags = 0;
570 	ccw->count = 16;
571 	ccw->cda = (__u32) __pa(data);
572 
573 	memset(data, 0, sizeof(struct LO_eckd_data));
574 	sector = 0;
575 	if (rec_on_trk) {
576 		switch (private->rdc_data.dev_type) {
577 		case 0x3390:
578 			dn = ceil_quot(reclen + 6, 232);
579 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
580 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
581 			break;
582 		case 0x3380:
583 			d = 7 + ceil_quot(reclen + 12, 32);
584 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
585 			break;
586 		}
587 	}
588 	data->sector = sector;
589 	data->count = no_rec;
590 	switch (cmd) {
591 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
592 		data->operation.orientation = 0x3;
593 		data->operation.operation = 0x03;
594 		break;
595 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
596 		data->operation.orientation = 0x3;
597 		data->operation.operation = 0x16;
598 		break;
599 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
600 		data->operation.orientation = 0x1;
601 		data->operation.operation = 0x03;
602 		data->count++;
603 		break;
604 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
605 		data->operation.orientation = 0x3;
606 		data->operation.operation = 0x16;
607 		data->count++;
608 		break;
609 	case DASD_ECKD_CCW_WRITE:
610 	case DASD_ECKD_CCW_WRITE_MT:
611 	case DASD_ECKD_CCW_WRITE_KD:
612 	case DASD_ECKD_CCW_WRITE_KD_MT:
613 		data->auxiliary.last_bytes_used = 0x1;
614 		data->length = reclen;
615 		data->operation.operation = 0x01;
616 		break;
617 	case DASD_ECKD_CCW_WRITE_CKD:
618 	case DASD_ECKD_CCW_WRITE_CKD_MT:
619 		data->auxiliary.last_bytes_used = 0x1;
620 		data->length = reclen;
621 		data->operation.operation = 0x03;
622 		break;
623 	case DASD_ECKD_CCW_READ:
624 	case DASD_ECKD_CCW_READ_MT:
625 	case DASD_ECKD_CCW_READ_KD:
626 	case DASD_ECKD_CCW_READ_KD_MT:
627 		data->auxiliary.last_bytes_used = 0x1;
628 		data->length = reclen;
629 		data->operation.operation = 0x06;
630 		break;
631 	case DASD_ECKD_CCW_READ_CKD:
632 	case DASD_ECKD_CCW_READ_CKD_MT:
633 		data->auxiliary.last_bytes_used = 0x1;
634 		data->length = reclen;
635 		data->operation.operation = 0x16;
636 		break;
637 	case DASD_ECKD_CCW_READ_COUNT:
638 		data->operation.operation = 0x06;
639 		break;
640 	case DASD_ECKD_CCW_ERASE:
641 		data->length = reclen;
642 		data->auxiliary.last_bytes_used = 0x1;
643 		data->operation.operation = 0x0b;
644 		break;
645 	default:
646 		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
647 			      "opcode 0x%x", cmd);
648 	}
649 	set_ch_t(&data->seek_addr,
650 		 trk / private->rdc_data.trk_per_cyl,
651 		 trk % private->rdc_data.trk_per_cyl);
652 	data->search_arg.cyl = data->seek_addr.cyl;
653 	data->search_arg.head = data->seek_addr.head;
654 	data->search_arg.record = rec_on_trk;
655 }
656 
657 /*
658  * Returns 1 if the block is one of the special blocks that needs
659  * to get read/written with the KD variant of the command.
660  * That is DASD_ECKD_CCW_READ_KD_MT instead of DASD_ECKD_CCW_READ_MT and
661  * DASD_ECKD_CCW_WRITE_KD_MT instead of DASD_ECKD_CCW_WRITE_MT.
662  * Luckily the KD variants differ only by one bit (0x08) from the
663  * normal variant. So don't wonder about code like:
664  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
665  *         ccw->cmd_code |= 0x8;
666  */
667 static inline int
668 dasd_eckd_cdl_special(int blk_per_trk, int recid)
669 {
670 	if (recid < 3)
671 		return 1;
672 	if (recid < blk_per_trk)
673 		return 0;
674 	if (recid < 2 * blk_per_trk)
675 		return 1;
676 	return 0;
677 }
678 
679 /*
680  * Returns the record size for the special blocks of the cdl format.
681  * Only returns something useful if dasd_eckd_cdl_special is true
682  * for the recid.
683  */
684 static inline int
685 dasd_eckd_cdl_reclen(int recid)
686 {
687 	if (recid < 3)
688 		return sizes_trk0[recid];
689 	return LABEL_SIZE;
690 }
691 
692 /*
693  * Generate device unique id that specifies the physical device.
694  */
695 static int dasd_eckd_generate_uid(struct dasd_device *device,
696 				  struct dasd_uid *uid)
697 {
698 	struct dasd_eckd_private *private;
699 	int count;
700 
701 	private = (struct dasd_eckd_private *) device->private;
702 	if (!private)
703 		return -ENODEV;
704 	if (!private->ned || !private->gneq)
705 		return -ENODEV;
706 
707 	memset(uid, 0, sizeof(struct dasd_uid));
708 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
709 	       sizeof(uid->vendor) - 1);
710 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
711 	memcpy(uid->serial, private->ned->HDA_location,
712 	       sizeof(uid->serial) - 1);
713 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
714 	uid->ssid = private->gneq->subsystemID;
715 	uid->real_unit_addr = private->ned->unit_addr;
716 	if (private->sneq) {
717 		uid->type = private->sneq->sua_flags;
718 		if (uid->type == UA_BASE_PAV_ALIAS)
719 			uid->base_unit_addr = private->sneq->base_unit_addr;
720 	} else {
721 		uid->type = UA_BASE_DEVICE;
722 	}
723 	if (private->vdsneq) {
724 		for (count = 0; count < 16; count++) {
725 			sprintf(uid->vduit+2*count, "%02x",
726 				private->vdsneq->uit[count]);
727 		}
728 	}
729 	return 0;
730 }
731 
732 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
733 						    void *rcd_buffer,
734 						    struct ciw *ciw, __u8 lpm)
735 {
736 	struct dasd_ccw_req *cqr;
737 	struct ccw1 *ccw;
738 
739 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
740 				   device);
741 
742 	if (IS_ERR(cqr)) {
743 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
744 			      "Could not allocate RCD request");
745 		return cqr;
746 	}
747 
748 	ccw = cqr->cpaddr;
749 	ccw->cmd_code = ciw->cmd;
750 	ccw->cda = (__u32)(addr_t)rcd_buffer;
751 	ccw->count = ciw->count;
752 
753 	cqr->startdev = device;
754 	cqr->memdev = device;
755 	cqr->block = NULL;
756 	cqr->expires = 10*HZ;
757 	cqr->lpm = lpm;
758 	cqr->retries = 256;
759 	cqr->buildclk = get_clock();
760 	cqr->status = DASD_CQR_FILLED;
761 	return cqr;
762 }
763 
764 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
765 				   void **rcd_buffer,
766 				   int *rcd_buffer_size, __u8 lpm)
767 {
768 	struct ciw *ciw;
769 	char *rcd_buf = NULL;
770 	int ret;
771 	struct dasd_ccw_req *cqr;
772 
773 	/*
774 	 * scan for RCD command in extended SenseID data
775 	 */
776 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
777 	if (!ciw || ciw->cmd == 0) {
778 		ret = -EOPNOTSUPP;
779 		goto out_error;
780 	}
781 	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
782 	if (!rcd_buf) {
783 		ret = -ENOMEM;
784 		goto out_error;
785 	}
786 
787 	/*
788 	 * buffer has to start with EBCDIC "V1.0" to show
789 	 * support for virtual device SNEQ
790 	 */
791 	rcd_buf[0] = 0xE5;
792 	rcd_buf[1] = 0xF1;
793 	rcd_buf[2] = 0x4B;
794 	rcd_buf[3] = 0xF0;
795 	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
796 	if (IS_ERR(cqr)) {
797 		ret =  PTR_ERR(cqr);
798 		goto out_error;
799 	}
800 	ret = dasd_sleep_on(cqr);
801 	/*
802 	 * on success we update the user input parms
803 	 */
804 	dasd_sfree_request(cqr, cqr->memdev);
805 	if (ret)
806 		goto out_error;
807 
808 	*rcd_buffer_size = ciw->count;
809 	*rcd_buffer = rcd_buf;
810 	return 0;
811 out_error:
812 	kfree(rcd_buf);
813 	*rcd_buffer = NULL;
814 	*rcd_buffer_size = 0;
815 	return ret;
816 }
817 
818 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
819 {
820 
821 	struct dasd_sneq *sneq;
822 	int i, count;
823 
824 	private->ned = NULL;
825 	private->sneq = NULL;
826 	private->vdsneq = NULL;
827 	private->gneq = NULL;
828 	count = private->conf_len / sizeof(struct dasd_sneq);
829 	sneq = (struct dasd_sneq *)private->conf_data;
830 	for (i = 0; i < count; ++i) {
831 		if (sneq->flags.identifier == 1 && sneq->format == 1)
832 			private->sneq = sneq;
833 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
834 			private->vdsneq = (struct vd_sneq *)sneq;
835 		else if (sneq->flags.identifier == 2)
836 			private->gneq = (struct dasd_gneq *)sneq;
837 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
838 			private->ned = (struct dasd_ned *)sneq;
839 		sneq++;
840 	}
841 	if (!private->ned || !private->gneq) {
842 		private->ned = NULL;
843 		private->sneq = NULL;
844 		private->vdsneq = NULL;
845 		private->gneq = NULL;
846 		return -EINVAL;
847 	}
848 	return 0;
849 
850 };
851 
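/*
 * Scan the configuration data for the generic NEQ (identifier 2) and
 * return the low three bits of its byte 18, which describe how the
 * path may be used. dasd_eckd_read_conf() records paths reporting
 * 0x02 and 0x03 in the npm and ppm masks respectively.
 */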
852 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
853 {
854 	struct dasd_gneq *gneq;
855 	int i, count, found;
856 
857 	count = conf_len / sizeof(*gneq);
858 	gneq = (struct dasd_gneq *)conf_data;
859 	found = 0;
860 	for (i = 0; i < count; ++i) {
861 		if (gneq->flags.identifier == 2) {
862 			found = 1;
863 			break;
864 		}
865 		gneq++;
866 	}
867 	if (found)
868 		return ((char *)gneq)[18] & 0x07;
869 	else
870 		return 0;
871 }
872 
873 static int dasd_eckd_read_conf(struct dasd_device *device)
874 {
875 	void *conf_data;
876 	int conf_len, conf_data_saved;
877 	int rc;
878 	__u8 lpm;
879 	struct dasd_eckd_private *private;
880 	struct dasd_eckd_path *path_data;
881 
882 	private = (struct dasd_eckd_private *) device->private;
883 	path_data = (struct dasd_eckd_path *) &private->path_data;
884 	path_data->opm = ccw_device_get_path_mask(device->cdev);
885 	lpm = 0x80;
886 	conf_data_saved = 0;
887 	/* get configuration data per operational path */
888 	for (lpm = 0x80; lpm; lpm>>= 1) {
889 		if (lpm & path_data->opm){
890 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
891 						     &conf_len, lpm);
892 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
893 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
894 					  "Read configuration data returned "
895 					  "error %d", rc);
896 				return rc;
897 			}
898 			if (conf_data == NULL) {
899 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
900 						"No configuration data "
901 						"retrieved");
902 				continue;	/* no error */
903 			}
904 			/* save first valid configuration data */
905 			if (!conf_data_saved) {
906 				kfree(private->conf_data);
907 				private->conf_data = conf_data;
908 				private->conf_len = conf_len;
909 				if (dasd_eckd_identify_conf_parts(private)) {
910 					private->conf_data = NULL;
911 					private->conf_len = 0;
912 					kfree(conf_data);
913 					continue;
914 				}
915 				conf_data_saved++;
916 			}
917 			switch (dasd_eckd_path_access(conf_data, conf_len)) {
918 			case 0x02:
919 				path_data->npm |= lpm;
920 				break;
921 			case 0x03:
922 				path_data->ppm |= lpm;
923 				break;
924 			}
925 			if (conf_data != private->conf_data)
926 				kfree(conf_data);
927 		}
928 	}
929 	return 0;
930 }
931 
932 static int dasd_eckd_read_features(struct dasd_device *device)
933 {
934 	struct dasd_psf_prssd_data *prssdp;
935 	struct dasd_rssd_features *features;
936 	struct dasd_ccw_req *cqr;
937 	struct ccw1 *ccw;
938 	int rc;
939 	struct dasd_eckd_private *private;
940 
941 	private = (struct dasd_eckd_private *) device->private;
942 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
943 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
944 				   (sizeof(struct dasd_psf_prssd_data) +
945 				    sizeof(struct dasd_rssd_features)),
946 				   device);
947 	if (IS_ERR(cqr)) {
948 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
949 				"allocate initialization request");
950 		return PTR_ERR(cqr);
951 	}
952 	cqr->startdev = device;
953 	cqr->memdev = device;
954 	cqr->block = NULL;
955 	cqr->retries = 256;
956 	cqr->expires = 10 * HZ;
957 
958 	/* Prepare for Read Subsystem Data */
959 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
960 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
961 	prssdp->order = PSF_ORDER_PRSSD;
962 	prssdp->suborder = 0x41;	/* Read Feature Codes */
963 	/* all other bytes of prssdp must be zero */
964 
965 	ccw = cqr->cpaddr;
966 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
967 	ccw->count = sizeof(struct dasd_psf_prssd_data);
968 	ccw->flags |= CCW_FLAG_CC;
969 	ccw->cda = (__u32)(addr_t) prssdp;
970 
971 	/* Read Subsystem Data - feature codes */
972 	features = (struct dasd_rssd_features *) (prssdp + 1);
973 	memset(features, 0, sizeof(struct dasd_rssd_features));
974 
975 	ccw++;
976 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
977 	ccw->count = sizeof(struct dasd_rssd_features);
978 	ccw->cda = (__u32)(addr_t) features;
979 
980 	cqr->buildclk = get_clock();
981 	cqr->status = DASD_CQR_FILLED;
982 	rc = dasd_sleep_on(cqr);
983 	if (rc == 0) {
984 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
985 		features = (struct dasd_rssd_features *) (prssdp + 1);
986 		memcpy(&private->features, features,
987 		       sizeof(struct dasd_rssd_features));
988 	} else
989 		dev_warn(&device->cdev->dev, "Reading device feature codes"
990 			 " failed with rc=%d\n", rc);
991 	dasd_sfree_request(cqr, cqr->memdev);
992 	return rc;
993 }
994 
995 
996 /*
997  * Build CP for Perform Subsystem Function - SSC.
998  */
999 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1000 						    int enable_pav)
1001 {
1002 	struct dasd_ccw_req *cqr;
1003 	struct dasd_psf_ssc_data *psf_ssc_data;
1004 	struct ccw1 *ccw;
1005 
1006 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1007 				  sizeof(struct dasd_psf_ssc_data),
1008 				  device);
1009 
1010 	if (IS_ERR(cqr)) {
1011 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1012 			   "Could not allocate PSF-SSC request");
1013 		return cqr;
1014 	}
1015 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1016 	psf_ssc_data->order = PSF_ORDER_SSC;
1017 	psf_ssc_data->suborder = 0xc0;
1018 	if (enable_pav) {
1019 		psf_ssc_data->suborder |= 0x08;
1020 		psf_ssc_data->reserved[0] = 0x88;
1021 	}
1022 	ccw = cqr->cpaddr;
1023 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1024 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1025 	ccw->count = 66;
1026 
1027 	cqr->startdev = device;
1028 	cqr->memdev = device;
1029 	cqr->block = NULL;
1030 	cqr->retries = 256;
1031 	cqr->expires = 10*HZ;
1032 	cqr->buildclk = get_clock();
1033 	cqr->status = DASD_CQR_FILLED;
1034 	return cqr;
1035 }
1036 
1037 /*
1038  * Perform Subsystem Function.
1039  * It is necessary to trigger CIO for channel revalidation since this
1040  * call might change the behaviour of DASD devices.
1041  */
1042 static int
1043 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1044 {
1045 	struct dasd_ccw_req *cqr;
1046 	int rc;
1047 
1048 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1049 	if (IS_ERR(cqr))
1050 		return PTR_ERR(cqr);
1051 
1052 	rc = dasd_sleep_on(cqr);
1053 	if (!rc)
1054 		/* trigger CIO to reprobe devices */
1055 		css_schedule_reprobe();
1056 	dasd_sfree_request(cqr, cqr->memdev);
1057 	return rc;
1058 }
1059 
1060 /*
1061  * Validate the storage server of the current device.
1062  */
1063 static void dasd_eckd_validate_server(struct dasd_device *device)
1064 {
1065 	int rc;
1066 	struct dasd_eckd_private *private;
1067 	int enable_pav;
1068 
1069 	if (dasd_nopav || MACHINE_IS_VM)
1070 		enable_pav = 0;
1071 	else
1072 		enable_pav = 1;
1073 	rc = dasd_eckd_psf_ssc(device, enable_pav);
1074 
1075 	/* the requested feature may not be available on the server,
1076 	 * therefore just report the error and go ahead */
1077 	private = (struct dasd_eckd_private *) device->private;
1078 	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1079 			"returned rc=%d", private->uid.ssid, rc);
1080 }
1081 
1082 /*
1083  * Check device characteristics.
1084  * If the device is accessible using ECKD discipline, the device is enabled.
1085  */
1086 static int
1087 dasd_eckd_check_characteristics(struct dasd_device *device)
1088 {
1089 	struct dasd_eckd_private *private;
1090 	struct dasd_block *block;
1091 	int is_known, rc;
1092 
1093 	if (!ccw_device_is_pathgroup(device->cdev)) {
1094 		dev_warn(&device->cdev->dev,
1095 			 "A channel path group could not be established\n");
1096 		return -EIO;
1097 	}
1098 	if (!ccw_device_is_multipath(device->cdev)) {
1099 		dev_info(&device->cdev->dev,
1100 			 "The DASD is not operating in multipath mode\n");
1101 	}
1102 	private = (struct dasd_eckd_private *) device->private;
1103 	if (!private) {
1104 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1105 		if (!private) {
1106 			dev_warn(&device->cdev->dev,
1107 				 "Allocating memory for private DASD data "
1108 				 "failed\n");
1109 			return -ENOMEM;
1110 		}
1111 		device->private = (void *) private;
1112 	} else {
1113 		memset(private, 0, sizeof(*private));
1114 	}
1115 	/* Invalidate status of initial analysis. */
1116 	private->init_cqr_status = -1;
1117 	/* Set default cache operations. */
1118 	private->attrib.operation = DASD_NORMAL_CACHE;
1119 	private->attrib.nr_cyl = 0;
1120 
1121 	/* Read Configuration Data */
1122 	rc = dasd_eckd_read_conf(device);
1123 	if (rc)
1124 		goto out_err1;
1125 
1126 	/* Generate device unique id and register in devmap */
1127 	rc = dasd_eckd_generate_uid(device, &private->uid);
1128 	if (rc)
1129 		goto out_err1;
1130 	dasd_set_uid(device->cdev, &private->uid);
1131 
1132 	if (private->uid.type == UA_BASE_DEVICE) {
1133 		block = dasd_alloc_block();
1134 		if (IS_ERR(block)) {
1135 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1136 					"could not allocate dasd "
1137 					"block structure");
1138 			rc = PTR_ERR(block);
1139 			goto out_err1;
1140 		}
1141 		device->block = block;
1142 		block->base = device;
1143 	}
1144 
1145 	/* register lcu with alias handling, enable PAV if this is a new lcu */
1146 	is_known = dasd_alias_make_device_known_to_lcu(device);
1147 	if (is_known < 0) {
1148 		rc = is_known;
1149 		goto out_err2;
1150 	}
1151 	/*
1152 	 * dasd_eckd_validate_server is done on the first device that
1153 	 * is found for an LCU. All other devices have to wait
1154 	 * for it, so that they will read the correct feature codes.
1155 	 */
1156 	if (!is_known) {
1157 		dasd_eckd_validate_server(device);
1158 		dasd_alias_lcu_setup_complete(device);
1159 	} else
1160 		dasd_alias_wait_for_lcu_setup(device);
1161 
1162 	/* device may report different configuration data after LCU setup */
1163 	rc = dasd_eckd_read_conf(device);
1164 	if (rc)
1165 		goto out_err3;
1166 
1167 	/* Read Feature Codes */
1168 	dasd_eckd_read_features(device);
1169 
1170 	/* Read Device Characteristics */
1171 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1172 					 &private->rdc_data, 64);
1173 	if (rc) {
1174 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1175 				"Read device characteristic failed, rc=%d", rc);
1176 		goto out_err3;
1177 	}
1178 	/* find the valid cylinder size */
1179 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1180 	    private->rdc_data.long_no_cyl)
1181 		private->real_cyl = private->rdc_data.long_no_cyl;
1182 	else
1183 		private->real_cyl = private->rdc_data.no_cyl;
1184 
1185 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1186 		 "with %d cylinders, %d heads, %d sectors\n",
1187 		 private->rdc_data.dev_type,
1188 		 private->rdc_data.dev_model,
1189 		 private->rdc_data.cu_type,
1190 		 private->rdc_data.cu_model.model,
1191 		 private->real_cyl,
1192 		 private->rdc_data.trk_per_cyl,
1193 		 private->rdc_data.sec_per_trk);
1194 	return 0;
1195 
1196 out_err3:
1197 	dasd_alias_disconnect_device_from_lcu(device);
1198 out_err2:
1199 	dasd_free_block(device->block);
1200 	device->block = NULL;
1201 out_err1:
1202 	kfree(private->conf_data);
1203 	kfree(device->private);
1204 	device->private = NULL;
1205 	return rc;
1206 }
1207 
1208 static void dasd_eckd_uncheck_device(struct dasd_device *device)
1209 {
1210 	struct dasd_eckd_private *private;
1211 
1212 	private = (struct dasd_eckd_private *) device->private;
1213 	dasd_alias_disconnect_device_from_lcu(device);
1214 	private->ned = NULL;
1215 	private->sneq = NULL;
1216 	private->vdsneq = NULL;
1217 	private->gneq = NULL;
1218 	private->conf_len = 0;
1219 	kfree(private->conf_data);
1220 	private->conf_data = NULL;
1221 }
1222 
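/*
 * Build the initial analysis channel program: read the count fields
 * of the first four records on track 0 and of the first record on
 * track 2. dasd_eckd_end_analysis() later inspects these counts to
 * decide between the compatible (CDL) and the linux (LDL) disk layout.
 */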
1223 static struct dasd_ccw_req *
1224 dasd_eckd_analysis_ccw(struct dasd_device *device)
1225 {
1226 	struct dasd_eckd_private *private;
1227 	struct eckd_count *count_data;
1228 	struct LO_eckd_data *LO_data;
1229 	struct dasd_ccw_req *cqr;
1230 	struct ccw1 *ccw;
1231 	int cplength, datasize;
1232 	int i;
1233 
1234 	private = (struct dasd_eckd_private *) device->private;
1235 
1236 	cplength = 8;
1237 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1238 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1239 	if (IS_ERR(cqr))
1240 		return cqr;
1241 	ccw = cqr->cpaddr;
1242 	/* Define extent for the first 3 tracks. */
1243 	define_extent(ccw++, cqr->data, 0, 2,
1244 		      DASD_ECKD_CCW_READ_COUNT, device);
1245 	LO_data = cqr->data + sizeof(struct DE_eckd_data);
1246 	/* Locate record for the first 4 records on track 0. */
1247 	ccw[-1].flags |= CCW_FLAG_CC;
1248 	locate_record(ccw++, LO_data++, 0, 0, 4,
1249 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1250 
1251 	count_data = private->count_area;
1252 	for (i = 0; i < 4; i++) {
1253 		ccw[-1].flags |= CCW_FLAG_CC;
1254 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1255 		ccw->flags = 0;
1256 		ccw->count = 8;
1257 		ccw->cda = (__u32)(addr_t) count_data;
1258 		ccw++;
1259 		count_data++;
1260 	}
1261 
1262 	/* Locate record for the first record on track 2. */
1263 	ccw[-1].flags |= CCW_FLAG_CC;
1264 	locate_record(ccw++, LO_data++, 2, 0, 1,
1265 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1266 	/* Read count ccw. */
1267 	ccw[-1].flags |= CCW_FLAG_CC;
1268 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1269 	ccw->flags = 0;
1270 	ccw->count = 8;
1271 	ccw->cda = (__u32)(addr_t) count_data;
1272 
1273 	cqr->block = NULL;
1274 	cqr->startdev = device;
1275 	cqr->memdev = device;
1276 	cqr->retries = 255;
1277 	cqr->buildclk = get_clock();
1278 	cqr->status = DASD_CQR_FILLED;
1279 	return cqr;
1280 }
1281 
1282 /* differentiate between 'no record found' and any other error */
1283 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1284 {
1285 	char *sense;
1286 	if (init_cqr->status == DASD_CQR_DONE)
1287 		return INIT_CQR_OK;
1288 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1289 		 init_cqr->status == DASD_CQR_FAILED) {
1290 		sense = dasd_get_sense(&init_cqr->irb);
1291 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1292 			return INIT_CQR_UNFORMATTED;
1293 		else
1294 			return INIT_CQR_ERROR;
1295 	} else
1296 		return INIT_CQR_ERROR;
1297 }
1298 
1299 /*
1300  * This is the callback function for the init_analysis cqr. It saves
1301  * the status of the initial analysis ccw before it frees it and kicks
1302  * the device to continue the startup sequence. This will call
1303  * dasd_eckd_do_analysis again (if the device has not been marked
1304  * for deletion in the meantime).
1305  */
1306 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1307 					void *data)
1308 {
1309 	struct dasd_eckd_private *private;
1310 	struct dasd_device *device;
1311 
1312 	device = init_cqr->startdev;
1313 	private = (struct dasd_eckd_private *) device->private;
1314 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1315 	dasd_sfree_request(init_cqr, device);
1316 	dasd_kick_device(device);
1317 }
1318 
1319 static int dasd_eckd_start_analysis(struct dasd_block *block)
1320 {
1321 	struct dasd_eckd_private *private;
1322 	struct dasd_ccw_req *init_cqr;
1323 
1324 	private = (struct dasd_eckd_private *) block->base->private;
1325 	init_cqr = dasd_eckd_analysis_ccw(block->base);
1326 	if (IS_ERR(init_cqr))
1327 		return PTR_ERR(init_cqr);
1328 	init_cqr->callback = dasd_eckd_analysis_callback;
1329 	init_cqr->callback_data = NULL;
1330 	init_cqr->expires = 5*HZ;
1331 	/* first try without ERP, so we can later handle unformatted
1332 	 * devices as a special case
1333 	 */
1334 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1335 	init_cqr->retries = 0;
1336 	dasd_add_request_head(init_cqr);
1337 	return -EAGAIN;
1338 }
1339 
1340 static int dasd_eckd_end_analysis(struct dasd_block *block)
1341 {
1342 	struct dasd_device *device;
1343 	struct dasd_eckd_private *private;
1344 	struct eckd_count *count_area;
1345 	unsigned int sb, blk_per_trk;
1346 	int status, i;
1347 	struct dasd_ccw_req *init_cqr;
1348 
1349 	device = block->base;
1350 	private = (struct dasd_eckd_private *) device->private;
1351 	status = private->init_cqr_status;
1352 	private->init_cqr_status = -1;
1353 	if (status == INIT_CQR_ERROR) {
1354 		/* try again, this time with full ERP */
1355 		init_cqr = dasd_eckd_analysis_ccw(device);
1356 		dasd_sleep_on(init_cqr);
1357 		status = dasd_eckd_analysis_evaluation(init_cqr);
1358 		dasd_sfree_request(init_cqr, device);
1359 	}
1360 
1361 	if (status == INIT_CQR_UNFORMATTED) {
1362 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1363 		return -EMEDIUMTYPE;
1364 	} else if (status == INIT_CQR_ERROR) {
1365 		dev_err(&device->cdev->dev,
1366 			"Detecting the DASD disk layout failed because "
1367 			"of an I/O error\n");
1368 		return -EIO;
1369 	}
1370 
1371 	private->uses_cdl = 1;
1372 	/* Check Track 0 for Compatible Disk Layout */
1373 	count_area = NULL;
1374 	for (i = 0; i < 3; i++) {
1375 		if (private->count_area[i].kl != 4 ||
1376 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1377 			private->uses_cdl = 0;
1378 			break;
1379 		}
1380 	}
1381 	if (i == 3)
1382 		count_area = &private->count_area[4];
1383 
1384 	if (private->uses_cdl == 0) {
1385 		for (i = 0; i < 5; i++) {
1386 			if ((private->count_area[i].kl != 0) ||
1387 			    (private->count_area[i].dl !=
1388 			     private->count_area[0].dl))
1389 				break;
1390 		}
1391 		if (i == 5)
1392 			count_area = &private->count_area[0];
1393 	} else {
1394 		if (private->count_area[3].record == 1)
1395 			dev_warn(&device->cdev->dev,
1396 				 "Track 0 has no records following the VTOC\n");
1397 	}
1398 	if (count_area != NULL && count_area->kl == 0) {
1399 		/* we found nothing violating our disk layout */
1400 		if (dasd_check_blocksize(count_area->dl) == 0)
1401 			block->bp_block = count_area->dl;
1402 	}
1403 	if (block->bp_block == 0) {
1404 		dev_warn(&device->cdev->dev,
1405 			 "The disk layout of the DASD is not supported\n");
1406 		return -EMEDIUMTYPE;
1407 	}
1408 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
1409 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
1410 		block->s2b_shift++;
1411 
1412 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1413 	block->blocks = (private->real_cyl *
1414 			  private->rdc_data.trk_per_cyl *
1415 			  blk_per_trk);
1416 
1417 	dev_info(&device->cdev->dev,
1418 		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1419 		 "%s\n", (block->bp_block >> 10),
1420 		 ((private->real_cyl *
1421 		   private->rdc_data.trk_per_cyl *
1422 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
1423 		 ((blk_per_trk * block->bp_block) >> 10),
1424 		 private->uses_cdl ?
1425 		 "compatible disk layout" : "linux disk layout");
1426 
1427 	return 0;
1428 }
1429 
1430 static int dasd_eckd_do_analysis(struct dasd_block *block)
1431 {
1432 	struct dasd_eckd_private *private;
1433 
1434 	private = (struct dasd_eckd_private *) block->base->private;
1435 	if (private->init_cqr_status < 0)
1436 		return dasd_eckd_start_analysis(block);
1437 	else
1438 		return dasd_eckd_end_analysis(block);
1439 }
1440 
1441 static int dasd_eckd_ready_to_online(struct dasd_device *device)
1442 {
1443 	return dasd_alias_add_device(device);
1444 };
1445 
1446 static int dasd_eckd_online_to_ready(struct dasd_device *device)
1447 {
1448 	return dasd_alias_remove_device(device);
1449 };
1450 
1451 static int
1452 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1453 {
1454 	struct dasd_eckd_private *private;
1455 
1456 	private = (struct dasd_eckd_private *) block->base->private;
1457 	if (dasd_check_blocksize(block->bp_block) == 0) {
1458 		geo->sectors = recs_per_track(&private->rdc_data,
1459 					      0, block->bp_block);
1460 	}
1461 	geo->cylinders = private->rdc_data.no_cyl;
1462 	geo->heads = private->rdc_data.trk_per_cyl;
1463 	return 0;
1464 }
1465 
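/*
 * Build the channel program that formats a single track as described
 * by fdata: a Define Extent and Locate Record, optionally a Write
 * Record Zero, and then either one invalidating Write CKD or one
 * Write CKD per record that lays down the new count fields (with the
 * CDL-specific key/data lengths on tracks 0 and 1).
 */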
1466 static struct dasd_ccw_req *
1467 dasd_eckd_format_device(struct dasd_device * device,
1468 			struct format_data_t * fdata)
1469 {
1470 	struct dasd_eckd_private *private;
1471 	struct dasd_ccw_req *fcp;
1472 	struct eckd_count *ect;
1473 	struct ccw1 *ccw;
1474 	void *data;
1475 	int rpt;
1476 	struct ch_t address;
1477 	int cplength, datasize;
1478 	int i;
1479 	int intensity = 0;
1480 	int r0_perm;
1481 
1482 	private = (struct dasd_eckd_private *) device->private;
1483 	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1484 	set_ch_t(&address,
1485 		 fdata->start_unit / private->rdc_data.trk_per_cyl,
1486 		 fdata->start_unit % private->rdc_data.trk_per_cyl);
1487 
1488 	/* Sanity checks. */
1489 	if (fdata->start_unit >=
1490 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
1491 		dev_warn(&device->cdev->dev, "Start track number %d used in "
1492 			 "formatting is too big\n", fdata->start_unit);
1493 		return ERR_PTR(-EINVAL);
1494 	}
1495 	if (fdata->start_unit > fdata->stop_unit) {
1496 		dev_warn(&device->cdev->dev, "Start track %d used in "
1497 			 "formatting exceeds end track\n", fdata->start_unit);
1498 		return ERR_PTR(-EINVAL);
1499 	}
1500 	if (dasd_check_blocksize(fdata->blksize) != 0) {
1501 		dev_warn(&device->cdev->dev,
1502 			 "The DASD cannot be formatted with block size %d\n",
1503 			 fdata->blksize);
1504 		return ERR_PTR(-EINVAL);
1505 	}
1506 
1507 	/*
1508 	 * fdata->intensity is a bit string that tells us what to do:
1509 	 *   Bit 0: write record zero
1510 	 *   Bit 1: write home address, currently not supported
1511 	 *   Bit 2: invalidate tracks
1512 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
1513 	 *   Bit 4: do not allow storage subsystem to modify record zero
1514 	 * Only some bit combinations make sense.
1515 	 */
1516 	if (fdata->intensity & 0x10) {
1517 		r0_perm = 0;
1518 		intensity = fdata->intensity & ~0x10;
1519 	} else {
1520 		r0_perm = 1;
1521 		intensity = fdata->intensity;
1522 	}
1523 	switch (intensity) {
1524 	case 0x00:	/* Normal format */
1525 	case 0x08:	/* Normal format, use cdl. */
1526 		cplength = 2 + rpt;
1527 		datasize = sizeof(struct DE_eckd_data) +
1528 			sizeof(struct LO_eckd_data) +
1529 			rpt * sizeof(struct eckd_count);
1530 		break;
1531 	case 0x01:	/* Write record zero and format track. */
1532 	case 0x09:	/* Write record zero and format track, use cdl. */
1533 		cplength = 3 + rpt;
1534 		datasize = sizeof(struct DE_eckd_data) +
1535 			sizeof(struct LO_eckd_data) +
1536 			sizeof(struct eckd_count) +
1537 			rpt * sizeof(struct eckd_count);
1538 		break;
1539 	case 0x04:	/* Invalidate track. */
1540 	case 0x0c:	/* Invalidate track, use cdl. */
1541 		cplength = 3;
1542 		datasize = sizeof(struct DE_eckd_data) +
1543 			sizeof(struct LO_eckd_data) +
1544 			sizeof(struct eckd_count);
1545 		break;
1546 	default:
1547 		dev_warn(&device->cdev->dev, "An I/O control call used "
1548 			 "incorrect flags 0x%x\n", fdata->intensity);
1549 		return ERR_PTR(-EINVAL);
1550 	}
1551 	/* Allocate the format ccw request. */
1552 	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1553 	if (IS_ERR(fcp))
1554 		return fcp;
1555 
1556 	data = fcp->data;
1557 	ccw = fcp->cpaddr;
1558 
1559 	switch (intensity & ~0x08) {
1560 	case 0x00: /* Normal format. */
1561 		define_extent(ccw++, (struct DE_eckd_data *) data,
1562 			      fdata->start_unit, fdata->start_unit,
1563 			      DASD_ECKD_CCW_WRITE_CKD, device);
1564 		/* grant subsystem permission to format R0 */
1565 		if (r0_perm)
1566 			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
1567 		data += sizeof(struct DE_eckd_data);
1568 		ccw[-1].flags |= CCW_FLAG_CC;
1569 		locate_record(ccw++, (struct LO_eckd_data *) data,
1570 			      fdata->start_unit, 0, rpt,
1571 			      DASD_ECKD_CCW_WRITE_CKD, device,
1572 			      fdata->blksize);
1573 		data += sizeof(struct LO_eckd_data);
1574 		break;
1575 	case 0x01: /* Write record zero + format track. */
1576 		define_extent(ccw++, (struct DE_eckd_data *) data,
1577 			      fdata->start_unit, fdata->start_unit,
1578 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1579 			      device);
1580 		data += sizeof(struct DE_eckd_data);
1581 		ccw[-1].flags |= CCW_FLAG_CC;
1582 		locate_record(ccw++, (struct LO_eckd_data *) data,
1583 			      fdata->start_unit, 0, rpt + 1,
1584 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1585 			      device->block->bp_block);
1586 		data += sizeof(struct LO_eckd_data);
1587 		break;
1588 	case 0x04: /* Invalidate track. */
1589 		define_extent(ccw++, (struct DE_eckd_data *) data,
1590 			      fdata->start_unit, fdata->start_unit,
1591 			      DASD_ECKD_CCW_WRITE_CKD, device);
1592 		data += sizeof(struct DE_eckd_data);
1593 		ccw[-1].flags |= CCW_FLAG_CC;
1594 		locate_record(ccw++, (struct LO_eckd_data *) data,
1595 			      fdata->start_unit, 0, 1,
1596 			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
1597 		data += sizeof(struct LO_eckd_data);
1598 		break;
1599 	}
1600 	if (intensity & 0x01) {	/* write record zero */
1601 		ect = (struct eckd_count *) data;
1602 		data += sizeof(struct eckd_count);
1603 		ect->cyl = address.cyl;
1604 		ect->head = address.head;
1605 		ect->record = 0;
1606 		ect->kl = 0;
1607 		ect->dl = 8;
1608 		ccw[-1].flags |= CCW_FLAG_CC;
1609 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1610 		ccw->flags = CCW_FLAG_SLI;
1611 		ccw->count = 8;
1612 		ccw->cda = (__u32)(addr_t) ect;
1613 		ccw++;
1614 	}
1615 	if ((intensity & ~0x08) & 0x04) {	/* erase track */
1616 		ect = (struct eckd_count *) data;
1617 		data += sizeof(struct eckd_count);
1618 		ect->cyl = address.cyl;
1619 		ect->head = address.head;
1620 		ect->record = 1;
1621 		ect->kl = 0;
1622 		ect->dl = 0;
1623 		ccw[-1].flags |= CCW_FLAG_CC;
1624 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1625 		ccw->flags = CCW_FLAG_SLI;
1626 		ccw->count = 8;
1627 		ccw->cda = (__u32)(addr_t) ect;
1628 	} else {		/* write remaining records */
1629 		for (i = 0; i < rpt; i++) {
1630 			ect = (struct eckd_count *) data;
1631 			data += sizeof(struct eckd_count);
1632 			ect->cyl = address.cyl;
1633 			ect->head = address.head;
1634 			ect->record = i + 1;
1635 			ect->kl = 0;
1636 			ect->dl = fdata->blksize;
1637 			/* Check for special tracks 0-1 when formatting CDL */
1638 			if ((intensity & 0x08) &&
1639 			    fdata->start_unit == 0) {
1640 				if (i < 3) {
1641 					ect->kl = 4;
1642 					ect->dl = sizes_trk0[i] - 4;
1643 				}
1644 			}
1645 			if ((intensity & 0x08) &&
1646 			    fdata->start_unit == 1) {
1647 				ect->kl = 44;
1648 				ect->dl = LABEL_SIZE - 44;
1649 			}
1650 			ccw[-1].flags |= CCW_FLAG_CC;
1651 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1652 			ccw->flags = CCW_FLAG_SLI;
1653 			ccw->count = 8;
1654 			ccw->cda = (__u32)(addr_t) ect;
1655 			ccw++;
1656 		}
1657 	}
1658 	fcp->startdev = device;
1659 	fcp->memdev = device;
1660 	fcp->retries = 256;
1661 	fcp->buildclk = get_clock();
1662 	fcp->status = DASD_CQR_FILLED;
1663 	return fcp;
1664 }
1665 
1666 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1667 {
1668 	cqr->status = DASD_CQR_FILLED;
1669 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
1670 		dasd_eckd_reset_ccw_to_base_io(cqr);
1671 		cqr->startdev = cqr->block->base;
1672 	}
1673 };
1674 
1675 static dasd_erp_fn_t
1676 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1677 {
1678 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1679 	struct ccw_device *cdev = device->cdev;
1680 
1681 	switch (cdev->id.cu_type) {
1682 	case 0x3990:
1683 	case 0x2105:
1684 	case 0x2107:
1685 	case 0x1750:
1686 		return dasd_3990_erp_action;
1687 	case 0x9343:
1688 	case 0x3880:
1689 	default:
1690 		return dasd_default_erp_action;
1691 	}
1692 }
1693 
1694 static dasd_erp_fn_t
1695 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1696 {
1697 	return dasd_default_erp_postaction;
1698 }
1699 
1700 
1701 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1702 						   struct irb *irb)
1703 {
1704 	char mask;
1705 	char *sense = NULL;
1706 
1707 	/* first of all check for state change pending interrupt */
1708 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1709 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
1710 		dasd_generic_handle_state_change(device);
1711 		return;
1712 	}
1713 
1714 	/* summary unit check */
1715 	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1716 	    (irb->ecw[7] == 0x0D)) {
1717 		dasd_alias_handle_summary_unit_check(device, irb);
1718 		return;
1719 	}
1720 
1721 	sense = dasd_get_sense(irb);
1722 	/* service information message SIM */
1723 	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1724 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1725 		dasd_3990_erp_handle_sim(device, sense);
1726 		dasd_schedule_device_bh(device);
1727 		return;
1728 	}
1729 
1730 	if ((scsw_cc(&irb->scsw) == 1) &&
1731 	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1732 	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
1733 	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
1734 		/* fake irb, do nothing; they are handled elsewhere */
1735 		dasd_schedule_device_bh(device);
1736 		return;
1737 	}
1738 
1739 	if (!sense) {
1740 		/* just report other unsolicited interrupts */
1741 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1742 			    "unsolicited interrupt received");
1743 	} else {
1744 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1745 			    "unsolicited interrupt received "
1746 			    "(sense available)");
1747 		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1748 	}
1749 
1750 	dasd_schedule_device_bh(device);
1751 	return;
1752 };
1753 
1754 
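/*
 * Build a command-mode channel program for a block layer request:
 * one Define Extent or Prefix CCW (depending on the feature codes),
 * a Locate Record, and one Read/Write CCW per block. Blocks in the
 * CDL special area of tracks 0 and 1 get additional Locate Record
 * CCWs of their own.
 */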
1755 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1756 					       struct dasd_device *startdev,
1757 					       struct dasd_block *block,
1758 					       struct request *req,
1759 					       sector_t first_rec,
1760 					       sector_t last_rec,
1761 					       sector_t first_trk,
1762 					       sector_t last_trk,
1763 					       unsigned int first_offs,
1764 					       unsigned int last_offs,
1765 					       unsigned int blk_per_trk,
1766 					       unsigned int blksize)
1767 {
1768 	struct dasd_eckd_private *private;
1769 	unsigned long *idaws;
1770 	struct LO_eckd_data *LO_data;
1771 	struct dasd_ccw_req *cqr;
1772 	struct ccw1 *ccw;
1773 	struct req_iterator iter;
1774 	struct bio_vec *bv;
1775 	char *dst;
1776 	unsigned int off;
1777 	int count, cidaw, cplength, datasize;
1778 	sector_t recid;
1779 	unsigned char cmd, rcmd;
1780 	int use_prefix;
1781 	struct dasd_device *basedev;
1782 
1783 	basedev = block->base;
1784 	private = (struct dasd_eckd_private *) basedev->private;
1785 	if (rq_data_dir(req) == READ)
1786 		cmd = DASD_ECKD_CCW_READ_MT;
1787 	else if (rq_data_dir(req) == WRITE)
1788 		cmd = DASD_ECKD_CCW_WRITE_MT;
1789 	else
1790 		return ERR_PTR(-EINVAL);
1791 
1792 	/* Check struct bio and count the number of blocks for the request. */
1793 	count = 0;
1794 	cidaw = 0;
1795 	rq_for_each_segment(bv, req, iter) {
1796 		if (bv->bv_len & (blksize - 1))
1797 			/* ECKD can only do full blocks. */
1798 			return ERR_PTR(-EINVAL);
1799 		count += bv->bv_len >> (block->s2b_shift + 9);
1800 #if defined(CONFIG_64BIT)
1801 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1802 			cidaw += bv->bv_len >> (block->s2b_shift + 9);
1803 #endif
1804 	}
1805 	/* Paranoia. */
1806 	if (count != last_rec - first_rec + 1)
1807 		return ERR_PTR(-EINVAL);
1808 
1809 	/* use the prefix command if available */
1810 	use_prefix = private->features.feature[8] & 0x01;
1811 	if (use_prefix) {
1812 		/* 1x prefix + number of blocks */
1813 		cplength = 2 + count;
1814 		/* 1x prefix + cidaws*sizeof(long) */
1815 		datasize = sizeof(struct PFX_eckd_data) +
1816 			sizeof(struct LO_eckd_data) +
1817 			cidaw * sizeof(unsigned long);
1818 	} else {
1819 		/* 1x define extent + 1x locate record + number of blocks */
1820 		cplength = 2 + count;
1821 		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1822 		datasize = sizeof(struct DE_eckd_data) +
1823 			sizeof(struct LO_eckd_data) +
1824 			cidaw * sizeof(unsigned long);
1825 	}
1826 	/* Find out the number of additional locate record ccws for cdl. */
1827 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1828 		if (last_rec >= 2*blk_per_trk)
1829 			count = 2*blk_per_trk - first_rec;
1830 		cplength += count;
1831 		datasize += count*sizeof(struct LO_eckd_data);
1832 	}
1833 	/* Allocate the ccw request. */
1834 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1835 				   startdev);
1836 	if (IS_ERR(cqr))
1837 		return cqr;
1838 	ccw = cqr->cpaddr;
1839 	/* First ccw is define extent or prefix. */
1840 	if (use_prefix) {
1841 		if (prefix(ccw++, cqr->data, first_trk,
1842 			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
1843 			/* Clock not in sync and XRC is enabled.
1844 			 * Try again later.
1845 			 */
1846 			dasd_sfree_request(cqr, startdev);
1847 			return ERR_PTR(-EAGAIN);
1848 		}
1849 		idaws = (unsigned long *) (cqr->data +
1850 					   sizeof(struct PFX_eckd_data));
1851 	} else {
1852 		if (define_extent(ccw++, cqr->data, first_trk,
1853 				  last_trk, cmd, startdev) == -EAGAIN) {
1854 			/* Clock not in sync and XRC is enabled.
1855 			 * Try again later.
1856 			 */
1857 			dasd_sfree_request(cqr, startdev);
1858 			return ERR_PTR(-EAGAIN);
1859 		}
1860 		idaws = (unsigned long *) (cqr->data +
1861 					   sizeof(struct DE_eckd_data));
1862 	}
1863 	/* Build locate_record + read/write ccws. */
1864 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1865 	recid = first_rec;
1866 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1867 		/* Only standard blocks so there is just one locate record. */
1868 		ccw[-1].flags |= CCW_FLAG_CC;
1869 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1870 			      last_rec - recid + 1, cmd, basedev, blksize);
1871 	}
1872 	rq_for_each_segment(bv, req, iter) {
1873 		dst = page_address(bv->bv_page) + bv->bv_offset;
1874 		if (dasd_page_cache) {
1875 			char *copy = kmem_cache_alloc(dasd_page_cache,
1876 						      GFP_DMA | __GFP_NOWARN);
1877 			if (copy && rq_data_dir(req) == WRITE)
1878 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1879 			if (copy)
1880 				dst = copy + bv->bv_offset;
1881 		}
1882 		for (off = 0; off < bv->bv_len; off += blksize) {
1883 			sector_t trkid = recid;
1884 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
1885 			rcmd = cmd;
1886 			count = blksize;
1887 			/* Locate record for cdl special block ? */
1888 			if (private->uses_cdl && recid < 2*blk_per_trk) {
1889 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1890 					rcmd |= 0x8;
1891 					count = dasd_eckd_cdl_reclen(recid);
1892 					if (count < blksize &&
1893 					    rq_data_dir(req) == READ)
1894 						memset(dst + count, 0xe5,
1895 						       blksize - count);
1896 				}
1897 				ccw[-1].flags |= CCW_FLAG_CC;
1898 				locate_record(ccw++, LO_data++,
1899 					      trkid, recoffs + 1,
1900 					      1, rcmd, basedev, count);
1901 			}
1902 			/* Locate record for standard blocks ? */
1903 			if (private->uses_cdl && recid == 2*blk_per_trk) {
1904 				ccw[-1].flags |= CCW_FLAG_CC;
1905 				locate_record(ccw++, LO_data++,
1906 					      trkid, recoffs + 1,
1907 					      last_rec - recid + 1,
1908 					      cmd, basedev, count);
1909 			}
1910 			/* Read/write ccw. */
1911 			ccw[-1].flags |= CCW_FLAG_CC;
1912 			ccw->cmd_code = rcmd;
1913 			ccw->count = count;
1914 			if (idal_is_needed(dst, blksize)) {
1915 				ccw->cda = (__u32)(addr_t) idaws;
1916 				ccw->flags = CCW_FLAG_IDA;
1917 				idaws = idal_create_words(idaws, dst, blksize);
1918 			} else {
1919 				ccw->cda = (__u32)(addr_t) dst;
1920 				ccw->flags = 0;
1921 			}
1922 			ccw++;
1923 			dst += blksize;
1924 			recid++;
1925 		}
1926 	}
1927 	if (blk_noretry_request(req) ||
1928 	    block->base->features & DASD_FEATURE_FAILFAST)
1929 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1930 	cqr->startdev = startdev;
1931 	cqr->memdev = startdev;
1932 	cqr->block = block;
1933 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
1934 	cqr->lpm = private->path_data.ppm;
1935 	cqr->retries = 256;
1936 	cqr->buildclk = get_clock();
1937 	cqr->status = DASD_CQR_FILLED;
1938 	return cqr;
1939 }
1940 
1941 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1942 					       struct dasd_device *startdev,
1943 					       struct dasd_block *block,
1944 					       struct request *req,
1945 					       sector_t first_rec,
1946 					       sector_t last_rec,
1947 					       sector_t first_trk,
1948 					       sector_t last_trk,
1949 					       unsigned int first_offs,
1950 					       unsigned int last_offs,
1951 					       unsigned int blk_per_trk,
1952 					       unsigned int blksize)
1953 {
1954 	struct dasd_eckd_private *private;
1955 	unsigned long *idaws;
1956 	struct dasd_ccw_req *cqr;
1957 	struct ccw1 *ccw;
1958 	struct req_iterator iter;
1959 	struct bio_vec *bv;
1960 	char *dst, *idaw_dst;
1961 	unsigned int cidaw, cplength, datasize;
1962 	unsigned int tlf;
1963 	sector_t recid;
1964 	unsigned char cmd;
1965 	struct dasd_device *basedev;
1966 	unsigned int trkcount, count, count_to_trk_end;
1967 	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
1968 	unsigned char new_track, end_idaw;
1969 	sector_t trkid;
1970 	unsigned int recoffs;
1971 
1972 	basedev = block->base;
1973 	private = (struct dasd_eckd_private *) basedev->private;
1974 	if (rq_data_dir(req) == READ)
1975 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
1976 	else if (rq_data_dir(req) == WRITE)
1977 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
1978 	else
1979 		return ERR_PTR(-EINVAL);
1980 
1981 	/* Track based I/O needs IDAWs for each page, not just for
1982 	 * 64 bit addresses. We need additional idals for pages
1983 	 * that get filled from two tracks, so we use the number
1984 	 * of records as an upper limit.
1985 	 */
1986 	cidaw = last_rec - first_rec + 1;
1987 	trkcount = last_trk - first_trk + 1;
1988 
1989 	/* 1x prefix + one read/write ccw per track */
1990 	cplength = 1 + trkcount;
1991 
1992 	/* on 31-bit we need space for two 32 bit addresses per page,
1993 	 * on 64-bit for one 64 bit address
1994 	 */
1995 	datasize = sizeof(struct PFX_eckd_data) +
1996 		cidaw * sizeof(unsigned long long);
1997 
1998 	/* Allocate the ccw request. */
1999 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2000 				   startdev);
2001 	if (IS_ERR(cqr))
2002 		return cqr;
2003 	ccw = cqr->cpaddr;
2004 	/* transfer length factor: how many bytes to read from the last track */
2005 	if (first_trk == last_trk)
2006 		tlf = last_offs - first_offs + 1;
2007 	else
2008 		tlf = last_offs + 1;
2009 	tlf *= blksize;
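	/*
	 * Worked example (illustrative numbers only): with blksize 4096,
	 * first_offs 2 and last_offs 5 on the same track this yields
	 * tlf = (5 - 2 + 1) * 4096 = 16384 bytes on the last track.
	 */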
2010 
2011 	if (prefix_LRE(ccw++, cqr->data, first_trk,
2012 		       last_trk, cmd, basedev, startdev,
2013 		       1 /* format */, first_offs + 1,
2014 		       trkcount, blksize,
2015 		       tlf) == -EAGAIN) {
2016 		/* Clock not in sync and XRC is enabled.
2017 		 * Try again later.
2018 		 */
2019 		dasd_sfree_request(cqr, startdev);
2020 		return ERR_PTR(-EAGAIN);
2021 	}
2022 
2023 	/*
2024 	 * The translation of request into ccw programs must meet the
2025 	 * following conditions:
2026 	 * - all idaws but the first and the last must address full pages
2027 	 *   (or 2K blocks on 31-bit)
2028 	 * - the scope of a ccw and its idal ends with the track boundaries
2029 	 */
2030 	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
2031 	recid = first_rec;
2032 	new_track = 1;
2033 	end_idaw = 0;
2034 	len_to_track_end = 0;
2035 	idaw_dst = 0;
2036 	idaw_len = 0;
2037 	rq_for_each_segment(bv, req, iter) {
2038 		dst = page_address(bv->bv_page) + bv->bv_offset;
2039 		seg_len = bv->bv_len;
2040 		while (seg_len) {
2041 			if (new_track) {
2042 				trkid = recid;
2043 				recoffs = sector_div(trkid, blk_per_trk);
2044 				count_to_trk_end = blk_per_trk - recoffs;
2045 				count = min((last_rec - recid + 1),
2046 					    (sector_t)count_to_trk_end);
2047 				len_to_track_end = count * blksize;
2048 				ccw[-1].flags |= CCW_FLAG_CC;
2049 				ccw->cmd_code = cmd;
2050 				ccw->count = len_to_track_end;
2051 				ccw->cda = (__u32)(addr_t)idaws;
2052 				ccw->flags = CCW_FLAG_IDA;
2053 				ccw++;
2054 				recid += count;
2055 				new_track = 0;
2056 				/* first idaw for a ccw may start anywhere */
2057 				if (!idaw_dst)
2058 					idaw_dst = dst;
2059 			}
2060 			/* If we start a new idaw, we must make sure that it
2061 			 * starts on an IDA_BLOCK_SIZE boundary.
2062 			 * If we continue an idaw, we must make sure that the
2063 			 * current segment begins where the so far accumulated
2064 			 * idaw ends
2065 			 */
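			/*
			 * E.g. with 4 KB IDA blocks (2 KB on 31-bit) a new
			 * idaw may only start at a physical address that is
			 * a multiple of IDA_BLOCK_SIZE.
			 */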
2066 			if (!idaw_dst) {
2067 				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2068 					dasd_sfree_request(cqr, startdev);
2069 					return ERR_PTR(-ERANGE);
2070 				} else
2071 					idaw_dst = dst;
2072 			}
2073 			if ((idaw_dst + idaw_len) != dst) {
2074 				dasd_sfree_request(cqr, startdev);
2075 				return ERR_PTR(-ERANGE);
2076 			}
2077 			part_len = min(seg_len, len_to_track_end);
2078 			seg_len -= part_len;
2079 			dst += part_len;
2080 			idaw_len += part_len;
2081 			len_to_track_end -= part_len;
2082 			/* collected memory area ends on an IDA_BLOCK border,
2083 			 * -> create an idaw
2084 			 * idal_create_words will handle cases where idaw_len
2085 			 * is larger than IDA_BLOCK_SIZE
2086 			 */
2087 			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2088 				end_idaw = 1;
2089 			/* We also need to end the idaw at track end */
2090 			if (!len_to_track_end) {
2091 				new_track = 1;
2092 				end_idaw = 1;
2093 			}
2094 			if (end_idaw) {
2095 				idaws = idal_create_words(idaws, idaw_dst,
2096 							  idaw_len);
2097 				idaw_dst = 0;
2098 				idaw_len = 0;
2099 				end_idaw = 0;
2100 			}
2101 		}
2102 	}
2103 
2104 	if (blk_noretry_request(req) ||
2105 	    block->base->features & DASD_FEATURE_FAILFAST)
2106 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2107 	cqr->startdev = startdev;
2108 	cqr->memdev = startdev;
2109 	cqr->block = block;
2110 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2111 	cqr->lpm = private->path_data.ppm;
2112 	cqr->retries = 256;
2113 	cqr->buildclk = get_clock();
2114 	cqr->status = DASD_CQR_FILLED;
2115 	return cqr;
2116 }
2117 
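/*
 * Build the prefix data (define extent + locate record extended) for a
 * transport mode request and add it as the first DCW of the given itcw.
 * Only the read track data and write track data opcodes are handled here;
 * anything else is a programming error.
 */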
2118 static int prepare_itcw(struct itcw *itcw,
2119 			unsigned int trk, unsigned int totrk, int cmd,
2120 			struct dasd_device *basedev,
2121 			struct dasd_device *startdev,
2122 			unsigned int rec_on_trk, int count,
2123 			unsigned int blksize,
2124 			unsigned int total_data_size,
2125 			unsigned int tlf,
2126 			unsigned int blk_per_trk)
2127 {
2128 	struct PFX_eckd_data pfxdata;
2129 	struct dasd_eckd_private *basepriv, *startpriv;
2130 	struct DE_eckd_data *dedata;
2131 	struct LRE_eckd_data *lredata;
2132 	struct dcw *dcw;
2133 
2134 	u32 begcyl, endcyl;
2135 	u16 heads, beghead, endhead;
2136 	u8 pfx_cmd;
2137 
2138 	int rc = 0;
2139 	int sector = 0;
2140 	int dn, d;
2141 
2142 
2143 	/* setup prefix data */
2144 	basepriv = (struct dasd_eckd_private *) basedev->private;
2145 	startpriv = (struct dasd_eckd_private *) startdev->private;
2146 	dedata = &pfxdata.define_extent;
2147 	lredata = &pfxdata.locate_record;
2148 
2149 	memset(&pfxdata, 0, sizeof(pfxdata));
2150 	pfxdata.format = 1; /* PFX with LRE */
2151 	pfxdata.base_address = basepriv->ned->unit_addr;
2152 	pfxdata.base_lss = basepriv->ned->ID;
2153 	pfxdata.validity.define_extent = 1;
2154 
2155 	/* private uid is kept up to date, conf_data may be outdated */
2156 	if (startpriv->uid.type != UA_BASE_DEVICE) {
2157 		pfxdata.validity.verify_base = 1;
2158 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2159 			pfxdata.validity.hyper_pav = 1;
2160 	}
2161 
2162 	switch (cmd) {
2163 	case DASD_ECKD_CCW_READ_TRACK_DATA:
2164 		dedata->mask.perm = 0x1;
2165 		dedata->attributes.operation = basepriv->attrib.operation;
2166 		dedata->blk_size = blksize;
2167 		dedata->ga_extended |= 0x42;
2168 		lredata->operation.orientation = 0x0;
2169 		lredata->operation.operation = 0x0C;
2170 		lredata->auxiliary.check_bytes = 0x01;
2171 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2172 		break;
2173 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2174 		dedata->mask.perm = 0x02;
2175 		dedata->attributes.operation = basepriv->attrib.operation;
2176 		dedata->blk_size = blksize;
2177 		rc = check_XRC_on_prefix(&pfxdata, basedev);
2178 		dedata->ga_extended |= 0x42;
2179 		lredata->operation.orientation = 0x0;
2180 		lredata->operation.operation = 0x3F;
2181 		lredata->extended_operation = 0x23;
2182 		lredata->auxiliary.check_bytes = 0x2;
2183 		pfx_cmd = DASD_ECKD_CCW_PFX;
2184 		break;
2185 	default:
2186 		DBF_DEV_EVENT(DBF_ERR, basedev,
2187 			      "prepare itcw, unknown opcode 0x%x", cmd);
2188 		BUG();
2189 		break;
2190 	}
2191 	if (rc)
2192 		return rc;
2193 
2194 	dedata->attributes.mode = 0x3;	/* ECKD */
2195 
2196 	heads = basepriv->rdc_data.trk_per_cyl;
2197 	begcyl = trk / heads;
2198 	beghead = trk % heads;
2199 	endcyl = totrk / heads;
2200 	endhead = totrk % heads;
2201 
2202 	/* check for sequential prestage - enhance cylinder range */
2203 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2204 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
2205 
2206 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2207 			endcyl += basepriv->attrib.nr_cyl;
2208 		else
2209 			endcyl = (basepriv->real_cyl - 1);
2210 	}
2211 
2212 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
2213 	set_ch_t(&dedata->end_ext, endcyl, endhead);
2214 
2215 	dedata->ep_format = 0x20; /* records per track is valid */
2216 	dedata->ep_rec_per_track = blk_per_trk;
2217 
2218 	if (rec_on_trk) {
2219 		switch (basepriv->rdc_data.dev_type) {
2220 		case 0x3390:
2221 			dn = ceil_quot(blksize + 6, 232);
2222 			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2223 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2224 			break;
2225 		case 0x3380:
2226 			d = 7 + ceil_quot(blksize + 12, 32);
2227 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2228 			break;
2229 		}
2230 	}
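	/*
	 * Illustrative numbers (assuming ceil_quot() rounds up): on a 3390
	 * with blksize 4096, dn = ceil_quot(4102, 232) = 18 and
	 * d = 9 + ceil_quot(4210, 34) = 133, so rec_on_trk = 2 gives
	 * sector = (49 + 1 * 143) / 8 = 24.
	 */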
2231 
2232 	lredata->auxiliary.length_valid = 1;
2233 	lredata->auxiliary.length_scope = 1;
2234 	lredata->auxiliary.imbedded_ccw_valid = 1;
2235 	lredata->length = tlf;
2236 	lredata->imbedded_ccw = cmd;
2237 	lredata->count = count;
2238 	lredata->sector = sector;
2239 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
2240 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
2241 	lredata->search_arg.head = lredata->seek_addr.head;
2242 	lredata->search_arg.record = rec_on_trk;
2243 
2244 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2245 		     &pfxdata, sizeof(pfxdata), total_data_size);
2246 
2247 	return rc;
2248 }
2249 
2250 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2251 					       struct dasd_device *startdev,
2252 					       struct dasd_block *block,
2253 					       struct request *req,
2254 					       sector_t first_rec,
2255 					       sector_t last_rec,
2256 					       sector_t first_trk,
2257 					       sector_t last_trk,
2258 					       unsigned int first_offs,
2259 					       unsigned int last_offs,
2260 					       unsigned int blk_per_trk,
2261 					       unsigned int blksize)
2262 {
2263 	struct dasd_eckd_private *private;
2264 	struct dasd_ccw_req *cqr;
2265 	struct req_iterator iter;
2266 	struct bio_vec *bv;
2267 	char *dst;
2268 	unsigned int trkcount, ctidaw;
2269 	unsigned char cmd;
2270 	struct dasd_device *basedev;
2271 	unsigned int tlf;
2272 	struct itcw *itcw;
2273 	struct tidaw *last_tidaw = NULL;
2274 	int itcw_op;
2275 	size_t itcw_size;
2276 
2277 	basedev = block->base;
2278 	private = (struct dasd_eckd_private *) basedev->private;
2279 	if (rq_data_dir(req) == READ) {
2280 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2281 		itcw_op = ITCW_OP_READ;
2282 	} else if (rq_data_dir(req) == WRITE) {
2283 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2284 		itcw_op = ITCW_OP_WRITE;
2285 	} else
2286 		return ERR_PTR(-EINVAL);
2287 
2288 	/* Track based I/O needs to address all memory via TIDAWs,
2289 	 * not just 64 bit addresses. This allows us to map
2290 	 * each segment directly to one tidaw.
2291 	 */
2292 	trkcount = last_trk - first_trk + 1;
2293 	ctidaw = 0;
2294 	rq_for_each_segment(bv, req, iter) {
2295 		++ctidaw;
2296 	}
2297 
2298 	/* Allocate the ccw request. */
2299 	itcw_size = itcw_calc_size(0, ctidaw, 0);
2300 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2301 	if (IS_ERR(cqr))
2302 		return cqr;
2303 
2304 	cqr->cpmode = 1;
2305 	cqr->startdev = startdev;
2306 	cqr->memdev = startdev;
2307 	cqr->block = block;
2308 	cqr->expires = 100*HZ;
2309 	cqr->buildclk = get_clock();
2310 	cqr->status = DASD_CQR_FILLED;
2311 	cqr->retries = 10;
2312 
2313 	/* transfer length factor: how many bytes to read from the last track */
2314 	if (first_trk == last_trk)
2315 		tlf = last_offs - first_offs + 1;
2316 	else
2317 		tlf = last_offs + 1;
2318 	tlf *= blksize;
2319 
2320 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2321 	cqr->cpaddr = itcw_get_tcw(itcw);
2322 
2323 	if (prepare_itcw(itcw, first_trk, last_trk,
2324 			 cmd, basedev, startdev,
2325 			 first_offs + 1,
2326 			 trkcount, blksize,
2327 			 (last_rec - first_rec + 1) * blksize,
2328 			 tlf, blk_per_trk) == -EAGAIN) {
2329 		/* Clock not in sync and XRC is enabled.
2330 		 * Try again later.
2331 		 */
2332 		dasd_sfree_request(cqr, startdev);
2333 		return ERR_PTR(-EAGAIN);
2334 	}
2335 
2336 	/*
2337 	 * A tidaw can address 4k of memory, but must not cross page boundaries.
2338 	 * We can let the block layer handle this by setting
2339 	 * blk_queue_segment_boundary to page boundaries and
2340 	 * blk_max_segment_size to page size when setting up the request queue.
2341 	 */
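	/*
	 * Sketch of the queue setup this relies on (assumed to be done in
	 * the generic DASD code, shown here for illustration only):
	 *	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	 *	blk_queue_max_segment_size(q, PAGE_SIZE);
	 * so that no bio segment handed to us crosses a page boundary.
	 */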
2342 	rq_for_each_segment(bv, req, iter) {
2343 		dst = page_address(bv->bv_page) + bv->bv_offset;
2344 		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2345 		if (IS_ERR(last_tidaw))
2346 			return (struct dasd_ccw_req *)last_tidaw;
2347 	}
2348 
2349 	last_tidaw->flags |= 0x80;
2350 	itcw_finalize(itcw);
2351 
2352 	if (blk_noretry_request(req) ||
2353 	    block->base->features & DASD_FEATURE_FAILFAST)
2354 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2355 	cqr->startdev = startdev;
2356 	cqr->memdev = startdev;
2357 	cqr->block = block;
2358 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2359 	cqr->lpm = private->path_data.ppm;
2360 	cqr->retries = 256;
2361 	cqr->buildclk = get_clock();
2362 	cqr->status = DASD_CQR_FILLED;
2363 	return cqr;
2364 }
2365 
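/*
 * Build a channel program for a block layer request: prefer transport mode
 * track I/O when FCX is usable, fall back to command mode track I/O when the
 * prefix and read/write track data features are available, and use the
 * single block command mode path for everything else (including CDL special
 * blocks and requests served through dasd_page_cache).
 */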
2366 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2367 					       struct dasd_block *block,
2368 					       struct request *req)
2369 {
2370 	int tpm, cmdrtd, cmdwtd;
2371 	int use_prefix;
2372 #if defined(CONFIG_64BIT)
2373 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
2374 #endif
2375 	struct dasd_eckd_private *private;
2376 	struct dasd_device *basedev;
2377 	sector_t first_rec, last_rec;
2378 	sector_t first_trk, last_trk;
2379 	unsigned int first_offs, last_offs;
2380 	unsigned int blk_per_trk, blksize;
2381 	int cdlspecial;
2382 	struct dasd_ccw_req *cqr;
2383 
2384 	basedev = block->base;
2385 	private = (struct dasd_eckd_private *) basedev->private;
2386 
2387 	/* Calculate number of blocks/records per track. */
2388 	blksize = block->bp_block;
2389 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2390 	if (blk_per_trk == 0)
2391 		return ERR_PTR(-EINVAL);
2392 	/* Calculate record id of first and last block. */
2393 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2394 	first_offs = sector_div(first_trk, blk_per_trk);
2395 	last_rec = last_trk =
2396 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2397 	last_offs = sector_div(last_trk, blk_per_trk);
2398 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2399 
2400 	/* is transport mode supported? */
2401 #if defined(CONFIG_64BIT)
2402 	fcx_in_css = css_general_characteristics.fcx;
2403 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2404 	fcx_in_features = private->features.feature[40] & 0x80;
2405 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2406 #else
2407 	tpm = 0;
2408 #endif
2409 
2410 	/* is read track data and write track data in command mode supported? */
2411 	cmdrtd = private->features.feature[9] & 0x20;
2412 	cmdwtd = private->features.feature[12] & 0x40;
2413 	use_prefix = private->features.feature[8] & 0x01;
2414 
2415 	cqr = NULL;
2416 	if (cdlspecial || dasd_page_cache) {
2417 		/* do nothing, just fall through to the cmd mode single case */
2418 	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
2419 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2420 						    first_rec, last_rec,
2421 						    first_trk, last_trk,
2422 						    first_offs, last_offs,
2423 						    blk_per_trk, blksize);
2424 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2425 			cqr = NULL;
2426 	} else if (use_prefix &&
2427 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
2428 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2429 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2430 						   first_rec, last_rec,
2431 						   first_trk, last_trk,
2432 						   first_offs, last_offs,
2433 						   blk_per_trk, blksize);
2434 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2435 			cqr = NULL;
2436 	}
2437 	if (!cqr)
2438 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2439 						    first_rec, last_rec,
2440 						    first_trk, last_trk,
2441 						    first_offs, last_offs,
2442 						    blk_per_trk, blksize);
2443 	return cqr;
2444 }
2445 
2446 static int
2447 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2448 {
2449 	struct dasd_eckd_private *private;
2450 	struct ccw1 *ccw;
2451 	struct req_iterator iter;
2452 	struct bio_vec *bv;
2453 	char *dst, *cda;
2454 	unsigned int blksize, blk_per_trk, off;
2455 	sector_t recid;
2456 	int status;
2457 
2458 	if (!dasd_page_cache)
2459 		goto out;
2460 	private = (struct dasd_eckd_private *) cqr->block->base->private;
2461 	blksize = cqr->block->bp_block;
2462 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2463 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2464 	ccw = cqr->cpaddr;
2465 	/* Skip over define extent & locate record. */
2466 	ccw++;
2467 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
2468 		ccw++;
2469 	rq_for_each_segment(bv, req, iter) {
2470 		dst = page_address(bv->bv_page) + bv->bv_offset;
2471 		for (off = 0; off < bv->bv_len; off += blksize) {
2472 			/* Skip locate record. */
2473 			if (private->uses_cdl && recid <= 2*blk_per_trk)
2474 				ccw++;
2475 			if (dst) {
2476 				if (ccw->flags & CCW_FLAG_IDA)
2477 					cda = *((char **)((addr_t) ccw->cda));
2478 				else
2479 					cda = (char *)((addr_t) ccw->cda);
2480 				if (dst != cda) {
2481 					if (rq_data_dir(req) == READ)
2482 						memcpy(dst, cda, bv->bv_len);
2483 					kmem_cache_free(dasd_page_cache,
2484 					    (void *)((addr_t)cda & PAGE_MASK));
2485 				}
2486 				dst = NULL;
2487 			}
2488 			ccw++;
2489 			recid++;
2490 		}
2491 	}
2492 out:
2493 	status = cqr->status == DASD_CQR_DONE;
2494 	dasd_sfree_request(cqr, cqr->memdev);
2495 	return status;
2496 }
2497 
2498 /*
2499  * Modify ccw/tcw in cqr so it can be started on a base device.
2500  *
2501  * Note that this is not enough to restart the cqr!
2502  * Either reset cqr->startdev as well (summary unit check handling)
2503  * or restart via separate cqr (as in ERP handling).
2504  */
2505 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
2506 {
2507 	struct ccw1 *ccw;
2508 	struct PFX_eckd_data *pfxdata;
2509 	struct tcw *tcw;
2510 	struct tccb *tccb;
2511 	struct dcw *dcw;
2512 
2513 	if (cqr->cpmode == 1) {
2514 		tcw = cqr->cpaddr;
2515 		tccb = tcw_get_tccb(tcw);
2516 		dcw = (struct dcw *)&tccb->tca[0];
2517 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
2518 		pfxdata->validity.verify_base = 0;
2519 		pfxdata->validity.hyper_pav = 0;
2520 	} else {
2521 		ccw = cqr->cpaddr;
2522 		pfxdata = cqr->data;
2523 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2524 			pfxdata->validity.verify_base = 0;
2525 			pfxdata->validity.hyper_pav = 0;
2526 		}
2527 	}
2528 }
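/*
 * Typical caller pattern (see dasd_eckd_handle_terminated_request above):
 * clear the PAV validity bits with dasd_eckd_reset_ccw_to_base_io() and
 * then point cqr->startdev back at cqr->block->base before the cqr is
 * restarted.
 */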
2529 
2530 #define DASD_ECKD_CHANQ_MAX_SIZE 4
2531 
2532 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2533 						     struct dasd_block *block,
2534 						     struct request *req)
2535 {
2536 	struct dasd_eckd_private *private;
2537 	struct dasd_device *startdev;
2538 	unsigned long flags;
2539 	struct dasd_ccw_req *cqr;
2540 
2541 	startdev = dasd_alias_get_start_dev(base);
2542 	if (!startdev)
2543 		startdev = base;
2544 	private = (struct dasd_eckd_private *) startdev->private;
2545 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
2546 		return ERR_PTR(-EBUSY);
2547 
2548 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2549 	private->count++;
2550 	cqr = dasd_eckd_build_cp(startdev, block, req);
2551 	if (IS_ERR(cqr))
2552 		private->count--;
2553 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
2554 	return cqr;
2555 }
2556 
2557 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
2558 				   struct request *req)
2559 {
2560 	struct dasd_eckd_private *private;
2561 	unsigned long flags;
2562 
2563 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
2564 	private = (struct dasd_eckd_private *) cqr->memdev->private;
2565 	private->count--;
2566 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
2567 	return dasd_eckd_free_cp(cqr, req);
2568 }
2569 
2570 static int
2571 dasd_eckd_fill_info(struct dasd_device * device,
2572 		    struct dasd_information2_t * info)
2573 {
2574 	struct dasd_eckd_private *private;
2575 
2576 	private = (struct dasd_eckd_private *) device->private;
2577 	info->label_block = 2;
2578 	info->FBA_layout = private->uses_cdl ? 0 : 1;
2579 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
2580 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
2581 	memcpy(info->characteristics, &private->rdc_data,
2582 	       sizeof(struct dasd_eckd_characteristics));
2583 	info->confdata_size = min((unsigned long)private->conf_len,
2584 				  sizeof(info->configuration_data));
2585 	memcpy(info->configuration_data, private->conf_data,
2586 	       info->confdata_size);
2587 	return 0;
2588 }
2589 
2590 /*
2591  * SECTION: ioctl functions for eckd devices.
2592  */
2593 
2594 /*
2595  * Release device ioctl.
2596  * Builds a channel program to release a previously reserved
2597  * device (see dasd_eckd_reserve).
2598  */
2599 static int
2600 dasd_eckd_release(struct dasd_device *device)
2601 {
2602 	struct dasd_ccw_req *cqr;
2603 	int rc;
2604 	struct ccw1 *ccw;
2605 
2606 	if (!capable(CAP_SYS_ADMIN))
2607 		return -EACCES;
2608 
2609 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2610 	if (IS_ERR(cqr)) {
2611 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2612 			    "Could not allocate initialization request");
2613 		return PTR_ERR(cqr);
2614 	}
2615 	ccw = cqr->cpaddr;
2616 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
2617 	ccw->flags |= CCW_FLAG_SLI;
2618 	ccw->count = 32;
2619 	ccw->cda = (__u32)(addr_t) cqr->data;
2620 	cqr->startdev = device;
2621 	cqr->memdev = device;
2622 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2623 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2624 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2625 	cqr->expires = 2 * HZ;
2626 	cqr->buildclk = get_clock();
2627 	cqr->status = DASD_CQR_FILLED;
2628 
2629 	rc = dasd_sleep_on_immediatly(cqr);
2630 
2631 	dasd_sfree_request(cqr, cqr->memdev);
2632 	return rc;
2633 }
2634 
2635 /*
2636  * Reserve device ioctl.
2637  * Options are set to 'synchronous wait for interrupt' and
2638  * 'timeout the request'. This leads to a terminate IO if
2639  * the interrupt is outstanding for a certain time.
2640  */
2641 static int
2642 dasd_eckd_reserve(struct dasd_device *device)
2643 {
2644 	struct dasd_ccw_req *cqr;
2645 	int rc;
2646 	struct ccw1 *ccw;
2647 
2648 	if (!capable(CAP_SYS_ADMIN))
2649 		return -EACCES;
2650 
2651 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2652 	if (IS_ERR(cqr)) {
2653 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2654 			    "Could not allocate initialization request");
2655 		return PTR_ERR(cqr);
2656 	}
2657 	ccw = cqr->cpaddr;
2658 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
2659 	ccw->flags |= CCW_FLAG_SLI;
2660 	ccw->count = 32;
2661 	ccw->cda = (__u32)(addr_t) cqr->data;
2662 	cqr->startdev = device;
2663 	cqr->memdev = device;
2664 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2665 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2666 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2667 	cqr->expires = 2 * HZ;
2668 	cqr->buildclk = get_clock();
2669 	cqr->status = DASD_CQR_FILLED;
2670 
2671 	rc = dasd_sleep_on_immediatly(cqr);
2672 
2673 	dasd_sfree_request(cqr, cqr->memdev);
2674 	return rc;
2675 }
2676 
2677 /*
2678  * Steal lock ioctl - unconditional reserve device.
2679  * Builds a channel program to break a device's reservation.
2680  * (unconditional reserve)
2681  */
2682 static int
2683 dasd_eckd_steal_lock(struct dasd_device *device)
2684 {
2685 	struct dasd_ccw_req *cqr;
2686 	int rc;
2687 	struct ccw1 *ccw;
2688 
2689 	if (!capable(CAP_SYS_ADMIN))
2690 		return -EACCES;
2691 
2692 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2693 	if (IS_ERR(cqr)) {
2694 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2695 			    "Could not allocate initialization request");
2696 		return PTR_ERR(cqr);
2697 	}
2698 	ccw = cqr->cpaddr;
2699 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
2700 	ccw->flags |= CCW_FLAG_SLI;
2701 	ccw->count = 32;
2702 	ccw->cda = (__u32)(addr_t) cqr->data;
2703 	cqr->startdev = device;
2704 	cqr->memdev = device;
2705 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2706 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2707 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2708 	cqr->expires = 2 * HZ;
2709 	cqr->buildclk = get_clock();
2710 	cqr->status = DASD_CQR_FILLED;
2711 
2712 	rc = dasd_sleep_on_immediatly(cqr);
2713 
2714 	dasd_sfree_request(cqr, cqr->memdev);
2715 	return rc;
2716 }
2717 
2718 /*
2719  * Read performance statistics
2720  */
2721 static int
2722 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2723 {
2724 	struct dasd_psf_prssd_data *prssdp;
2725 	struct dasd_rssd_perf_stats_t *stats;
2726 	struct dasd_ccw_req *cqr;
2727 	struct ccw1 *ccw;
2728 	int rc;
2729 
2730 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
2731 				   (sizeof(struct dasd_psf_prssd_data) +
2732 				    sizeof(struct dasd_rssd_perf_stats_t)),
2733 				   device);
2734 	if (IS_ERR(cqr)) {
2735 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2736 			    "Could not allocate initialization request");
2737 		return PTR_ERR(cqr);
2738 	}
2739 	cqr->startdev = device;
2740 	cqr->memdev = device;
2741 	cqr->retries = 0;
2742 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2743 	cqr->expires = 10 * HZ;
2744 
2745 	/* Prepare for Read Subsystem Data */
2746 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2747 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
2748 	prssdp->order = PSF_ORDER_PRSSD;
2749 	prssdp->suborder = 0x01;	/* Performance Statistics */
2750 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
2751 
2752 	ccw = cqr->cpaddr;
2753 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2754 	ccw->count = sizeof(struct dasd_psf_prssd_data);
2755 	ccw->flags |= CCW_FLAG_CC;
2756 	ccw->cda = (__u32)(addr_t) prssdp;
2757 
2758 	/* Read Subsystem Data - Performance Statistics */
2759 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2760 	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
2761 
2762 	ccw++;
2763 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2764 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
2765 	ccw->cda = (__u32)(addr_t) stats;
2766 
2767 	cqr->buildclk = get_clock();
2768 	cqr->status = DASD_CQR_FILLED;
2769 	rc = dasd_sleep_on(cqr);
2770 	if (rc == 0) {
2771 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2772 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2773 		if (copy_to_user(argp, stats,
2774 				 sizeof(struct dasd_rssd_perf_stats_t)))
2775 			rc = -EFAULT;
2776 	}
2777 	dasd_sfree_request(cqr, cqr->memdev);
2778 	return rc;
2779 }
2780 
2781 /*
2782  * Get attributes (cache operations)
2783  * Returns the cache attributes used in Define Extent (DE).
2784  */
2785 static int
2786 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
2787 {
2788 	struct dasd_eckd_private *private =
2789 		(struct dasd_eckd_private *)device->private;
2790 	struct attrib_data_t attrib = private->attrib;
2791 	int rc;
2792 
2793         if (!capable(CAP_SYS_ADMIN))
2794                 return -EACCES;
2795 	if (!argp)
2796                 return -EINVAL;
2797 
2798 	rc = 0;
2799 	if (copy_to_user(argp, (long *) &attrib,
2800 			 sizeof(struct attrib_data_t)))
2801 		rc = -EFAULT;
2802 
2803 	return rc;
2804 }
2805 
2806 /*
2807  * Set attributes (cache operations)
2808  * Stores the attributes for cache operation to be used in Define Extent (DE).
2809  */
2810 static int
2811 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2812 {
2813 	struct dasd_eckd_private *private =
2814 		(struct dasd_eckd_private *)device->private;
2815 	struct attrib_data_t attrib;
2816 
2817 	if (!capable(CAP_SYS_ADMIN))
2818 		return -EACCES;
2819 	if (!argp)
2820 		return -EINVAL;
2821 
2822 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
2823 		return -EFAULT;
2824 	private->attrib = attrib;
2825 
2826 	dev_info(&device->cdev->dev,
2827 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
2828 		 private->attrib.operation, private->attrib.nr_cyl);
2829 	return 0;
2830 }
2831 
2832 /*
2833  * Issue syscall I/O to EMC Symmetrix array.
2834  * CCWs are PSF and RSSD
2835  */
2836 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2837 {
2838 	struct dasd_symmio_parms usrparm;
2839 	char *psf_data, *rssd_result;
2840 	struct dasd_ccw_req *cqr;
2841 	struct ccw1 *ccw;
2842 	int rc;
2843 
2844 	/* Copy parms from caller */
2845 	rc = -EFAULT;
2846 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2847 		goto out;
2848 	if (is_compat_task() || sizeof(long) == 4) {
2849 		/* Make sure pointers are sane even on 31 bit. */
2850 		rc = -EINVAL;
2851 		if ((usrparm.psf_data >> 32) != 0)
2852 			goto out;
2853 		if ((usrparm.rssd_result >> 32) != 0)
2854 			goto out;
2855 		usrparm.psf_data &= 0x7fffffffULL;
2856 		usrparm.rssd_result &= 0x7fffffffULL;
2857 	}
2858 	/* alloc I/O data area */
2859 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2860 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
2861 	if (!psf_data || !rssd_result) {
2862 		rc = -ENOMEM;
2863 		goto out_free;
2864 	}
2865 
2866 	/* get syscall header from user space */
2867 	rc = -EFAULT;
2868 	if (copy_from_user(psf_data,
2869 			   (void __user *)(unsigned long) usrparm.psf_data,
2870 			   usrparm.psf_data_len))
2871 		goto out_free;
2872 
2873 	/* sanity check on syscall header */
2874 	if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
2875 		rc = -EINVAL;
2876 		goto out_free;
2877 	}
2878 
2879 	/* setup CCWs for PSF + RSSD */
2880 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
2881 	if (IS_ERR(cqr)) {
2882 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2883 			"Could not allocate initialization request");
2884 		rc = PTR_ERR(cqr);
2885 		goto out_free;
2886 	}
2887 
2888 	cqr->startdev = device;
2889 	cqr->memdev = device;
2890 	cqr->retries = 3;
2891 	cqr->expires = 10 * HZ;
2892 	cqr->buildclk = get_clock();
2893 	cqr->status = DASD_CQR_FILLED;
2894 
2895 	/* Build the ccws */
2896 	ccw = cqr->cpaddr;
2897 
2898 	/* PSF ccw */
2899 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2900 	ccw->count = usrparm.psf_data_len;
2901 	ccw->flags |= CCW_FLAG_CC;
2902 	ccw->cda = (__u32)(addr_t) psf_data;
2903 
2904 	ccw++;
2905 
2906 	/* RSSD ccw  */
2907 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2908 	ccw->count = usrparm.rssd_result_len;
2909 	ccw->flags = CCW_FLAG_SLI ;
2910 	ccw->cda = (__u32)(addr_t) rssd_result;
2911 
2912 	rc = dasd_sleep_on(cqr);
2913 	if (rc)
2914 		goto out_sfree;
2915 
2916 	rc = -EFAULT;
2917 	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
2918 			   rssd_result, usrparm.rssd_result_len))
2919 		goto out_sfree;
2920 	rc = 0;
2921 
2922 out_sfree:
2923 	dasd_sfree_request(cqr, cqr->memdev);
2924 out_free:
2925 	kfree(rssd_result);
2926 	kfree(psf_data);
2927 out:
2928 	DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
2929 	return rc;
2930 }
2931 
2932 static int
2933 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
2934 {
2935 	struct dasd_device *device = block->base;
2936 
2937 	switch (cmd) {
2938 	case BIODASDGATTR:
2939 		return dasd_eckd_get_attrib(device, argp);
2940 	case BIODASDSATTR:
2941 		return dasd_eckd_set_attrib(device, argp);
2942 	case BIODASDPSRD:
2943 		return dasd_eckd_performance(device, argp);
2944 	case BIODASDRLSE:
2945 		return dasd_eckd_release(device);
2946 	case BIODASDRSRV:
2947 		return dasd_eckd_reserve(device);
2948 	case BIODASDSLCK:
2949 		return dasd_eckd_steal_lock(device);
2950 	case BIODASDSYMMIO:
2951 		return dasd_symm_io(device, argp);
2952 	default:
2953 		return -ENOIOCTLCMD;
2954 	}
2955 }
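/*
 * User space usage sketch (device node name is only an example, not part
 * of this file):
 *
 *	int fd = open("/dev/dasda", O_RDONLY);
 *	ioctl(fd, BIODASDRSRV);		reserve the device
 *	ioctl(fd, BIODASDRLSE);		release it again
 */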
2956 
2957 /*
2958  * Dump the range of CCWs into 'page' buffer
2959  * and return number of printed chars.
2960  */
2961 static int
2962 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
2963 {
2964 	int len, count;
2965 	char *datap;
2966 
2967 	len = 0;
2968 	while (from <= to) {
2969 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2970 			       " CCW %p: %08X %08X DAT:",
2971 			       from, ((int *) from)[0], ((int *) from)[1]);
2972 
2973 		/* get pointer to data (consider IDALs) */
2974 		if (from->flags & CCW_FLAG_IDA)
2975 			datap = (char *) *((addr_t *) (addr_t) from->cda);
2976 		else
2977 			datap = (char *) ((addr_t) from->cda);
2978 
2979 		/* dump data (max 32 bytes) */
2980 		for (count = 0; count < from->count && count < 32; count++) {
2981 			if (count % 8 == 0) len += sprintf(page + len, " ");
2982 			if (count % 4 == 0) len += sprintf(page + len, " ");
2983 			len += sprintf(page + len, "%02x", datap[count]);
2984 		}
2985 		len += sprintf(page + len, "\n");
2986 		from++;
2987 	}
2988 	return len;
2989 }
2990 
2991 static void
2992 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
2993 			 char *reason)
2994 {
2995 	u64 *sense;
2996 
2997 	sense = (u64 *) dasd_get_sense(irb);
2998 	if (sense) {
2999 		DBF_DEV_EVENT(DBF_EMERG, device,
3000 			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
3001 			      "%016llx", reason,
3002 			      scsw_is_tm(&irb->scsw) ? "t" : "c",
3003 			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
3004 			      scsw_dstat(&irb->scsw), sense[0], sense[1],
3005 			      sense[2], sense[3]);
3006 	} else {
3007 		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
3008 			      "SORRY - NO VALID SENSE AVAILABLE\n");
3009 	}
3010 }
3011 
3012 /*
3013  * Print sense data and related channel program.
3014  * Parts are printed separately because the printk buffer is only 1024 bytes.
3015  */
3016 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3017 				 struct dasd_ccw_req *req, struct irb *irb)
3018 {
3019 	char *page;
3020 	struct ccw1 *first, *last, *fail, *from, *to;
3021 	int len, sl, sct;
3022 
3023 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3024 	if (page == NULL) {
3025 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3026 			      "No memory to dump sense data\n");
3027 		return;
3028 	}
3029 	/* dump the sense data */
3030 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3031 		      " I/O status report for device %s:\n",
3032 		      dev_name(&device->cdev->dev));
3033 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3034 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3035 		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3036 		       scsw_cc(&irb->scsw), req ? req->intrc : 0);
3037 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3038 		       " device %s: Failing CCW: %p\n",
3039 		       dev_name(&device->cdev->dev),
3040 		       (void *) (addr_t) irb->scsw.cmd.cpa);
3041 	if (irb->esw.esw0.erw.cons) {
3042 		for (sl = 0; sl < 4; sl++) {
3043 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3044 				       " Sense(hex) %2d-%2d:",
3045 				       (8 * sl), ((8 * sl) + 7));
3046 
3047 			for (sct = 0; sct < 8; sct++) {
3048 				len += sprintf(page + len, " %02x",
3049 					       irb->ecw[8 * sl + sct]);
3050 			}
3051 			len += sprintf(page + len, "\n");
3052 		}
3053 
3054 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3055 			/* 24 Byte Sense Data */
3056 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3057 				" 24 Byte: %x MSG %x, "
3058 				"%s MSGb to SYSOP\n",
3059 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3060 				irb->ecw[1] & 0x10 ? "" : "no");
3061 		} else {
3062 			/* 32 Byte Sense Data */
3063 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3064 				" 32 Byte: Format: %x "
3065 				"Exception class %x\n",
3066 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3067 		}
3068 	} else {
3069 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3070 			" SORRY - NO VALID SENSE AVAILABLE\n");
3071 	}
3072 	printk("%s", page);
3073 
3074 	if (req) {
3075 		/* req == NULL for unsolicited interrupts */
3076 		/* dump the Channel Program (max 140 Bytes per line) */
3077 		/* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
3078 		first = req->cpaddr;
3079 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3080 		to = min(first + 6, last);
3081 		len = sprintf(page,  KERN_ERR PRINTK_HEADER
3082 			      " Related CP in req: %p\n", req);
3083 		dasd_eckd_dump_ccw_range(first, to, page + len);
3084 		printk("%s", page);
3085 
3086 		/* print failing CCW area (maximum 4) */
3087 		/* scsw->cda is either valid or zero  */
3088 		len = 0;
3089 		from = ++to;
3090 		fail = (struct ccw1 *)(addr_t)
3091 				irb->scsw.cmd.cpa; /* failing CCW */
3092 		if (from <  fail - 2) {
3093 			from = fail - 2;     /* there is a gap - print header */
3094 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3095 		}
3096 		to = min(fail + 1, last);
3097 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
3098 
3099 		/* print last CCWs (maximum 2) */
3100 		from = max(from, ++to);
3101 		if (from < last - 1) {
3102 			from = last - 1;     /* there is a gap - print header */
3103 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3104 		}
3105 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
3106 		if (len > 0)
3107 			printk("%s", page);
3108 	}
3109 	free_page((unsigned long) page);
3110 }
3111 
3112 
3113 /*
3114  * Print sense data from a tcw.
3115  */
3116 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3117 				 struct dasd_ccw_req *req, struct irb *irb)
3118 {
3119 	char *page;
3120 	int len, sl, sct, residual;
3121 
3122 	struct tsb *tsb;
3123 	u8 *sense;
3124 
3125 
3126 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3127 	if (page == NULL) {
3128 		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
3129 			    "No memory to dump sense data");
3130 		return;
3131 	}
3132 	/* dump the sense data */
3133 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3134 		      " I/O status report for device %s:\n",
3135 		      dev_name(&device->cdev->dev));
3136 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3137 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
3138 		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
3139 		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3140 		       scsw_cc(&irb->scsw), req->intrc,
3141 		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3142 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3143 		       " device %s: Failing TCW: %p\n",
3144 		       dev_name(&device->cdev->dev),
3145 		       (void *) (addr_t) irb->scsw.tm.tcw);
3146 
3147 	tsb = NULL;
3148 	sense = NULL;
3149 	if (irb->scsw.tm.tcw)
3150 		tsb = tcw_get_tsb(
3151 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3152 
3153 	if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
3154 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3155 			       " tsb->length %d\n", tsb->length);
3156 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3157 			       " tsb->flags %x\n", tsb->flags);
3158 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3159 			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
3160 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3161 			       " tsb->count %d\n", tsb->count);
3162 		residual = tsb->count - 28;
3163 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3164 			       " residual %d\n", residual);
3165 
3166 		switch (tsb->flags & 0x07) {
3167 		case 1:	/* tsa_iostat */
3168 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3169 			       " tsb->tsa.iostat.dev_time %d\n",
3170 				       tsb->tsa.iostat.dev_time);
3171 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3172 			       " tsb->tsa.iostat.def_time %d\n",
3173 				       tsb->tsa.iostat.def_time);
3174 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3175 			       " tsb->tsa.iostat.queue_time %d\n",
3176 				       tsb->tsa.iostat.queue_time);
3177 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3178 			       " tsb->tsa.iostat.dev_busy_time %d\n",
3179 				       tsb->tsa.iostat.dev_busy_time);
3180 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3181 			       " tsb->tsa.iostat.dev_act_time %d\n",
3182 				       tsb->tsa.iostat.dev_act_time);
3183 			sense = tsb->tsa.iostat.sense;
3184 			break;
3185 		case 2: /* ts_ddpc */
3186 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3187 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3188 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3189 			       " tsb->tsa.ddpc.rcq:  ");
3190 			for (sl = 0; sl < 16; sl++) {
3191 				for (sct = 0; sct < 8; sct++) {
3192 					len += sprintf(page + len, " %02x",
3193 						       tsb->tsa.ddpc.rcq[sl]);
3194 				}
3195 				len += sprintf(page + len, "\n");
3196 			}
3197 			sense = tsb->tsa.ddpc.sense;
3198 			break;
3199 		case 3: /* tsa_intrg */
3200 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3201 				      " tsb->tsa.intrg.: not supported yet\n");
3202 			break;
3203 		}
3204 
3205 		if (sense) {
3206 			for (sl = 0; sl < 4; sl++) {
3207 				len += sprintf(page + len,
3208 					       KERN_ERR PRINTK_HEADER
3209 					       " Sense(hex) %2d-%2d:",
3210 					       (8 * sl), ((8 * sl) + 7));
3211 				for (sct = 0; sct < 8; sct++) {
3212 					len += sprintf(page + len, " %02x",
3213 						       sense[8 * sl + sct]);
3214 				}
3215 				len += sprintf(page + len, "\n");
3216 			}
3217 
3218 			if (sense[27] & DASD_SENSE_BIT_0) {
3219 				/* 24 Byte Sense Data */
3220 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3221 					" 24 Byte: %x MSG %x, "
3222 					"%s MSGb to SYSOP\n",
3223 					sense[7] >> 4, sense[7] & 0x0f,
3224 					sense[1] & 0x10 ? "" : "no");
3225 			} else {
3226 				/* 32 Byte Sense Data */
3227 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3228 					" 32 Byte: Format: %x "
3229 					"Exception class %x\n",
3230 					sense[6] & 0x0f, sense[22] >> 4);
3231 			}
3232 		} else {
3233 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3234 				" SORRY - NO VALID SENSE AVAILABLE\n");
3235 		}
3236 	} else {
3237 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3238 			" SORRY - NO TSB DATA AVAILABLE\n");
3239 	}
3240 	printk("%s", page);
3241 	free_page((unsigned long) page);
3242 }
3243 
3244 static void dasd_eckd_dump_sense(struct dasd_device *device,
3245 				 struct dasd_ccw_req *req, struct irb *irb)
3246 {
3247 	if (req && scsw_is_tm(&req->irb.scsw))
3248 		dasd_eckd_dump_sense_tcw(device, req, irb);
3249 	else
3250 		dasd_eckd_dump_sense_ccw(device, req, irb);
3251 }
3252 
3253 int dasd_eckd_pm_freeze(struct dasd_device *device)
3254 {
3255 	/*
3256 	 * the device should be disconnected from our LCU structure;
3257 	 * on restore we will reconnect it and reread LCU specific
3258 	 * information like PAV support that might have changed.
3259 	 */
3260 	dasd_alias_remove_device(device);
3261 	dasd_alias_disconnect_device_from_lcu(device);
3262 
3263 	return 0;
3264 }
3265 
3266 int dasd_eckd_restore_device(struct dasd_device *device)
3267 {
3268 	struct dasd_eckd_private *private;
3269 	struct dasd_eckd_characteristics temp_rdc_data;
3270 	int is_known, rc;
3271 	struct dasd_uid temp_uid;
3272 	unsigned long flags;
3273 
3274 	private = (struct dasd_eckd_private *) device->private;
3275 
3276 	/* Read Configuration Data */
3277 	rc = dasd_eckd_read_conf(device);
3278 	if (rc)
3279 		goto out_err;
3280 
3281 	/* Generate device unique id and register in devmap */
3282 	rc = dasd_eckd_generate_uid(device, &private->uid);
3283 	dasd_get_uid(device->cdev, &temp_uid);
3284 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3285 		dev_err(&device->cdev->dev, "The UID of the DASD has "
3286 			"changed\n");
3287 	if (rc)
3288 		goto out_err;
3289 	dasd_set_uid(device->cdev, &private->uid);
3290 
3291 	/* register lcu with alias handling, enable PAV if this is a new lcu */
3292 	is_known = dasd_alias_make_device_known_to_lcu(device);
3293 	if (is_known < 0)
3294 		return is_known;
3295 	if (!is_known) {
3296 		dasd_eckd_validate_server(device);
3297 		dasd_alias_lcu_setup_complete(device);
3298 	} else
3299 		dasd_alias_wait_for_lcu_setup(device);
3300 
3301 	/* RE-Read Configuration Data */
3302 	rc = dasd_eckd_read_conf(device);
3303 	if (rc)
3304 		goto out_err;
3305 
3306 	/* Read Feature Codes */
3307 	dasd_eckd_read_features(device);
3308 
3309 	/* Read Device Characteristics */
3310 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3311 					 &temp_rdc_data, 64);
3312 	if (rc) {
3313 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3314 				"Read device characteristic failed, rc=%d", rc);
3315 		goto out_err;
3316 	}
3317 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3318 	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3319 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3320 
3321 	/* add device to alias management */
3322 	dasd_alias_add_device(device);
3323 
3324 	return 0;
3325 
3326 out_err:
3327 	return -1;
3328 }
3329 
3330 static struct ccw_driver dasd_eckd_driver = {
3331 	.name	     = "dasd-eckd",
3332 	.owner	     = THIS_MODULE,
3333 	.ids	     = dasd_eckd_ids,
3334 	.probe	     = dasd_eckd_probe,
3335 	.remove      = dasd_generic_remove,
3336 	.set_offline = dasd_generic_set_offline,
3337 	.set_online  = dasd_eckd_set_online,
3338 	.notify      = dasd_generic_notify,
3339 	.freeze      = dasd_generic_pm_freeze,
3340 	.thaw	     = dasd_generic_restore_device,
3341 	.restore     = dasd_generic_restore_device,
3342 };
3343 
3344 /*
3345  * max_blocks is dependent on the amount of storage that is available
3346  * in the static io buffer for each device. Currently each device has
3347  * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
3348  * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
3349  * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
3350  * addition we have one define extent ccw + 16 bytes of data and one
3351  * locate record ccw + 16 bytes of data. That makes:
3352  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
3353  * We want to fit two into the available memory so that we can immediately
3354  * start the next request when one finishes. That makes 249.5 blocks
3355  * for one request. Give a little safety and the result is 240.
3356  */
3357 static struct dasd_discipline dasd_eckd_discipline = {
3358 	.owner = THIS_MODULE,
3359 	.name = "ECKD",
3360 	.ebcname = "ECKD",
3361 	.max_blocks = 240,
3362 	.check_device = dasd_eckd_check_characteristics,
3363 	.uncheck_device = dasd_eckd_uncheck_device,
3364 	.do_analysis = dasd_eckd_do_analysis,
3365 	.ready_to_online = dasd_eckd_ready_to_online,
3366 	.online_to_ready = dasd_eckd_online_to_ready,
3367 	.fill_geometry = dasd_eckd_fill_geometry,
3368 	.start_IO = dasd_start_IO,
3369 	.term_IO = dasd_term_IO,
3370 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
3371 	.format_device = dasd_eckd_format_device,
3372 	.erp_action = dasd_eckd_erp_action,
3373 	.erp_postaction = dasd_eckd_erp_postaction,
3374 	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
3375 	.build_cp = dasd_eckd_build_alias_cp,
3376 	.free_cp = dasd_eckd_free_alias_cp,
3377 	.dump_sense = dasd_eckd_dump_sense,
3378 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
3379 	.fill_info = dasd_eckd_fill_info,
3380 	.ioctl = dasd_eckd_ioctl,
3381 	.freeze = dasd_eckd_pm_freeze,
3382 	.restore = dasd_eckd_restore_device,
3383 };
3384 
3385 static int __init
3386 dasd_eckd_init(void)
3387 {
3388 	int ret;
3389 
3390 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
3391 	ret = ccw_driver_register(&dasd_eckd_driver);
3392 	if (!ret)
3393 		wait_for_device_probe();
3394 
3395 	return ret;
3396 }
3397 
3398 static void __exit
3399 dasd_eckd_cleanup(void)
3400 {
3401 	ccw_driver_unregister(&dasd_eckd_driver);
3402 }
3403 
3404 module_init(dasd_eckd_init);
3405 module_exit(dasd_eckd_cleanup);
3406