xref: /openbmc/linux/drivers/s390/block/dasd_eckd.c (revision 7dd65feb)
1 /*
2  * File...........: linux/drivers/s390/block/dasd_eckd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 
23 #include <asm/debug.h>
24 #include <asm/idals.h>
25 #include <asm/ebcdic.h>
26 #include <asm/io.h>
27 #include <asm/uaccess.h>
28 #include <asm/cio.h>
29 #include <asm/ccwdev.h>
30 #include <asm/itcw.h>
31 
32 #include "dasd_int.h"
33 #include "dasd_eckd.h"
34 #include "../cio/chsc.h"
35 
36 
37 #ifdef PRINTK_HEADER
38 #undef PRINTK_HEADER
39 #endif				/* PRINTK_HEADER */
40 #define PRINTK_HEADER "dasd(eckd):"
41 
42 #define ECKD_C0(i) (i->home_bytes)
43 #define ECKD_F(i) (i->formula)
44 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
45 		    (i->factors.f_0x02.f1))
46 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
47 		    (i->factors.f_0x02.f2))
48 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
49 		    (i->factors.f_0x02.f3))
50 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
51 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
52 #define ECKD_F6(i) (i->factor6)
53 #define ECKD_F7(i) (i->factor7)
54 #define ECKD_F8(i) (i->factor8)
55 
56 MODULE_LICENSE("GPL");
57 
58 static struct dasd_discipline dasd_eckd_discipline;
59 
60 /* The ccw bus type uses this table to find devices that it sends to
61  * dasd_eckd_probe */
62 static struct ccw_device_id dasd_eckd_ids[] = {
63 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
64 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
65 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
66 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
67 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
68 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
69 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
70 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
71 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
72 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
73 	{ /* end of list */ },
74 };
75 
76 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
77 
78 static struct ccw_driver dasd_eckd_driver; /* see below */
79 
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83 
84 
85 /* Initial attempt at a probe function. This can be simplified once
86  * the other detection code is gone. */
87 static int
88 dasd_eckd_probe (struct ccw_device *cdev)
89 {
90 	int ret;
91 
92 	/* set ECKD specific ccw-device options */
93 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
94 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
95 	if (ret) {
96 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
97 				"dasd_eckd_probe: could not set "
98 				"ccw-device options");
99 		return ret;
100 	}
101 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
102 	return ret;
103 }
104 
105 static int
106 dasd_eckd_set_online(struct ccw_device *cdev)
107 {
108 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
109 }
110 
111 static const int sizes_trk0[] = { 28, 148, 84 };
112 #define LABEL_SIZE 140
113 
114 static inline unsigned int
115 round_up_multiple(unsigned int no, unsigned int mult)
116 {
117 	int rem = no % mult;
118 	return (rem ? no - rem + mult : no);
119 }
120 
121 static inline unsigned int
122 ceil_quot(unsigned int d1, unsigned int d2)
123 {
124 	return (d1 + (d2 - 1)) / d2;
125 }
126 
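/*
 * Number of records with key length kl and data length dl that fit on
 * one track of the given device type, using the capacity formulas
 * below. As a worked example for a 3390 with kl = 0 and dl = 4096:
 * dn = ceil(4102/232) + 1 = 19, so the result is
 * 1729 / (10 + 9 + ceil((4096 + 6*19)/34)) = 1729 / 143 = 12,
 * i.e. twelve 4 KB blocks per track.
 */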
127 static unsigned int
128 recs_per_track(struct dasd_eckd_characteristics * rdc,
129 	       unsigned int kl, unsigned int dl)
130 {
131 	int dn, kn;
132 
133 	switch (rdc->dev_type) {
134 	case 0x3380:
135 		if (kl)
136 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
137 				       ceil_quot(dl + 12, 32));
138 		else
139 			return 1499 / (15 + ceil_quot(dl + 12, 32));
140 	case 0x3390:
141 		dn = ceil_quot(dl + 6, 232) + 1;
142 		if (kl) {
143 			kn = ceil_quot(kl + 6, 232) + 1;
144 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
145 				       9 + ceil_quot(dl + 6 * dn, 34));
146 		} else
147 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
148 	case 0x9345:
149 		dn = ceil_quot(dl + 6, 232) + 1;
150 		if (kl) {
151 			kn = ceil_quot(kl + 6, 232) + 1;
152 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
153 				       ceil_quot(dl + 6 * dn, 34));
154 		} else
155 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
156 	}
157 	return 0;
158 }
159 
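/*
 * Store a cylinder/head pair in a ch_t: the low 16 bits of the cylinder
 * go into geo->cyl, any higher cylinder bits end up in the upper 12 bits
 * of geo->head, and the head number itself is kept in the low 4 bits
 * (presumably the extended addressing format used for large volumes).
 */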
160 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
161 {
162 	geo->cyl = (__u16) cyl;
163 	geo->head = cyl >> 16;
164 	geo->head <<= 4;
165 	geo->head |= head;
166 }
167 
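/*
 * If the device supports XRC, switch on the System Time Stamp in the
 * define extent data and fill in the current sync clock value. A sync
 * clock that is switched off (-ENOSYS/-EACCES) is tolerated; any other
 * error is passed back to the caller.
 */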
168 static int
169 check_XRC(struct ccw1 *de_ccw,
170 	  struct DE_eckd_data *data,
171 	  struct dasd_device *device)
172 {
173 	struct dasd_eckd_private *private;
174 	int rc;
175 
176 	private = (struct dasd_eckd_private *) device->private;
177 	if (!private->rdc_data.facilities.XRC_supported)
178 		return 0;
179 
180 	/* switch on System Time Stamp - needed for XRC Support */
181 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
182 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
183 
184 	rc = get_sync_clock(&data->ep_sys_time);
185 	/* Ignore return code if sync clock is switched off. */
186 	if (rc == -ENOSYS || rc == -EACCES)
187 		rc = 0;
188 
189 	de_ccw->count = sizeof(struct DE_eckd_data);
190 	de_ccw->flags |= CCW_FLAG_SLI;
191 	return rc;
192 }
193 
194 static int
195 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
196 	      unsigned int totrk, int cmd, struct dasd_device *device)
197 {
198 	struct dasd_eckd_private *private;
199 	u32 begcyl, endcyl;
200 	u16 heads, beghead, endhead;
201 	int rc = 0;
202 
203 	private = (struct dasd_eckd_private *) device->private;
204 
205 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
206 	ccw->flags = 0;
207 	ccw->count = 16;
208 	ccw->cda = (__u32) __pa(data);
209 
210 	memset(data, 0, sizeof(struct DE_eckd_data));
211 	switch (cmd) {
212 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
213 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
214 	case DASD_ECKD_CCW_READ:
215 	case DASD_ECKD_CCW_READ_MT:
216 	case DASD_ECKD_CCW_READ_CKD:
217 	case DASD_ECKD_CCW_READ_CKD_MT:
218 	case DASD_ECKD_CCW_READ_KD:
219 	case DASD_ECKD_CCW_READ_KD_MT:
220 	case DASD_ECKD_CCW_READ_COUNT:
221 		data->mask.perm = 0x1;
222 		data->attributes.operation = private->attrib.operation;
223 		break;
224 	case DASD_ECKD_CCW_WRITE:
225 	case DASD_ECKD_CCW_WRITE_MT:
226 	case DASD_ECKD_CCW_WRITE_KD:
227 	case DASD_ECKD_CCW_WRITE_KD_MT:
228 		data->mask.perm = 0x02;
229 		data->attributes.operation = private->attrib.operation;
230 		rc = check_XRC(ccw, data, device);
231 		break;
232 	case DASD_ECKD_CCW_WRITE_CKD:
233 	case DASD_ECKD_CCW_WRITE_CKD_MT:
234 		data->attributes.operation = DASD_BYPASS_CACHE;
235 		rc = check_XRC(ccw, data, device);
236 		break;
237 	case DASD_ECKD_CCW_ERASE:
238 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
239 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
240 		data->mask.perm = 0x3;
241 		data->mask.auth = 0x1;
242 		data->attributes.operation = DASD_BYPASS_CACHE;
243 		rc = check_XRC(ccw, data, device);
244 		break;
245 	default:
246 		dev_err(&device->cdev->dev,
247 			"0x%x is not a known command\n", cmd);
248 		break;
249 	}
250 
251 	data->attributes.mode = 0x3;	/* ECKD */
252 
253 	if ((private->rdc_data.cu_type == 0x2105 ||
254 	     private->rdc_data.cu_type == 0x2107 ||
255 	     private->rdc_data.cu_type == 0x1750)
256 	    && !(private->uses_cdl && trk < 2))
257 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
258 
259 	heads = private->rdc_data.trk_per_cyl;
260 	begcyl = trk / heads;
261 	beghead = trk % heads;
262 	endcyl = totrk / heads;
263 	endhead = totrk % heads;
264 
265 	/* check for sequential prestage - enhance cylinder range */
266 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
267 	    data->attributes.operation == DASD_SEQ_ACCESS) {
268 
269 		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
270 			endcyl += private->attrib.nr_cyl;
271 		else
272 			endcyl = (private->real_cyl - 1);
273 	}
274 
275 	set_ch_t(&data->beg_ext, begcyl, beghead);
276 	set_ch_t(&data->end_ext, endcyl, endhead);
277 	return rc;
278 }
279 
280 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
281 			       struct dasd_device  *device)
282 {
283 	struct dasd_eckd_private *private;
284 	int rc;
285 
286 	private = (struct dasd_eckd_private *) device->private;
287 	if (!private->rdc_data.facilities.XRC_supported)
288 		return 0;
289 
290 	/* switch on System Time Stamp - needed for XRC Support */
291 	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
292 	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
293 	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
294 
295 	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
296 	/* Ignore return code if sync clock is switched off. */
297 	if (rc == -ENOSYS || rc == -EACCES)
298 		rc = 0;
299 	return rc;
300 }
301 
302 static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
303 			  unsigned int rec_on_trk, int count, int cmd,
304 			  struct dasd_device *device, unsigned int reclen,
305 			  unsigned int tlf)
306 {
307 	struct dasd_eckd_private *private;
308 	int sector;
309 	int dn, d;
310 
311 	private = (struct dasd_eckd_private *) device->private;
312 
313 	memset(data, 0, sizeof(*data));
314 	sector = 0;
315 	if (rec_on_trk) {
316 		switch (private->rdc_data.dev_type) {
317 		case 0x3390:
318 			dn = ceil_quot(reclen + 6, 232);
319 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
320 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
321 			break;
322 		case 0x3380:
323 			d = 7 + ceil_quot(reclen + 12, 32);
324 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
325 			break;
326 		}
327 	}
328 	data->sector = sector;
329 	/* note: the meaning of count depends on the operation:
330 	 *	 for record-based I/O it is the number of records, but for
331 	 *	 track-based I/O it is the number of tracks
332 	 */
333 	data->count = count;
334 	switch (cmd) {
335 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
336 		data->operation.orientation = 0x3;
337 		data->operation.operation = 0x03;
338 		break;
339 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
340 		data->operation.orientation = 0x3;
341 		data->operation.operation = 0x16;
342 		break;
343 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
344 		data->operation.orientation = 0x1;
345 		data->operation.operation = 0x03;
346 		data->count++;
347 		break;
348 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
349 		data->operation.orientation = 0x3;
350 		data->operation.operation = 0x16;
351 		data->count++;
352 		break;
353 	case DASD_ECKD_CCW_WRITE:
354 	case DASD_ECKD_CCW_WRITE_MT:
355 	case DASD_ECKD_CCW_WRITE_KD:
356 	case DASD_ECKD_CCW_WRITE_KD_MT:
357 		data->auxiliary.length_valid = 0x1;
358 		data->length = reclen;
359 		data->operation.operation = 0x01;
360 		break;
361 	case DASD_ECKD_CCW_WRITE_CKD:
362 	case DASD_ECKD_CCW_WRITE_CKD_MT:
363 		data->auxiliary.length_valid = 0x1;
364 		data->length = reclen;
365 		data->operation.operation = 0x03;
366 		break;
367 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
368 		data->auxiliary.length_valid = 0x1;
369 		data->length = reclen;	/* not tlf, as one might think */
370 		data->operation.operation = 0x3F;
371 		data->extended_operation = 0x23;
372 		break;
373 	case DASD_ECKD_CCW_READ:
374 	case DASD_ECKD_CCW_READ_MT:
375 	case DASD_ECKD_CCW_READ_KD:
376 	case DASD_ECKD_CCW_READ_KD_MT:
377 		data->auxiliary.length_valid = 0x1;
378 		data->length = reclen;
379 		data->operation.operation = 0x06;
380 		break;
381 	case DASD_ECKD_CCW_READ_CKD:
382 	case DASD_ECKD_CCW_READ_CKD_MT:
383 		data->auxiliary.length_valid = 0x1;
384 		data->length = reclen;
385 		data->operation.operation = 0x16;
386 		break;
387 	case DASD_ECKD_CCW_READ_COUNT:
388 		data->operation.operation = 0x06;
389 		break;
390 	case DASD_ECKD_CCW_READ_TRACK_DATA:
391 		data->auxiliary.length_valid = 0x1;
392 		data->length = tlf;
393 		data->operation.operation = 0x0C;
394 		break;
395 	case DASD_ECKD_CCW_ERASE:
396 		data->length = reclen;
397 		data->auxiliary.length_valid = 0x1;
398 		data->operation.operation = 0x0b;
399 		break;
400 	default:
401 		DBF_DEV_EVENT(DBF_ERR, device,
402 			    "fill LRE unknown opcode 0x%x", cmd);
403 		BUG();
404 	}
405 	set_ch_t(&data->seek_addr,
406 		 trk / private->rdc_data.trk_per_cyl,
407 		 trk % private->rdc_data.trk_per_cyl);
408 	data->search_arg.cyl = data->seek_addr.cyl;
409 	data->search_arg.head = data->seek_addr.head;
410 	data->search_arg.record = rec_on_trk;
411 }
412 
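/*
 * Build a Prefix CCW. Format 0 carries only the define extent part,
 * format 1 additionally carries locate record extended data (filled in
 * via fill_LRE_data). basedev is the base device the extent refers to,
 * while startdev may be a (hyper) PAV alias on which the request is
 * actually started.
 */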
413 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
414 		      unsigned int trk, unsigned int totrk, int cmd,
415 		      struct dasd_device *basedev, struct dasd_device *startdev,
416 		      unsigned char format, unsigned int rec_on_trk, int count,
417 		      unsigned int blksize, unsigned int tlf)
418 {
419 	struct dasd_eckd_private *basepriv, *startpriv;
420 	struct DE_eckd_data *dedata;
421 	struct LRE_eckd_data *lredata;
422 	u32 begcyl, endcyl;
423 	u16 heads, beghead, endhead;
424 	int rc = 0;
425 
426 	basepriv = (struct dasd_eckd_private *) basedev->private;
427 	startpriv = (struct dasd_eckd_private *) startdev->private;
428 	dedata = &pfxdata->define_extent;
429 	lredata = &pfxdata->locate_record;
430 
431 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
432 	ccw->flags = 0;
433 	ccw->count = sizeof(*pfxdata);
434 	ccw->cda = (__u32) __pa(pfxdata);
435 
436 	memset(pfxdata, 0, sizeof(*pfxdata));
437 	/* prefix data */
438 	if (format > 1) {
439 		DBF_DEV_EVENT(DBF_ERR, basedev,
440 			      "PFX LRE unknown format 0x%x", format);
441 		BUG();
442 		return -EINVAL;
443 	}
444 	pfxdata->format = format;
445 	pfxdata->base_address = basepriv->ned->unit_addr;
446 	pfxdata->base_lss = basepriv->ned->ID;
447 	pfxdata->validity.define_extent = 1;
448 
449 	/* private uid is kept up to date, conf_data may be outdated */
450 	if (startpriv->uid.type != UA_BASE_DEVICE) {
451 		pfxdata->validity.verify_base = 1;
452 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
453 			pfxdata->validity.hyper_pav = 1;
454 	}
455 
456 	/* define extent data (mostly) */
457 	switch (cmd) {
458 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
459 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
460 	case DASD_ECKD_CCW_READ:
461 	case DASD_ECKD_CCW_READ_MT:
462 	case DASD_ECKD_CCW_READ_CKD:
463 	case DASD_ECKD_CCW_READ_CKD_MT:
464 	case DASD_ECKD_CCW_READ_KD:
465 	case DASD_ECKD_CCW_READ_KD_MT:
466 	case DASD_ECKD_CCW_READ_COUNT:
467 		dedata->mask.perm = 0x1;
468 		dedata->attributes.operation = basepriv->attrib.operation;
469 		break;
470 	case DASD_ECKD_CCW_READ_TRACK_DATA:
471 		dedata->mask.perm = 0x1;
472 		dedata->attributes.operation = basepriv->attrib.operation;
473 		dedata->blk_size = 0;
474 		break;
475 	case DASD_ECKD_CCW_WRITE:
476 	case DASD_ECKD_CCW_WRITE_MT:
477 	case DASD_ECKD_CCW_WRITE_KD:
478 	case DASD_ECKD_CCW_WRITE_KD_MT:
479 		dedata->mask.perm = 0x02;
480 		dedata->attributes.operation = basepriv->attrib.operation;
481 		rc = check_XRC_on_prefix(pfxdata, basedev);
482 		break;
483 	case DASD_ECKD_CCW_WRITE_CKD:
484 	case DASD_ECKD_CCW_WRITE_CKD_MT:
485 		dedata->attributes.operation = DASD_BYPASS_CACHE;
486 		rc = check_XRC_on_prefix(pfxdata, basedev);
487 		break;
488 	case DASD_ECKD_CCW_ERASE:
489 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
490 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
491 		dedata->mask.perm = 0x3;
492 		dedata->mask.auth = 0x1;
493 		dedata->attributes.operation = DASD_BYPASS_CACHE;
494 		rc = check_XRC_on_prefix(pfxdata, basedev);
495 		break;
496 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
497 		dedata->mask.perm = 0x02;
498 		dedata->attributes.operation = basepriv->attrib.operation;
499 		dedata->blk_size = blksize;
500 		rc = check_XRC_on_prefix(pfxdata, basedev);
501 		break;
502 	default:
503 		DBF_DEV_EVENT(DBF_ERR, basedev,
504 			    "PFX LRE unknown opcode 0x%x", cmd);
505 		BUG();
506 		return -EINVAL;
507 	}
508 
509 	dedata->attributes.mode = 0x3;	/* ECKD */
510 
511 	if ((basepriv->rdc_data.cu_type == 0x2105 ||
512 	     basepriv->rdc_data.cu_type == 0x2107 ||
513 	     basepriv->rdc_data.cu_type == 0x1750)
514 	    && !(basepriv->uses_cdl && trk < 2))
515 		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
516 
517 	heads = basepriv->rdc_data.trk_per_cyl;
518 	begcyl = trk / heads;
519 	beghead = trk % heads;
520 	endcyl = totrk / heads;
521 	endhead = totrk % heads;
522 
523 	/* check for sequential prestage - enhance cylinder range */
524 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
525 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
526 
527 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
528 			endcyl += basepriv->attrib.nr_cyl;
529 		else
530 			endcyl = (basepriv->real_cyl - 1);
531 	}
532 
533 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
534 	set_ch_t(&dedata->end_ext, endcyl, endhead);
535 
536 	if (format == 1) {
537 		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
538 			      basedev, blksize, tlf);
539 	}
540 
541 	return rc;
542 }
543 
544 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
545 		  unsigned int trk, unsigned int totrk, int cmd,
546 		  struct dasd_device *basedev, struct dasd_device *startdev)
547 {
548 	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
549 			  0, 0, 0, 0, 0);
550 }
551 
552 static void
553 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
554 	      unsigned int rec_on_trk, int no_rec, int cmd,
555 	      struct dasd_device * device, int reclen)
556 {
557 	struct dasd_eckd_private *private;
558 	int sector;
559 	int dn, d;
560 
561 	private = (struct dasd_eckd_private *) device->private;
562 
563 	DBF_DEV_EVENT(DBF_INFO, device,
564 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
565 		  trk, rec_on_trk, no_rec, cmd, reclen);
566 
567 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
568 	ccw->flags = 0;
569 	ccw->count = 16;
570 	ccw->cda = (__u32) __pa(data);
571 
572 	memset(data, 0, sizeof(struct LO_eckd_data));
573 	sector = 0;
574 	if (rec_on_trk) {
575 		switch (private->rdc_data.dev_type) {
576 		case 0x3390:
577 			dn = ceil_quot(reclen + 6, 232);
578 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
579 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
580 			break;
581 		case 0x3380:
582 			d = 7 + ceil_quot(reclen + 12, 32);
583 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
584 			break;
585 		}
586 	}
587 	data->sector = sector;
588 	data->count = no_rec;
589 	switch (cmd) {
590 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
591 		data->operation.orientation = 0x3;
592 		data->operation.operation = 0x03;
593 		break;
594 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
595 		data->operation.orientation = 0x3;
596 		data->operation.operation = 0x16;
597 		break;
598 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
599 		data->operation.orientation = 0x1;
600 		data->operation.operation = 0x03;
601 		data->count++;
602 		break;
603 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
604 		data->operation.orientation = 0x3;
605 		data->operation.operation = 0x16;
606 		data->count++;
607 		break;
608 	case DASD_ECKD_CCW_WRITE:
609 	case DASD_ECKD_CCW_WRITE_MT:
610 	case DASD_ECKD_CCW_WRITE_KD:
611 	case DASD_ECKD_CCW_WRITE_KD_MT:
612 		data->auxiliary.last_bytes_used = 0x1;
613 		data->length = reclen;
614 		data->operation.operation = 0x01;
615 		break;
616 	case DASD_ECKD_CCW_WRITE_CKD:
617 	case DASD_ECKD_CCW_WRITE_CKD_MT:
618 		data->auxiliary.last_bytes_used = 0x1;
619 		data->length = reclen;
620 		data->operation.operation = 0x03;
621 		break;
622 	case DASD_ECKD_CCW_READ:
623 	case DASD_ECKD_CCW_READ_MT:
624 	case DASD_ECKD_CCW_READ_KD:
625 	case DASD_ECKD_CCW_READ_KD_MT:
626 		data->auxiliary.last_bytes_used = 0x1;
627 		data->length = reclen;
628 		data->operation.operation = 0x06;
629 		break;
630 	case DASD_ECKD_CCW_READ_CKD:
631 	case DASD_ECKD_CCW_READ_CKD_MT:
632 		data->auxiliary.last_bytes_used = 0x1;
633 		data->length = reclen;
634 		data->operation.operation = 0x16;
635 		break;
636 	case DASD_ECKD_CCW_READ_COUNT:
637 		data->operation.operation = 0x06;
638 		break;
639 	case DASD_ECKD_CCW_ERASE:
640 		data->length = reclen;
641 		data->auxiliary.last_bytes_used = 0x1;
642 		data->operation.operation = 0x0b;
643 		break;
644 	default:
645 		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
646 			      "opcode 0x%x", cmd);
647 	}
648 	set_ch_t(&data->seek_addr,
649 		 trk / private->rdc_data.trk_per_cyl,
650 		 trk % private->rdc_data.trk_per_cyl);
651 	data->search_arg.cyl = data->seek_addr.cyl;
652 	data->search_arg.head = data->seek_addr.head;
653 	data->search_arg.record = rec_on_trk;
654 }
655 
656 /*
657  * Returns 1 if the block is one of the special blocks that needs
658  * to get read/written with the KD variant of the command.
659  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
660  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
661  * Luckily the KD variants differ only by one bit (0x08) from the
662  * normal variant. So don't wonder about code like:
663  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
664  *         ccw->cmd_code |= 0x8;
665  */
666 static inline int
667 dasd_eckd_cdl_special(int blk_per_trk, int recid)
668 {
669 	if (recid < 3)
670 		return 1;
671 	if (recid < blk_per_trk)
672 		return 0;
673 	if (recid < 2 * blk_per_trk)
674 		return 1;
675 	return 0;
676 }
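
/*
 * Example: with 12 blocks per track the special record ids are 0-2
 * (the oversized records on track 0) and 12-23 (all of track 1, which
 * is formatted with label-sized records under the compatible disk
 * layout).
 */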
677 
678 /*
679  * Returns the record size for the special blocks of the cdl format.
680  * Only returns something useful if dasd_eckd_cdl_special is true
681  * for the recid.
682  */
683 static inline int
684 dasd_eckd_cdl_reclen(int recid)
685 {
686 	if (recid < 3)
687 		return sizes_trk0[recid];
688 	return LABEL_SIZE;
689 }
690 
691 /*
692  * Generate device unique id that specifies the physical device.
693  */
694 static int dasd_eckd_generate_uid(struct dasd_device *device,
695 				  struct dasd_uid *uid)
696 {
697 	struct dasd_eckd_private *private;
698 	int count;
699 
700 	private = (struct dasd_eckd_private *) device->private;
701 	if (!private)
702 		return -ENODEV;
703 	if (!private->ned || !private->gneq)
704 		return -ENODEV;
705 
706 	memset(uid, 0, sizeof(struct dasd_uid));
707 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
708 	       sizeof(uid->vendor) - 1);
709 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
710 	memcpy(uid->serial, private->ned->HDA_location,
711 	       sizeof(uid->serial) - 1);
712 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
713 	uid->ssid = private->gneq->subsystemID;
714 	uid->real_unit_addr = private->ned->unit_addr;
715 	if (private->sneq) {
716 		uid->type = private->sneq->sua_flags;
717 		if (uid->type == UA_BASE_PAV_ALIAS)
718 			uid->base_unit_addr = private->sneq->base_unit_addr;
719 	} else {
720 		uid->type = UA_BASE_DEVICE;
721 	}
722 	if (private->vdsneq) {
723 		for (count = 0; count < 16; count++) {
724 			sprintf(uid->vduit+2*count, "%02x",
725 				private->vdsneq->uit[count]);
726 		}
727 	}
728 	return 0;
729 }
730 
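/*
 * Build a single-CCW request that executes the Read Configuration Data
 * command taken from the RCD CIW on the given channel path (lpm) and
 * stores the result in rcd_buffer.
 */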
731 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
732 						    void *rcd_buffer,
733 						    struct ciw *ciw, __u8 lpm)
734 {
735 	struct dasd_ccw_req *cqr;
736 	struct ccw1 *ccw;
737 
738 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
739 				   device);
740 
741 	if (IS_ERR(cqr)) {
742 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
743 			      "Could not allocate RCD request");
744 		return cqr;
745 	}
746 
747 	ccw = cqr->cpaddr;
748 	ccw->cmd_code = ciw->cmd;
749 	ccw->cda = (__u32)(addr_t)rcd_buffer;
750 	ccw->count = ciw->count;
751 
752 	cqr->startdev = device;
753 	cqr->memdev = device;
754 	cqr->block = NULL;
755 	cqr->expires = 10*HZ;
756 	cqr->lpm = lpm;
757 	cqr->retries = 256;
758 	cqr->buildclk = get_clock();
759 	cqr->status = DASD_CQR_FILLED;
760 	return cqr;
761 }
762 
763 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
764 				   void **rcd_buffer,
765 				   int *rcd_buffer_size, __u8 lpm)
766 {
767 	struct ciw *ciw;
768 	char *rcd_buf = NULL;
769 	int ret;
770 	struct dasd_ccw_req *cqr;
771 
772 	/*
773 	 * scan for RCD command in extended SenseID data
774 	 */
775 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
776 	if (!ciw || ciw->cmd == 0) {
777 		ret = -EOPNOTSUPP;
778 		goto out_error;
779 	}
780 	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
781 	if (!rcd_buf) {
782 		ret = -ENOMEM;
783 		goto out_error;
784 	}
785 
786 	/*
787 	 * buffer has to start with EBCDIC "V1.0" to show
788 	 * support for virtual device SNEQ
789 	 */
790 	rcd_buf[0] = 0xE5;
791 	rcd_buf[1] = 0xF1;
792 	rcd_buf[2] = 0x4B;
793 	rcd_buf[3] = 0xF0;
794 	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
795 	if (IS_ERR(cqr)) {
796 		ret =  PTR_ERR(cqr);
797 		goto out_error;
798 	}
799 	ret = dasd_sleep_on(cqr);
800 	/*
801 	 * on success we update the caller's output parameters
802 	 */
803 	dasd_sfree_request(cqr, cqr->memdev);
804 	if (ret)
805 		goto out_error;
806 
807 	*rcd_buffer_size = ciw->count;
808 	*rcd_buffer = rcd_buf;
809 	return 0;
810 out_error:
811 	kfree(rcd_buf);
812 	*rcd_buffer = NULL;
813 	*rcd_buffer_size = 0;
814 	return ret;
815 }
816 
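/*
 * Walk the configuration data in dasd_sneq sized steps and remember
 * pointers to the SNEQ, the vendor dependent SNEQ, the GNEQ and the
 * NED. The NED and the GNEQ are mandatory; without them the
 * configuration data is rejected.
 */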
817 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
818 {
819 
820 	struct dasd_sneq *sneq;
821 	int i, count;
822 
823 	private->ned = NULL;
824 	private->sneq = NULL;
825 	private->vdsneq = NULL;
826 	private->gneq = NULL;
827 	count = private->conf_len / sizeof(struct dasd_sneq);
828 	sneq = (struct dasd_sneq *)private->conf_data;
829 	for (i = 0; i < count; ++i) {
830 		if (sneq->flags.identifier == 1 && sneq->format == 1)
831 			private->sneq = sneq;
832 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
833 			private->vdsneq = (struct vd_sneq *)sneq;
834 		else if (sneq->flags.identifier == 2)
835 			private->gneq = (struct dasd_gneq *)sneq;
836 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
837 			private->ned = (struct dasd_ned *)sneq;
838 		sneq++;
839 	}
840 	if (!private->ned || !private->gneq) {
841 		private->ned = NULL;
842 		private->sneq = NULL;
843 		private->vdsneq = NULL;
844 		private->gneq = NULL;
845 		return -EINVAL;
846 	}
847 	return 0;
848 
849 };
850 
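/*
 * Find the GNEQ (identifier 2) in the configuration data of one path
 * and return the low three bits of its byte 18, which apparently encode
 * the access class of that path (see the 0x02/0x03 cases in
 * dasd_eckd_read_conf).
 */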
851 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
852 {
853 	struct dasd_gneq *gneq;
854 	int i, count, found;
855 
856 	count = conf_len / sizeof(*gneq);
857 	gneq = (struct dasd_gneq *)conf_data;
858 	found = 0;
859 	for (i = 0; i < count; ++i) {
860 		if (gneq->flags.identifier == 2) {
861 			found = 1;
862 			break;
863 		}
864 		gneq++;
865 	}
866 	if (found)
867 		return ((char *)gneq)[18] & 0x07;
868 	else
869 		return 0;
870 }
871 
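/*
 * Read the configuration data on every operational channel path. The
 * first set that can be identified is kept in private->conf_data; the
 * per-path access byte is used to sort each path into path_data->npm
 * or path_data->ppm.
 */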
872 static int dasd_eckd_read_conf(struct dasd_device *device)
873 {
874 	void *conf_data;
875 	int conf_len, conf_data_saved;
876 	int rc;
877 	__u8 lpm;
878 	struct dasd_eckd_private *private;
879 	struct dasd_eckd_path *path_data;
880 
881 	private = (struct dasd_eckd_private *) device->private;
882 	path_data = (struct dasd_eckd_path *) &private->path_data;
883 	path_data->opm = ccw_device_get_path_mask(device->cdev);
884 	lpm = 0x80;
885 	conf_data_saved = 0;
886 	/* get configuration data per operational path */
887 	for (lpm = 0x80; lpm; lpm>>= 1) {
888 		if (lpm & path_data->opm){
889 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
890 						     &conf_len, lpm);
891 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
892 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
893 					  "Read configuration data returned "
894 					  "error %d", rc);
895 				return rc;
896 			}
897 			if (conf_data == NULL) {
898 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
899 						"No configuration data "
900 						"retrieved");
901 				continue;	/* no error */
902 			}
903 			/* save first valid configuration data */
904 			if (!conf_data_saved) {
905 				kfree(private->conf_data);
906 				private->conf_data = conf_data;
907 				private->conf_len = conf_len;
908 				if (dasd_eckd_identify_conf_parts(private)) {
909 					private->conf_data = NULL;
910 					private->conf_len = 0;
911 					kfree(conf_data);
912 					continue;
913 				}
914 				conf_data_saved++;
915 			}
916 			switch (dasd_eckd_path_access(conf_data, conf_len)) {
917 			case 0x02:
918 				path_data->npm |= lpm;
919 				break;
920 			case 0x03:
921 				path_data->ppm |= lpm;
922 				break;
923 			}
924 			if (conf_data != private->conf_data)
925 				kfree(conf_data);
926 		}
927 	}
928 	return 0;
929 }
930 
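/*
 * Read the feature codes of the storage server via PSF/RSSD (suborder
 * 0x41) and cache them in private->features. A failure is only
 * reported; the caller continues without the feature information.
 */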
931 static int dasd_eckd_read_features(struct dasd_device *device)
932 {
933 	struct dasd_psf_prssd_data *prssdp;
934 	struct dasd_rssd_features *features;
935 	struct dasd_ccw_req *cqr;
936 	struct ccw1 *ccw;
937 	int rc;
938 	struct dasd_eckd_private *private;
939 
940 	private = (struct dasd_eckd_private *) device->private;
941 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
942 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
943 				   (sizeof(struct dasd_psf_prssd_data) +
944 				    sizeof(struct dasd_rssd_features)),
945 				   device);
946 	if (IS_ERR(cqr)) {
947 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
948 				"allocate initialization request");
949 		return PTR_ERR(cqr);
950 	}
951 	cqr->startdev = device;
952 	cqr->memdev = device;
953 	cqr->block = NULL;
954 	cqr->retries = 256;
955 	cqr->expires = 10 * HZ;
956 
957 	/* Prepare for Read Subsystem Data */
958 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
959 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
960 	prssdp->order = PSF_ORDER_PRSSD;
961 	prssdp->suborder = 0x41;	/* Read Feature Codes */
962 	/* all other bytes of prssdp must be zero */
963 
964 	ccw = cqr->cpaddr;
965 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
966 	ccw->count = sizeof(struct dasd_psf_prssd_data);
967 	ccw->flags |= CCW_FLAG_CC;
968 	ccw->cda = (__u32)(addr_t) prssdp;
969 
970 	/* Read Subsystem Data - feature codes */
971 	features = (struct dasd_rssd_features *) (prssdp + 1);
972 	memset(features, 0, sizeof(struct dasd_rssd_features));
973 
974 	ccw++;
975 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
976 	ccw->count = sizeof(struct dasd_rssd_features);
977 	ccw->cda = (__u32)(addr_t) features;
978 
979 	cqr->buildclk = get_clock();
980 	cqr->status = DASD_CQR_FILLED;
981 	rc = dasd_sleep_on(cqr);
982 	if (rc == 0) {
983 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
984 		features = (struct dasd_rssd_features *) (prssdp + 1);
985 		memcpy(&private->features, features,
986 		       sizeof(struct dasd_rssd_features));
987 	} else
988 		dev_warn(&device->cdev->dev, "Reading device feature codes"
989 			 " failed with rc=%d\n", rc);
990 	dasd_sfree_request(cqr, cqr->memdev);
991 	return rc;
992 }
993 
994 
995 /*
996  * Build CP for Perform Subsystem Function - SSC.
997  */
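/*
 * The SSC order uses suborder 0xc0; if enable_pav is set, the suborder
 * is extended by 0x08 and reserved[0] is set to 0x88, which requests
 * that the storage server enable PAV.
 */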
998 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
999 						    int enable_pav)
1000 {
1001 	struct dasd_ccw_req *cqr;
1002 	struct dasd_psf_ssc_data *psf_ssc_data;
1003 	struct ccw1 *ccw;
1004 
1005 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1006 				  sizeof(struct dasd_psf_ssc_data),
1007 				  device);
1008 
1009 	if (IS_ERR(cqr)) {
1010 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1011 			   "Could not allocate PSF-SSC request");
1012 		return cqr;
1013 	}
1014 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1015 	psf_ssc_data->order = PSF_ORDER_SSC;
1016 	psf_ssc_data->suborder = 0xc0;
1017 	if (enable_pav) {
1018 		psf_ssc_data->suborder |= 0x08;
1019 		psf_ssc_data->reserved[0] = 0x88;
1020 	}
1021 	ccw = cqr->cpaddr;
1022 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1023 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1024 	ccw->count = 66;
1025 
1026 	cqr->startdev = device;
1027 	cqr->memdev = device;
1028 	cqr->block = NULL;
1029 	cqr->retries = 256;
1030 	cqr->expires = 10*HZ;
1031 	cqr->buildclk = get_clock();
1032 	cqr->status = DASD_CQR_FILLED;
1033 	return cqr;
1034 }
1035 
1036 /*
1037  * Perform Subsystem Function.
1038  * It is necessary to trigger CIO for channel revalidation since this
1039  * call might change behaviour of DASD devices.
1040  */
1041 static int
1042 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1043 {
1044 	struct dasd_ccw_req *cqr;
1045 	int rc;
1046 
1047 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1048 	if (IS_ERR(cqr))
1049 		return PTR_ERR(cqr);
1050 
1051 	rc = dasd_sleep_on(cqr);
1052 	if (!rc)
1053 		/* trigger CIO to reprobe devices */
1054 		css_schedule_reprobe();
1055 	dasd_sfree_request(cqr, cqr->memdev);
1056 	return rc;
1057 }
1058 
1059 /*
1060  * Validate the storage server of the current device.
1061  */
1062 static void dasd_eckd_validate_server(struct dasd_device *device)
1063 {
1064 	int rc;
1065 	struct dasd_eckd_private *private;
1066 	int enable_pav;
1067 
1068 	if (dasd_nopav || MACHINE_IS_VM)
1069 		enable_pav = 0;
1070 	else
1071 		enable_pav = 1;
1072 	rc = dasd_eckd_psf_ssc(device, enable_pav);
1073 
1074 	/* the requested feature may not be available on the server,
1075 	 * therefore just report the error and go ahead */
1076 	private = (struct dasd_eckd_private *) device->private;
1077 	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1078 			"returned rc=%d", private->uid.ssid, rc);
1079 }
1080 
1081 /*
1082  * Check device characteristics.
1083  * If the device is accessible using ECKD discipline, the device is enabled.
1084  */
1085 static int
1086 dasd_eckd_check_characteristics(struct dasd_device *device)
1087 {
1088 	struct dasd_eckd_private *private;
1089 	struct dasd_block *block;
1090 	int is_known, rc;
1091 
1092 	if (!ccw_device_is_pathgroup(device->cdev)) {
1093 		dev_warn(&device->cdev->dev,
1094 			 "A channel path group could not be established\n");
1095 		return -EIO;
1096 	}
1097 	if (!ccw_device_is_multipath(device->cdev)) {
1098 		dev_info(&device->cdev->dev,
1099 			 "The DASD is not operating in multipath mode\n");
1100 	}
1101 	private = (struct dasd_eckd_private *) device->private;
1102 	if (!private) {
1103 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1104 		if (!private) {
1105 			dev_warn(&device->cdev->dev,
1106 				 "Allocating memory for private DASD data "
1107 				 "failed\n");
1108 			return -ENOMEM;
1109 		}
1110 		device->private = (void *) private;
1111 	} else {
1112 		memset(private, 0, sizeof(*private));
1113 	}
1114 	/* Invalidate status of initial analysis. */
1115 	private->init_cqr_status = -1;
1116 	/* Set default cache operations. */
1117 	private->attrib.operation = DASD_NORMAL_CACHE;
1118 	private->attrib.nr_cyl = 0;
1119 
1120 	/* Read Configuration Data */
1121 	rc = dasd_eckd_read_conf(device);
1122 	if (rc)
1123 		goto out_err1;
1124 
1125 	/* Generate device unique id and register in devmap */
1126 	rc = dasd_eckd_generate_uid(device, &private->uid);
1127 	if (rc)
1128 		goto out_err1;
1129 	dasd_set_uid(device->cdev, &private->uid);
1130 
1131 	if (private->uid.type == UA_BASE_DEVICE) {
1132 		block = dasd_alloc_block();
1133 		if (IS_ERR(block)) {
1134 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1135 					"could not allocate dasd "
1136 					"block structure");
1137 			rc = PTR_ERR(block);
1138 			goto out_err1;
1139 		}
1140 		device->block = block;
1141 		block->base = device;
1142 	}
1143 
1144 	/* register lcu with alias handling, enable PAV if this is a new lcu */
1145 	is_known = dasd_alias_make_device_known_to_lcu(device);
1146 	if (is_known < 0) {
1147 		rc = is_known;
1148 		goto out_err2;
1149 	}
1150 	/*
1151 	 * dasd_eckd_validate_server is done on the first device that
1152 	 * is found for an LCU. All other devices have to wait
1153 	 * for it, so they will read the correct feature codes.
1154 	 */
1155 	if (!is_known) {
1156 		dasd_eckd_validate_server(device);
1157 		dasd_alias_lcu_setup_complete(device);
1158 	} else
1159 		dasd_alias_wait_for_lcu_setup(device);
1160 
1161 	/* device may report different configuration data after LCU setup */
1162 	rc = dasd_eckd_read_conf(device);
1163 	if (rc)
1164 		goto out_err3;
1165 
1166 	/* Read Feature Codes */
1167 	dasd_eckd_read_features(device);
1168 
1169 	/* Read Device Characteristics */
1170 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1171 					 &private->rdc_data, 64);
1172 	if (rc) {
1173 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1174 				"Read device characteristic failed, rc=%d", rc);
1175 		goto out_err3;
1176 	}
1177 	/* find the valid cylinder size */
1178 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1179 	    private->rdc_data.long_no_cyl)
1180 		private->real_cyl = private->rdc_data.long_no_cyl;
1181 	else
1182 		private->real_cyl = private->rdc_data.no_cyl;
1183 
1184 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1185 		 "with %d cylinders, %d heads, %d sectors\n",
1186 		 private->rdc_data.dev_type,
1187 		 private->rdc_data.dev_model,
1188 		 private->rdc_data.cu_type,
1189 		 private->rdc_data.cu_model.model,
1190 		 private->real_cyl,
1191 		 private->rdc_data.trk_per_cyl,
1192 		 private->rdc_data.sec_per_trk);
1193 	return 0;
1194 
1195 out_err3:
1196 	dasd_alias_disconnect_device_from_lcu(device);
1197 out_err2:
1198 	dasd_free_block(device->block);
1199 	device->block = NULL;
1200 out_err1:
1201 	kfree(private->conf_data);
1202 	kfree(device->private);
1203 	device->private = NULL;
1204 	return rc;
1205 }
1206 
1207 static void dasd_eckd_uncheck_device(struct dasd_device *device)
1208 {
1209 	struct dasd_eckd_private *private;
1210 
1211 	private = (struct dasd_eckd_private *) device->private;
1212 	dasd_alias_disconnect_device_from_lcu(device);
1213 	private->ned = NULL;
1214 	private->sneq = NULL;
1215 	private->vdsneq = NULL;
1216 	private->gneq = NULL;
1217 	private->conf_len = 0;
1218 	kfree(private->conf_data);
1219 	private->conf_data = NULL;
1220 }
1221 
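/*
 * Build a channel program that reads the count fields of the first
 * four records on track 0 and of the first record on track 2 into
 * private->count_area. dasd_eckd_end_analysis evaluates the result to
 * detect the disk layout and the block size.
 */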
1222 static struct dasd_ccw_req *
1223 dasd_eckd_analysis_ccw(struct dasd_device *device)
1224 {
1225 	struct dasd_eckd_private *private;
1226 	struct eckd_count *count_data;
1227 	struct LO_eckd_data *LO_data;
1228 	struct dasd_ccw_req *cqr;
1229 	struct ccw1 *ccw;
1230 	int cplength, datasize;
1231 	int i;
1232 
1233 	private = (struct dasd_eckd_private *) device->private;
1234 
1235 	cplength = 8;
1236 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1237 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1238 	if (IS_ERR(cqr))
1239 		return cqr;
1240 	ccw = cqr->cpaddr;
1241 	/* Define extent for the first 3 tracks. */
1242 	define_extent(ccw++, cqr->data, 0, 2,
1243 		      DASD_ECKD_CCW_READ_COUNT, device);
1244 	LO_data = cqr->data + sizeof(struct DE_eckd_data);
1245 	/* Locate record for the first 4 records on track 0. */
1246 	ccw[-1].flags |= CCW_FLAG_CC;
1247 	locate_record(ccw++, LO_data++, 0, 0, 4,
1248 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1249 
1250 	count_data = private->count_area;
1251 	for (i = 0; i < 4; i++) {
1252 		ccw[-1].flags |= CCW_FLAG_CC;
1253 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1254 		ccw->flags = 0;
1255 		ccw->count = 8;
1256 		ccw->cda = (__u32)(addr_t) count_data;
1257 		ccw++;
1258 		count_data++;
1259 	}
1260 
1261 	/* Locate record for the first record on track 2. */
1262 	ccw[-1].flags |= CCW_FLAG_CC;
1263 	locate_record(ccw++, LO_data++, 2, 0, 1,
1264 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1265 	/* Read count ccw. */
1266 	ccw[-1].flags |= CCW_FLAG_CC;
1267 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1268 	ccw->flags = 0;
1269 	ccw->count = 8;
1270 	ccw->cda = (__u32)(addr_t) count_data;
1271 
1272 	cqr->block = NULL;
1273 	cqr->startdev = device;
1274 	cqr->memdev = device;
1275 	cqr->retries = 255;
1276 	cqr->buildclk = get_clock();
1277 	cqr->status = DASD_CQR_FILLED;
1278 	return cqr;
1279 }
1280 
1281 /* differentiate between 'no record found' and any other error */
1282 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1283 {
1284 	char *sense;
1285 	if (init_cqr->status == DASD_CQR_DONE)
1286 		return INIT_CQR_OK;
1287 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1288 		 init_cqr->status == DASD_CQR_FAILED) {
1289 		sense = dasd_get_sense(&init_cqr->irb);
1290 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1291 			return INIT_CQR_UNFORMATTED;
1292 		else
1293 			return INIT_CQR_ERROR;
1294 	} else
1295 		return INIT_CQR_ERROR;
1296 }
1297 
1298 /*
1299  * This is the callback function for the init_analysis cqr. It saves
1300  * the status of the initial analysis ccw before it frees it and kicks
1301  * the device to continue the startup sequence. This will call
1302  * dasd_eckd_do_analysis again (if the device has not been marked
1303  * for deletion in the meantime).
1304  */
1305 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1306 					void *data)
1307 {
1308 	struct dasd_eckd_private *private;
1309 	struct dasd_device *device;
1310 
1311 	device = init_cqr->startdev;
1312 	private = (struct dasd_eckd_private *) device->private;
1313 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1314 	dasd_sfree_request(init_cqr, device);
1315 	dasd_kick_device(device);
1316 }
1317 
1318 static int dasd_eckd_start_analysis(struct dasd_block *block)
1319 {
1320 	struct dasd_eckd_private *private;
1321 	struct dasd_ccw_req *init_cqr;
1322 
1323 	private = (struct dasd_eckd_private *) block->base->private;
1324 	init_cqr = dasd_eckd_analysis_ccw(block->base);
1325 	if (IS_ERR(init_cqr))
1326 		return PTR_ERR(init_cqr);
1327 	init_cqr->callback = dasd_eckd_analysis_callback;
1328 	init_cqr->callback_data = NULL;
1329 	init_cqr->expires = 5*HZ;
1330 	/* first try without ERP, so we can later handle unformatted
1331 	 * devices as a special case
1332 	 */
1333 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1334 	init_cqr->retries = 0;
1335 	dasd_add_request_head(init_cqr);
1336 	return -EAGAIN;
1337 }
1338 
1339 static int dasd_eckd_end_analysis(struct dasd_block *block)
1340 {
1341 	struct dasd_device *device;
1342 	struct dasd_eckd_private *private;
1343 	struct eckd_count *count_area;
1344 	unsigned int sb, blk_per_trk;
1345 	int status, i;
1346 	struct dasd_ccw_req *init_cqr;
1347 
1348 	device = block->base;
1349 	private = (struct dasd_eckd_private *) device->private;
1350 	status = private->init_cqr_status;
1351 	private->init_cqr_status = -1;
1352 	if (status == INIT_CQR_ERROR) {
1353 		/* try again, this time with full ERP */
1354 		init_cqr = dasd_eckd_analysis_ccw(device);
1355 		dasd_sleep_on(init_cqr);
1356 		status = dasd_eckd_analysis_evaluation(init_cqr);
1357 		dasd_sfree_request(init_cqr, device);
1358 	}
1359 
1360 	if (status == INIT_CQR_UNFORMATTED) {
1361 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1362 		return -EMEDIUMTYPE;
1363 	} else if (status == INIT_CQR_ERROR) {
1364 		dev_err(&device->cdev->dev,
1365 			"Detecting the DASD disk layout failed because "
1366 			"of an I/O error\n");
1367 		return -EIO;
1368 	}
1369 
1370 	private->uses_cdl = 1;
1371 	/* Check Track 0 for Compatible Disk Layout */
1372 	count_area = NULL;
1373 	for (i = 0; i < 3; i++) {
1374 		if (private->count_area[i].kl != 4 ||
1375 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1376 			private->uses_cdl = 0;
1377 			break;
1378 		}
1379 	}
1380 	if (i == 3)
1381 		count_area = &private->count_area[4];
1382 
1383 	if (private->uses_cdl == 0) {
1384 		for (i = 0; i < 5; i++) {
1385 			if ((private->count_area[i].kl != 0) ||
1386 			    (private->count_area[i].dl !=
1387 			     private->count_area[0].dl))
1388 				break;
1389 		}
1390 		if (i == 5)
1391 			count_area = &private->count_area[0];
1392 	} else {
1393 		if (private->count_area[3].record == 1)
1394 			dev_warn(&device->cdev->dev,
1395 				 "Track 0 has no records following the VTOC\n");
1396 	}
1397 	if (count_area != NULL && count_area->kl == 0) {
1398 		/* we found nothing violating our disk layout */
1399 		if (dasd_check_blocksize(count_area->dl) == 0)
1400 			block->bp_block = count_area->dl;
1401 	}
1402 	if (block->bp_block == 0) {
1403 		dev_warn(&device->cdev->dev,
1404 			 "The disk layout of the DASD is not supported\n");
1405 		return -EMEDIUMTYPE;
1406 	}
1407 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
1408 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
1409 		block->s2b_shift++;
1410 
1411 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1412 	block->blocks = (private->real_cyl *
1413 			  private->rdc_data.trk_per_cyl *
1414 			  blk_per_trk);
1415 
1416 	dev_info(&device->cdev->dev,
1417 		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1418 		 "%s\n", (block->bp_block >> 10),
1419 		 ((private->real_cyl *
1420 		   private->rdc_data.trk_per_cyl *
1421 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
1422 		 ((blk_per_trk * block->bp_block) >> 10),
1423 		 private->uses_cdl ?
1424 		 "compatible disk layout" : "linux disk layout");
1425 
1426 	return 0;
1427 }
1428 
1429 static int dasd_eckd_do_analysis(struct dasd_block *block)
1430 {
1431 	struct dasd_eckd_private *private;
1432 
1433 	private = (struct dasd_eckd_private *) block->base->private;
1434 	if (private->init_cqr_status < 0)
1435 		return dasd_eckd_start_analysis(block);
1436 	else
1437 		return dasd_eckd_end_analysis(block);
1438 }
1439 
1440 static int dasd_eckd_ready_to_online(struct dasd_device *device)
1441 {
1442 	return dasd_alias_add_device(device);
1443 };
1444 
1445 static int dasd_eckd_online_to_ready(struct dasd_device *device)
1446 {
1447 	return dasd_alias_remove_device(device);
1448 };
1449 
1450 static int
1451 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1452 {
1453 	struct dasd_eckd_private *private;
1454 
1455 	private = (struct dasd_eckd_private *) block->base->private;
1456 	if (dasd_check_blocksize(block->bp_block) == 0) {
1457 		geo->sectors = recs_per_track(&private->rdc_data,
1458 					      0, block->bp_block);
1459 	}
1460 	geo->cylinders = private->rdc_data.no_cyl;
1461 	geo->heads = private->rdc_data.trk_per_cyl;
1462 	return 0;
1463 }
1464 
1465 static struct dasd_ccw_req *
1466 dasd_eckd_format_device(struct dasd_device * device,
1467 			struct format_data_t * fdata)
1468 {
1469 	struct dasd_eckd_private *private;
1470 	struct dasd_ccw_req *fcp;
1471 	struct eckd_count *ect;
1472 	struct ccw1 *ccw;
1473 	void *data;
1474 	int rpt;
1475 	struct ch_t address;
1476 	int cplength, datasize;
1477 	int i;
1478 	int intensity = 0;
1479 	int r0_perm;
1480 
1481 	private = (struct dasd_eckd_private *) device->private;
1482 	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1483 	set_ch_t(&address,
1484 		 fdata->start_unit / private->rdc_data.trk_per_cyl,
1485 		 fdata->start_unit % private->rdc_data.trk_per_cyl);
1486 
1487 	/* Sanity checks. */
1488 	if (fdata->start_unit >=
1489 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
1490 		dev_warn(&device->cdev->dev, "Start track number %d used in "
1491 			 "formatting is too big\n", fdata->start_unit);
1492 		return ERR_PTR(-EINVAL);
1493 	}
1494 	if (fdata->start_unit > fdata->stop_unit) {
1495 		dev_warn(&device->cdev->dev, "Start track %d used in "
1496 			 "formatting exceeds end track\n", fdata->start_unit);
1497 		return ERR_PTR(-EINVAL);
1498 	}
1499 	if (dasd_check_blocksize(fdata->blksize) != 0) {
1500 		dev_warn(&device->cdev->dev,
1501 			 "The DASD cannot be formatted with block size %d\n",
1502 			 fdata->blksize);
1503 		return ERR_PTR(-EINVAL);
1504 	}
1505 
1506 	/*
1507 	 * fdata->intensity is a bit string that tells us what to do:
1508 	 *   Bit 0: write record zero
1509 	 *   Bit 1: write home address, currently not supported
1510 	 *   Bit 2: invalidate tracks
1511 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
1512 	 *   Bit 4: do not allow storage subsystem to modify record zero
1513 	 * Only some bit combinations make sense.
1514 	 */
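	/*
	 * For example, intensity 0x09 means "write record zero and format
	 * the track using the compatible disk layout", while 0x0c means
	 * "invalidate the track, cdl" (see the switch statements below).
	 */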
1515 	if (fdata->intensity & 0x10) {
1516 		r0_perm = 0;
1517 		intensity = fdata->intensity & ~0x10;
1518 	} else {
1519 		r0_perm = 1;
1520 		intensity = fdata->intensity;
1521 	}
1522 	switch (intensity) {
1523 	case 0x00:	/* Normal format */
1524 	case 0x08:	/* Normal format, use cdl. */
1525 		cplength = 2 + rpt;
1526 		datasize = sizeof(struct DE_eckd_data) +
1527 			sizeof(struct LO_eckd_data) +
1528 			rpt * sizeof(struct eckd_count);
1529 		break;
1530 	case 0x01:	/* Write record zero and format track. */
1531 	case 0x09:	/* Write record zero and format track, use cdl. */
1532 		cplength = 3 + rpt;
1533 		datasize = sizeof(struct DE_eckd_data) +
1534 			sizeof(struct LO_eckd_data) +
1535 			sizeof(struct eckd_count) +
1536 			rpt * sizeof(struct eckd_count);
1537 		break;
1538 	case 0x04:	/* Invalidate track. */
1539 	case 0x0c:	/* Invalidate track, use cdl. */
1540 		cplength = 3;
1541 		datasize = sizeof(struct DE_eckd_data) +
1542 			sizeof(struct LO_eckd_data) +
1543 			sizeof(struct eckd_count);
1544 		break;
1545 	default:
1546 		dev_warn(&device->cdev->dev, "An I/O control call used "
1547 			 "incorrect flags 0x%x\n", fdata->intensity);
1548 		return ERR_PTR(-EINVAL);
1549 	}
1550 	/* Allocate the format ccw request. */
1551 	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1552 	if (IS_ERR(fcp))
1553 		return fcp;
1554 
1555 	data = fcp->data;
1556 	ccw = fcp->cpaddr;
1557 
1558 	switch (intensity & ~0x08) {
1559 	case 0x00: /* Normal format. */
1560 		define_extent(ccw++, (struct DE_eckd_data *) data,
1561 			      fdata->start_unit, fdata->start_unit,
1562 			      DASD_ECKD_CCW_WRITE_CKD, device);
1563 		/* grant subsystem permission to format R0 */
1564 		if (r0_perm)
1565 			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
1566 		data += sizeof(struct DE_eckd_data);
1567 		ccw[-1].flags |= CCW_FLAG_CC;
1568 		locate_record(ccw++, (struct LO_eckd_data *) data,
1569 			      fdata->start_unit, 0, rpt,
1570 			      DASD_ECKD_CCW_WRITE_CKD, device,
1571 			      fdata->blksize);
1572 		data += sizeof(struct LO_eckd_data);
1573 		break;
1574 	case 0x01: /* Write record zero + format track. */
1575 		define_extent(ccw++, (struct DE_eckd_data *) data,
1576 			      fdata->start_unit, fdata->start_unit,
1577 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1578 			      device);
1579 		data += sizeof(struct DE_eckd_data);
1580 		ccw[-1].flags |= CCW_FLAG_CC;
1581 		locate_record(ccw++, (struct LO_eckd_data *) data,
1582 			      fdata->start_unit, 0, rpt + 1,
1583 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1584 			      device->block->bp_block);
1585 		data += sizeof(struct LO_eckd_data);
1586 		break;
1587 	case 0x04: /* Invalidate track. */
1588 		define_extent(ccw++, (struct DE_eckd_data *) data,
1589 			      fdata->start_unit, fdata->start_unit,
1590 			      DASD_ECKD_CCW_WRITE_CKD, device);
1591 		data += sizeof(struct DE_eckd_data);
1592 		ccw[-1].flags |= CCW_FLAG_CC;
1593 		locate_record(ccw++, (struct LO_eckd_data *) data,
1594 			      fdata->start_unit, 0, 1,
1595 			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
1596 		data += sizeof(struct LO_eckd_data);
1597 		break;
1598 	}
1599 	if (intensity & 0x01) {	/* write record zero */
1600 		ect = (struct eckd_count *) data;
1601 		data += sizeof(struct eckd_count);
1602 		ect->cyl = address.cyl;
1603 		ect->head = address.head;
1604 		ect->record = 0;
1605 		ect->kl = 0;
1606 		ect->dl = 8;
1607 		ccw[-1].flags |= CCW_FLAG_CC;
1608 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1609 		ccw->flags = CCW_FLAG_SLI;
1610 		ccw->count = 8;
1611 		ccw->cda = (__u32)(addr_t) ect;
1612 		ccw++;
1613 	}
1614 	if ((intensity & ~0x08) & 0x04) {	/* erase track */
1615 		ect = (struct eckd_count *) data;
1616 		data += sizeof(struct eckd_count);
1617 		ect->cyl = address.cyl;
1618 		ect->head = address.head;
1619 		ect->record = 1;
1620 		ect->kl = 0;
1621 		ect->dl = 0;
1622 		ccw[-1].flags |= CCW_FLAG_CC;
1623 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1624 		ccw->flags = CCW_FLAG_SLI;
1625 		ccw->count = 8;
1626 		ccw->cda = (__u32)(addr_t) ect;
1627 	} else {		/* write remaining records */
1628 		for (i = 0; i < rpt; i++) {
1629 			ect = (struct eckd_count *) data;
1630 			data += sizeof(struct eckd_count);
1631 			ect->cyl = address.cyl;
1632 			ect->head = address.head;
1633 			ect->record = i + 1;
1634 			ect->kl = 0;
1635 			ect->dl = fdata->blksize;
1636 			/* Check for special tracks 0-1 when formatting CDL */
1637 			if ((intensity & 0x08) &&
1638 			    fdata->start_unit == 0) {
1639 				if (i < 3) {
1640 					ect->kl = 4;
1641 					ect->dl = sizes_trk0[i] - 4;
1642 				}
1643 			}
1644 			if ((intensity & 0x08) &&
1645 			    fdata->start_unit == 1) {
1646 				ect->kl = 44;
1647 				ect->dl = LABEL_SIZE - 44;
1648 			}
1649 			ccw[-1].flags |= CCW_FLAG_CC;
1650 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1651 			ccw->flags = CCW_FLAG_SLI;
1652 			ccw->count = 8;
1653 			ccw->cda = (__u32)(addr_t) ect;
1654 			ccw++;
1655 		}
1656 	}
1657 	fcp->startdev = device;
1658 	fcp->memdev = device;
1659 	fcp->retries = 256;
1660 	fcp->buildclk = get_clock();
1661 	fcp->status = DASD_CQR_FILLED;
1662 	return fcp;
1663 }
1664 
1665 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1666 {
1667 	cqr->status = DASD_CQR_FILLED;
1668 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
1669 		dasd_eckd_reset_ccw_to_base_io(cqr);
1670 		cqr->startdev = cqr->block->base;
1671 	}
1672 };
1673 
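/*
 * Select the error recovery procedure: the 3990-style ERP is used for
 * all current control unit types (3990, 2105, 2107, 1750); everything
 * else falls back to the default ERP.
 */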
1674 static dasd_erp_fn_t
1675 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1676 {
1677 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1678 	struct ccw_device *cdev = device->cdev;
1679 
1680 	switch (cdev->id.cu_type) {
1681 	case 0x3990:
1682 	case 0x2105:
1683 	case 0x2107:
1684 	case 0x1750:
1685 		return dasd_3990_erp_action;
1686 	case 0x9343:
1687 	case 0x3880:
1688 	default:
1689 		return dasd_default_erp_action;
1690 	}
1691 }
1692 
1693 static dasd_erp_fn_t
1694 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1695 {
1696 	return dasd_default_erp_postaction;
1697 }
1698 
1699 
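/*
 * Sort unsolicited interrupts: state change pending, summary unit
 * check and service information messages (SIM) get dedicated handling,
 * fake IRBs for pending start functions are ignored, and everything
 * else is merely logged before the device bottom half is scheduled.
 */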
1700 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1701 						   struct irb *irb)
1702 {
1703 	char mask;
1704 	char *sense = NULL;
1705 
1706 	/* first of all check for state change pending interrupt */
1707 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1708 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
1709 		dasd_generic_handle_state_change(device);
1710 		return;
1711 	}
1712 
1713 	/* summary unit check */
1714 	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1715 	    (irb->ecw[7] == 0x0D)) {
1716 		dasd_alias_handle_summary_unit_check(device, irb);
1717 		return;
1718 	}
1719 
1720 	sense = dasd_get_sense(irb);
1721 	/* service information message SIM */
1722 	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1723 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1724 		dasd_3990_erp_handle_sim(device, sense);
1725 		dasd_schedule_device_bh(device);
1726 		return;
1727 	}
1728 
1729 	if ((scsw_cc(&irb->scsw) == 1) &&
1730 	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1731 	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
1732 	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
1733 		/* fake irb, do nothing; they are handled elsewhere */
1734 		dasd_schedule_device_bh(device);
1735 		return;
1736 	}
1737 
1738 	if (!sense) {
1739 		/* just report other unsolicited interrupts */
1740 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1741 			    "unsolicited interrupt received");
1742 	} else {
1743 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1744 			    "unsolicited interrupt received "
1745 			    "(sense available)");
1746 		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1747 	}
1748 
1749 	dasd_schedule_device_bh(device);
1750 	return;
1751 };
1752 
1753 
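/*
 * Build a command mode channel program for a block layer request that
 * addresses every block individually: one define extent (or prefix)
 * CCW followed by locate record and read/write CCWs, one per block,
 * plus additional locate record CCWs where the cdl special tracks
 * change the record size.
 */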
1754 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1755 					       struct dasd_device *startdev,
1756 					       struct dasd_block *block,
1757 					       struct request *req,
1758 					       sector_t first_rec,
1759 					       sector_t last_rec,
1760 					       sector_t first_trk,
1761 					       sector_t last_trk,
1762 					       unsigned int first_offs,
1763 					       unsigned int last_offs,
1764 					       unsigned int blk_per_trk,
1765 					       unsigned int blksize)
1766 {
1767 	struct dasd_eckd_private *private;
1768 	unsigned long *idaws;
1769 	struct LO_eckd_data *LO_data;
1770 	struct dasd_ccw_req *cqr;
1771 	struct ccw1 *ccw;
1772 	struct req_iterator iter;
1773 	struct bio_vec *bv;
1774 	char *dst;
1775 	unsigned int off;
1776 	int count, cidaw, cplength, datasize;
1777 	sector_t recid;
1778 	unsigned char cmd, rcmd;
1779 	int use_prefix;
1780 	struct dasd_device *basedev;
1781 
1782 	basedev = block->base;
1783 	private = (struct dasd_eckd_private *) basedev->private;
1784 	if (rq_data_dir(req) == READ)
1785 		cmd = DASD_ECKD_CCW_READ_MT;
1786 	else if (rq_data_dir(req) == WRITE)
1787 		cmd = DASD_ECKD_CCW_WRITE_MT;
1788 	else
1789 		return ERR_PTR(-EINVAL);
1790 
1791 	/* Check struct bio and count the number of blocks for the request. */
1792 	count = 0;
1793 	cidaw = 0;
1794 	rq_for_each_segment(bv, req, iter) {
1795 		if (bv->bv_len & (blksize - 1))
1796 			/* Eckd can only do full blocks. */
1797 			return ERR_PTR(-EINVAL);
1798 		count += bv->bv_len >> (block->s2b_shift + 9);
1799 #if defined(CONFIG_64BIT)
1800 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1801 			cidaw += bv->bv_len >> (block->s2b_shift + 9);
1802 #endif
1803 	}
1804 	/* Paranoia. */
1805 	if (count != last_rec - first_rec + 1)
1806 		return ERR_PTR(-EINVAL);
1807 
1808 	/* use the prefix command if available */
1809 	use_prefix = private->features.feature[8] & 0x01;
1810 	if (use_prefix) {
1811 		/* 1x prefix + number of blocks */
1812 		cplength = 2 + count;
1813 		/* 1x prefix + cidaws*sizeof(long) */
1814 		datasize = sizeof(struct PFX_eckd_data) +
1815 			sizeof(struct LO_eckd_data) +
1816 			cidaw * sizeof(unsigned long);
1817 	} else {
1818 		/* 1x define extent + 1x locate record + number of blocks */
1819 		cplength = 2 + count;
1820 		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1821 		datasize = sizeof(struct DE_eckd_data) +
1822 			sizeof(struct LO_eckd_data) +
1823 			cidaw * sizeof(unsigned long);
1824 	}
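	/*
	 * Illustrative sizing (assuming prefix support, no IDAWs and a
	 * request outside the cdl area): reading four blocks gives
	 * count = 4, so cplength = 2 + 4 = 6 CCWs and
	 * datasize = sizeof(struct PFX_eckd_data) +
	 *	      sizeof(struct LO_eckd_data).
	 */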
1825 	/* Find out the number of additional locate record ccws for cdl. */
1826 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1827 		if (last_rec >= 2*blk_per_trk)
1828 			count = 2*blk_per_trk - first_rec;
1829 		cplength += count;
1830 		datasize += count*sizeof(struct LO_eckd_data);
1831 	}
1832 	/* Allocate the ccw request. */
1833 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1834 				   startdev);
1835 	if (IS_ERR(cqr))
1836 		return cqr;
1837 	ccw = cqr->cpaddr;
1838 	/* First ccw is define extent or prefix. */
1839 	if (use_prefix) {
1840 		if (prefix(ccw++, cqr->data, first_trk,
1841 			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
1842 			/* Clock not in sync and XRC is enabled.
1843 			 * Try again later.
1844 			 */
1845 			dasd_sfree_request(cqr, startdev);
1846 			return ERR_PTR(-EAGAIN);
1847 		}
1848 		idaws = (unsigned long *) (cqr->data +
1849 					   sizeof(struct PFX_eckd_data));
1850 	} else {
1851 		if (define_extent(ccw++, cqr->data, first_trk,
1852 				  last_trk, cmd, startdev) == -EAGAIN) {
1853 			/* Clock not in sync and XRC is enabled.
1854 			 * Try again later.
1855 			 */
1856 			dasd_sfree_request(cqr, startdev);
1857 			return ERR_PTR(-EAGAIN);
1858 		}
1859 		idaws = (unsigned long *) (cqr->data +
1860 					   sizeof(struct DE_eckd_data));
1861 	}
1862 	/* Build locate_record + read/write ccws. */
1863 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1864 	recid = first_rec;
1865 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1866 		/* Only standard blocks so there is just one locate record. */
1867 		ccw[-1].flags |= CCW_FLAG_CC;
1868 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1869 			      last_rec - recid + 1, cmd, basedev, blksize);
1870 	}
1871 	rq_for_each_segment(bv, req, iter) {
1872 		dst = page_address(bv->bv_page) + bv->bv_offset;
1873 		if (dasd_page_cache) {
1874 			char *copy = kmem_cache_alloc(dasd_page_cache,
1875 						      GFP_DMA | __GFP_NOWARN);
1876 			if (copy && rq_data_dir(req) == WRITE)
1877 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1878 			if (copy)
1879 				dst = copy + bv->bv_offset;
1880 		}
1881 		for (off = 0; off < bv->bv_len; off += blksize) {
1882 			sector_t trkid = recid;
1883 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
1884 			rcmd = cmd;
1885 			count = blksize;
1886 			/* Locate record for cdl special block ? */
1887 			if (private->uses_cdl && recid < 2*blk_per_trk) {
1888 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1889 					rcmd |= 0x8;
1890 					count = dasd_eckd_cdl_reclen(recid);
1891 					if (count < blksize &&
1892 					    rq_data_dir(req) == READ)
1893 						memset(dst + count, 0xe5,
1894 						       blksize - count);
1895 				}
1896 				ccw[-1].flags |= CCW_FLAG_CC;
1897 				locate_record(ccw++, LO_data++,
1898 					      trkid, recoffs + 1,
1899 					      1, rcmd, basedev, count);
1900 			}
1901 			/* Locate record for standard blocks ? */
1902 			if (private->uses_cdl && recid == 2*blk_per_trk) {
1903 				ccw[-1].flags |= CCW_FLAG_CC;
1904 				locate_record(ccw++, LO_data++,
1905 					      trkid, recoffs + 1,
1906 					      last_rec - recid + 1,
1907 					      cmd, basedev, count);
1908 			}
1909 			/* Read/write ccw. */
1910 			ccw[-1].flags |= CCW_FLAG_CC;
1911 			ccw->cmd_code = rcmd;
1912 			ccw->count = count;
1913 			if (idal_is_needed(dst, blksize)) {
1914 				ccw->cda = (__u32)(addr_t) idaws;
1915 				ccw->flags = CCW_FLAG_IDA;
1916 				idaws = idal_create_words(idaws, dst, blksize);
1917 			} else {
1918 				ccw->cda = (__u32)(addr_t) dst;
1919 				ccw->flags = 0;
1920 			}
1921 			ccw++;
1922 			dst += blksize;
1923 			recid++;
1924 		}
1925 	}
1926 	if (blk_noretry_request(req) ||
1927 	    block->base->features & DASD_FEATURE_FAILFAST)
1928 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1929 	cqr->startdev = startdev;
1930 	cqr->memdev = startdev;
1931 	cqr->block = block;
1932 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
1933 	cqr->lpm = private->path_data.ppm;
1934 	cqr->retries = 256;
1935 	cqr->buildclk = get_clock();
1936 	cqr->status = DASD_CQR_FILLED;
1937 	return cqr;
1938 }
1939 
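/*
 * Build a command-mode channel program that transfers whole tracks:
 * a single prefix CCW with embedded locate record, followed by one
 * read/write track data CCW per track, each pointing to an IDA list.
 */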
1940 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1941 					       struct dasd_device *startdev,
1942 					       struct dasd_block *block,
1943 					       struct request *req,
1944 					       sector_t first_rec,
1945 					       sector_t last_rec,
1946 					       sector_t first_trk,
1947 					       sector_t last_trk,
1948 					       unsigned int first_offs,
1949 					       unsigned int last_offs,
1950 					       unsigned int blk_per_trk,
1951 					       unsigned int blksize)
1952 {
1953 	struct dasd_eckd_private *private;
1954 	unsigned long *idaws;
1955 	struct dasd_ccw_req *cqr;
1956 	struct ccw1 *ccw;
1957 	struct req_iterator iter;
1958 	struct bio_vec *bv;
1959 	char *dst, *idaw_dst;
1960 	unsigned int cidaw, cplength, datasize;
1961 	unsigned int tlf;
1962 	sector_t recid;
1963 	unsigned char cmd;
1964 	struct dasd_device *basedev;
1965 	unsigned int trkcount, count, count_to_trk_end;
1966 	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
1967 	unsigned char new_track, end_idaw;
1968 	sector_t trkid;
1969 	unsigned int recoffs;
1970 
1971 	basedev = block->base;
1972 	private = (struct dasd_eckd_private *) basedev->private;
1973 	if (rq_data_dir(req) == READ)
1974 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
1975 	else if (rq_data_dir(req) == WRITE)
1976 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
1977 	else
1978 		return ERR_PTR(-EINVAL);
1979 
1980 	/* Track-based I/O needs IDAWs for each page, and not just for
1981 	 * 64 bit addresses. We need additional idals for pages
1982 	 * that get filled from two tracks, so we use the number
1983 	 * of records as an upper limit.
1984 	 */
1985 	cidaw = last_rec - first_rec + 1;
1986 	trkcount = last_trk - first_trk + 1;
1987 
1988 	/* 1x prefix + one read/write ccw per track */
1989 	cplength = 1 + trkcount;
1990 
1991 	/* on 31-bit we need space for two 32 bit addresses per page,
1992 	 * on 64-bit for one 64 bit address
1993 	 */
1994 	datasize = sizeof(struct PFX_eckd_data) +
1995 		cidaw * sizeof(unsigned long long);
1996 
1997 	/* Allocate the ccw request. */
1998 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1999 				   startdev);
2000 	if (IS_ERR(cqr))
2001 		return cqr;
2002 	ccw = cqr->cpaddr;
2003 	/* transfer length factor: how many bytes to read from the last track */
2004 	if (first_trk == last_trk)
2005 		tlf = last_offs - first_offs + 1;
2006 	else
2007 		tlf = last_offs + 1;
2008 	tlf *= blksize;
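	/*
	 * Example: a request spanning several tracks whose last block is
	 * the third record of the final track (last_offs == 2) transfers
	 * tlf = 3 * blksize bytes from that track.
	 */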
2009 
2010 	if (prefix_LRE(ccw++, cqr->data, first_trk,
2011 		       last_trk, cmd, basedev, startdev,
2012 		       1 /* format */, first_offs + 1,
2013 		       trkcount, blksize,
2014 		       tlf) == -EAGAIN) {
2015 		/* Clock not in sync and XRC is enabled.
2016 		 * Try again later.
2017 		 */
2018 		dasd_sfree_request(cqr, startdev);
2019 		return ERR_PTR(-EAGAIN);
2020 	}
2021 
2022 	/*
2023 	 * The translation of request into ccw programs must meet the
2024 	 * following conditions:
2025 	 * - all idaws but the first and the last must address full pages
2026 	 *   (or 2K blocks on 31-bit)
2027 	 * - the scope of a ccw and its idal ends at the track boundaries
2028 	 */
2029 	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
2030 	recid = first_rec;
2031 	new_track = 1;
2032 	end_idaw = 0;
2033 	len_to_track_end = 0;
2034 	idaw_dst = 0;
2035 	idaw_len = 0;
2036 	rq_for_each_segment(bv, req, iter) {
2037 		dst = page_address(bv->bv_page) + bv->bv_offset;
2038 		seg_len = bv->bv_len;
2039 		while (seg_len) {
2040 			if (new_track) {
2041 				trkid = recid;
2042 				recoffs = sector_div(trkid, blk_per_trk);
2043 				count_to_trk_end = blk_per_trk - recoffs;
2044 				count = min((last_rec - recid + 1),
2045 					    (sector_t)count_to_trk_end);
2046 				len_to_track_end = count * blksize;
2047 				ccw[-1].flags |= CCW_FLAG_CC;
2048 				ccw->cmd_code = cmd;
2049 				ccw->count = len_to_track_end;
2050 				ccw->cda = (__u32)(addr_t)idaws;
2051 				ccw->flags = CCW_FLAG_IDA;
2052 				ccw++;
2053 				recid += count;
2054 				new_track = 0;
2055 				/* first idaw for a ccw may start anywhere */
2056 				if (!idaw_dst)
2057 					idaw_dst = dst;
2058 			}
2059 			/* If we start a new idaw, we must make sure that it
2060 			 * starts on an IDA_BLOCK_SIZE boundary.
2061 			 * If we continue an idaw, we must make sure that the
2062 			 * current segment begins where the so far accumulated
2063 			 * current segment begins where the idaw accumulated
2064 			 * so far ends.
2065 			if (!idaw_dst) {
2066 				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2067 					dasd_sfree_request(cqr, startdev);
2068 					return ERR_PTR(-ERANGE);
2069 				} else
2070 					idaw_dst = dst;
2071 			}
2072 			if ((idaw_dst + idaw_len) != dst) {
2073 				dasd_sfree_request(cqr, startdev);
2074 				return ERR_PTR(-ERANGE);
2075 			}
2076 			part_len = min(seg_len, len_to_track_end);
2077 			seg_len -= part_len;
2078 			dst += part_len;
2079 			idaw_len += part_len;
2080 			len_to_track_end -= part_len;
2081 			/* If the collected memory area ends on an IDA_BLOCK
2082 			 * border, create an idaw. idal_create_words will
2083 			 * handle cases where idaw_len is larger than
2084 			 * IDA_BLOCK_SIZE.
2085 			 */
2086 			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2087 				end_idaw = 1;
2088 			/* We also need to end the idaw at track end */
2089 			if (!len_to_track_end) {
2090 				new_track = 1;
2091 				end_idaw = 1;
2092 			}
2093 			if (end_idaw) {
2094 				idaws = idal_create_words(idaws, idaw_dst,
2095 							  idaw_len);
2096 				idaw_dst = 0;
2097 				idaw_len = 0;
2098 				end_idaw = 0;
2099 			}
2100 		}
2101 	}
2102 
2103 	if (blk_noretry_request(req) ||
2104 	    block->base->features & DASD_FEATURE_FAILFAST)
2105 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2106 	cqr->startdev = startdev;
2107 	cqr->memdev = startdev;
2108 	cqr->block = block;
2109 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2110 	cqr->lpm = private->path_data.ppm;
2111 	cqr->retries = 256;
2112 	cqr->buildclk = get_clock();
2113 	cqr->status = DASD_CQR_FILLED;
2114 	return cqr;
2115 }
2116 
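/*
 * Set up the prefix data (define extent + locate record) for a
 * transport-mode request and add it as the first DCW of the ITCW.
 */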
2117 static int prepare_itcw(struct itcw *itcw,
2118 			unsigned int trk, unsigned int totrk, int cmd,
2119 			struct dasd_device *basedev,
2120 			struct dasd_device *startdev,
2121 			unsigned int rec_on_trk, int count,
2122 			unsigned int blksize,
2123 			unsigned int total_data_size,
2124 			unsigned int tlf,
2125 			unsigned int blk_per_trk)
2126 {
2127 	struct PFX_eckd_data pfxdata;
2128 	struct dasd_eckd_private *basepriv, *startpriv;
2129 	struct DE_eckd_data *dedata;
2130 	struct LRE_eckd_data *lredata;
2131 	struct dcw *dcw;
2132 
2133 	u32 begcyl, endcyl;
2134 	u16 heads, beghead, endhead;
2135 	u8 pfx_cmd;
2136 
2137 	int rc = 0;
2138 	int sector = 0;
2139 	int dn, d;
2140 
2141 
2142 	/* setup prefix data */
2143 	basepriv = (struct dasd_eckd_private *) basedev->private;
2144 	startpriv = (struct dasd_eckd_private *) startdev->private;
2145 	dedata = &pfxdata.define_extent;
2146 	lredata = &pfxdata.locate_record;
2147 
2148 	memset(&pfxdata, 0, sizeof(pfxdata));
2149 	pfxdata.format = 1; /* PFX with LRE */
2150 	pfxdata.base_address = basepriv->ned->unit_addr;
2151 	pfxdata.base_lss = basepriv->ned->ID;
2152 	pfxdata.validity.define_extent = 1;
2153 
2154 	/* private uid is kept up to date, conf_data may be outdated */
2155 	if (startpriv->uid.type != UA_BASE_DEVICE) {
2156 		pfxdata.validity.verify_base = 1;
2157 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2158 			pfxdata.validity.hyper_pav = 1;
2159 	}
2160 
2161 	switch (cmd) {
2162 	case DASD_ECKD_CCW_READ_TRACK_DATA:
2163 		dedata->mask.perm = 0x1;
2164 		dedata->attributes.operation = basepriv->attrib.operation;
2165 		dedata->blk_size = blksize;
2166 		dedata->ga_extended |= 0x42;
2167 		lredata->operation.orientation = 0x0;
2168 		lredata->operation.operation = 0x0C;
2169 		lredata->auxiliary.check_bytes = 0x01;
2170 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2171 		break;
2172 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2173 		dedata->mask.perm = 0x02;
2174 		dedata->attributes.operation = basepriv->attrib.operation;
2175 		dedata->blk_size = blksize;
2176 		rc = check_XRC_on_prefix(&pfxdata, basedev);
2177 		dedata->ga_extended |= 0x42;
2178 		lredata->operation.orientation = 0x0;
2179 		lredata->operation.operation = 0x3F;
2180 		lredata->extended_operation = 0x23;
2181 		lredata->auxiliary.check_bytes = 0x2;
2182 		pfx_cmd = DASD_ECKD_CCW_PFX;
2183 		break;
2184 	default:
2185 		DBF_DEV_EVENT(DBF_ERR, basedev,
2186 			      "prepare itcw, unknown opcode 0x%x", cmd);
2187 		BUG();
2188 		break;
2189 	}
2190 	if (rc)
2191 		return rc;
2192 
2193 	dedata->attributes.mode = 0x3;	/* ECKD */
2194 
2195 	heads = basepriv->rdc_data.trk_per_cyl;
2196 	begcyl = trk / heads;
2197 	beghead = trk % heads;
2198 	endcyl = totrk / heads;
2199 	endhead = totrk % heads;
2200 
2201 	/* check for sequential prestage - enhance cylinder range */
2202 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2203 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
2204 
2205 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2206 			endcyl += basepriv->attrib.nr_cyl;
2207 		else
2208 			endcyl = (basepriv->real_cyl - 1);
2209 	}
2210 
2211 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
2212 	set_ch_t(&dedata->end_ext, endcyl, endhead);
2213 
2214 	dedata->ep_format = 0x20; /* records per track is valid */
2215 	dedata->ep_rec_per_track = blk_per_trk;
2216 
2217 	if (rec_on_trk) {
2218 		switch (basepriv->rdc_data.dev_type) {
2219 		case 0x3390:
2220 			dn = ceil_quot(blksize + 6, 232);
2221 			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2222 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2223 			break;
2224 		case 0x3380:
2225 			d = 7 + ceil_quot(blksize + 12, 32);
2226 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2227 			break;
2228 		}
2229 	}
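	/*
	 * Illustrative only: for a 3390 with 4KB blocks,
	 * dn = ceil(4102 / 232) = 18 and d = 9 + ceil(4210 / 34) = 133,
	 * so the second record starts at sector
	 * (49 + 1 * (10 + 133)) / 8 = 24.
	 */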
2230 
2231 	lredata->auxiliary.length_valid = 1;
2232 	lredata->auxiliary.length_scope = 1;
2233 	lredata->auxiliary.imbedded_ccw_valid = 1;
2234 	lredata->length = tlf;
2235 	lredata->imbedded_ccw = cmd;
2236 	lredata->count = count;
2237 	lredata->sector = sector;
2238 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
2239 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
2240 	lredata->search_arg.head = lredata->seek_addr.head;
2241 	lredata->search_arg.record = rec_on_trk;
2242 
2243 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2244 		     &pfxdata, sizeof(pfxdata), total_data_size);
2245 
2246 	return rc;
2247 }
2248 
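/*
 * Build a transport-mode (TCW/TIDAW) channel program: one ITCW whose
 * first DCW carries the prefix data and which maps each request
 * segment to exactly one TIDAW.
 */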
2249 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2250 					       struct dasd_device *startdev,
2251 					       struct dasd_block *block,
2252 					       struct request *req,
2253 					       sector_t first_rec,
2254 					       sector_t last_rec,
2255 					       sector_t first_trk,
2256 					       sector_t last_trk,
2257 					       unsigned int first_offs,
2258 					       unsigned int last_offs,
2259 					       unsigned int blk_per_trk,
2260 					       unsigned int blksize)
2261 {
2262 	struct dasd_eckd_private *private;
2263 	struct dasd_ccw_req *cqr;
2264 	struct req_iterator iter;
2265 	struct bio_vec *bv;
2266 	char *dst;
2267 	unsigned int trkcount, ctidaw;
2268 	unsigned char cmd;
2269 	struct dasd_device *basedev;
2270 	unsigned int tlf;
2271 	struct itcw *itcw;
2272 	struct tidaw *last_tidaw = NULL;
2273 	int itcw_op;
2274 	size_t itcw_size;
2275 
2276 	basedev = block->base;
2277 	private = (struct dasd_eckd_private *) basedev->private;
2278 	if (rq_data_dir(req) == READ) {
2279 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2280 		itcw_op = ITCW_OP_READ;
2281 	} else if (rq_data_dir(req) == WRITE) {
2282 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2283 		itcw_op = ITCW_OP_WRITE;
2284 	} else
2285 		return ERR_PTR(-EINVAL);
2286 
2287 	/* Track-based I/O needs to address all memory via TIDAWs,
2288 	 * not just for 64 bit addresses. This allows us to map
2289 	 * each segment directly to one tidaw.
2290 	 */
2291 	trkcount = last_trk - first_trk + 1;
2292 	ctidaw = 0;
2293 	rq_for_each_segment(bv, req, iter) {
2294 		++ctidaw;
2295 	}
2296 
2297 	/* Allocate the ccw request. */
2298 	itcw_size = itcw_calc_size(0, ctidaw, 0);
2299 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2300 	if (IS_ERR(cqr))
2301 		return cqr;
2302 
2303 	cqr->cpmode = 1;
2304 	cqr->startdev = startdev;
2305 	cqr->memdev = startdev;
2306 	cqr->block = block;
2307 	cqr->expires = 100*HZ;
2308 	cqr->buildclk = get_clock();
2309 	cqr->status = DASD_CQR_FILLED;
2310 	cqr->retries = 10;
2311 
2312 	/* transfer length factor: how many bytes to read from the last track */
2313 	if (first_trk == last_trk)
2314 		tlf = last_offs - first_offs + 1;
2315 	else
2316 		tlf = last_offs + 1;
2317 	tlf *= blksize;
2318 
2319 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2320 	cqr->cpaddr = itcw_get_tcw(itcw);
2321 
2322 	if (prepare_itcw(itcw, first_trk, last_trk,
2323 			 cmd, basedev, startdev,
2324 			 first_offs + 1,
2325 			 trkcount, blksize,
2326 			 (last_rec - first_rec + 1) * blksize,
2327 			 tlf, blk_per_trk) == -EAGAIN) {
2328 		/* Clock not in sync and XRC is enabled.
2329 		 * Try again later.
2330 		 */
2331 		dasd_sfree_request(cqr, startdev);
2332 		return ERR_PTR(-EAGAIN);
2333 	}
2334 
2335 	/*
2336 	 * A tidaw can address 4k of memory, but must not cross page boundaries.
2337 	 * We can let the block layer handle this by setting
2338 	 * blk_queue_segment_boundary to page boundaries and
2339 	 * blk_max_segment_size to page size when setting up the request queue.
2340 	 */
2341 	rq_for_each_segment(bv, req, iter) {
2342 		dst = page_address(bv->bv_page) + bv->bv_offset;
2343 		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2344 		if (IS_ERR(last_tidaw))
2345 			return (struct dasd_ccw_req *)last_tidaw;
2346 	}
2347 
2348 	last_tidaw->flags |= 0x80;
2349 	itcw_finalize(itcw);
2350 
2351 	if (blk_noretry_request(req) ||
2352 	    block->base->features & DASD_FEATURE_FAILFAST)
2353 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2354 	cqr->startdev = startdev;
2355 	cqr->memdev = startdev;
2356 	cqr->block = block;
2357 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2358 	cqr->lpm = private->path_data.ppm;
2359 	cqr->retries = 256;
2360 	cqr->buildclk = get_clock();
2361 	cqr->status = DASD_CQR_FILLED;
2362 	return cqr;
2363 }
2364 
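/*
 * Choose the channel program format for a request: transport mode if
 * FCX is available (and the request stays on one track), command-mode
 * track I/O if read/write track data and prefix are supported, and the
 * single-block command-mode program as fallback. Requests that touch
 * the cdl area or use the page cache always take the single-block path.
 */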
2365 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2366 					       struct dasd_block *block,
2367 					       struct request *req)
2368 {
2369 	int tpm, cmdrtd, cmdwtd;
2370 	int use_prefix;
2371 #if defined(CONFIG_64BIT)
2372 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
2373 #endif
2374 	struct dasd_eckd_private *private;
2375 	struct dasd_device *basedev;
2376 	sector_t first_rec, last_rec;
2377 	sector_t first_trk, last_trk;
2378 	unsigned int first_offs, last_offs;
2379 	unsigned int blk_per_trk, blksize;
2380 	int cdlspecial;
2381 	struct dasd_ccw_req *cqr;
2382 
2383 	basedev = block->base;
2384 	private = (struct dasd_eckd_private *) basedev->private;
2385 
2386 	/* Calculate number of blocks/records per track. */
2387 	blksize = block->bp_block;
2388 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2389 	if (blk_per_trk == 0)
2390 		return ERR_PTR(-EINVAL);
2391 	/* Calculate record id of first and last block. */
2392 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2393 	first_offs = sector_div(first_trk, blk_per_trk);
2394 	last_rec = last_trk =
2395 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2396 	last_offs = sector_div(last_trk, blk_per_trk);
2397 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2398 
2399 	/* is transport mode supported? */
2400 #if defined(CONFIG_64BIT)
2401 	fcx_in_css = css_general_characteristics.fcx;
2402 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2403 	fcx_in_features = private->features.feature[40] & 0x80;
2404 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2405 #else
2406 	tpm = 0;
2407 #endif
2408 
2409 	/* is read track data and write track data in command mode supported? */
2410 	cmdrtd = private->features.feature[9] & 0x20;
2411 	cmdwtd = private->features.feature[12] & 0x40;
2412 	use_prefix = private->features.feature[8] & 0x01;
2413 
2414 	cqr = NULL;
2415 	if (cdlspecial || dasd_page_cache) {
2416 		/* do nothing, just fall through to the cmd mode single case */
2417 	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
2418 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2419 						    first_rec, last_rec,
2420 						    first_trk, last_trk,
2421 						    first_offs, last_offs,
2422 						    blk_per_trk, blksize);
2423 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2424 			cqr = NULL;
2425 	} else if (use_prefix &&
2426 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
2427 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2428 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2429 						   first_rec, last_rec,
2430 						   first_trk, last_trk,
2431 						   first_offs, last_offs,
2432 						   blk_per_trk, blksize);
2433 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2434 			cqr = NULL;
2435 	}
2436 	if (!cqr)
2437 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2438 						    first_rec, last_rec,
2439 						    first_trk, last_trk,
2440 						    first_offs, last_offs,
2441 						    blk_per_trk, blksize);
2442 	return cqr;
2443 }
2444 
2445 static int
2446 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2447 {
2448 	struct dasd_eckd_private *private;
2449 	struct ccw1 *ccw;
2450 	struct req_iterator iter;
2451 	struct bio_vec *bv;
2452 	char *dst, *cda;
2453 	unsigned int blksize, blk_per_trk, off;
2454 	sector_t recid;
2455 	int status;
2456 
2457 	if (!dasd_page_cache)
2458 		goto out;
2459 	private = (struct dasd_eckd_private *) cqr->block->base->private;
2460 	blksize = cqr->block->bp_block;
2461 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2462 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2463 	ccw = cqr->cpaddr;
2464 	/* Skip over define extent & locate record. */
2465 	ccw++;
2466 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
2467 		ccw++;
2468 	rq_for_each_segment(bv, req, iter) {
2469 		dst = page_address(bv->bv_page) + bv->bv_offset;
2470 		for (off = 0; off < bv->bv_len; off += blksize) {
2471 			/* Skip locate record. */
2472 			if (private->uses_cdl && recid <= 2*blk_per_trk)
2473 				ccw++;
2474 			if (dst) {
2475 				if (ccw->flags & CCW_FLAG_IDA)
2476 					cda = *((char **)((addr_t) ccw->cda));
2477 				else
2478 					cda = (char *)((addr_t) ccw->cda);
2479 				if (dst != cda) {
2480 					if (rq_data_dir(req) == READ)
2481 						memcpy(dst, cda, bv->bv_len);
2482 					kmem_cache_free(dasd_page_cache,
2483 					    (void *)((addr_t)cda & PAGE_MASK));
2484 				}
2485 				dst = NULL;
2486 			}
2487 			ccw++;
2488 			recid++;
2489 		}
2490 	}
2491 out:
2492 	status = cqr->status == DASD_CQR_DONE;
2493 	dasd_sfree_request(cqr, cqr->memdev);
2494 	return status;
2495 }
2496 
2497 /*
2498  * Modify ccw/tcw in cqr so it can be started on a base device.
2499  *
2500  * Note that this is not enough to restart the cqr!
2501  * Either reset cqr->startdev as well (summary unit check handling)
2502  * or restart via separate cqr (as in ERP handling).
2503  */
2504 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
2505 {
2506 	struct ccw1 *ccw;
2507 	struct PFX_eckd_data *pfxdata;
2508 	struct tcw *tcw;
2509 	struct tccb *tccb;
2510 	struct dcw *dcw;
2511 
2512 	if (cqr->cpmode == 1) {
2513 		tcw = cqr->cpaddr;
2514 		tccb = tcw_get_tccb(tcw);
2515 		dcw = (struct dcw *)&tccb->tca[0];
2516 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
2517 		pfxdata->validity.verify_base = 0;
2518 		pfxdata->validity.hyper_pav = 0;
2519 	} else {
2520 		ccw = cqr->cpaddr;
2521 		pfxdata = cqr->data;
2522 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2523 			pfxdata->validity.verify_base = 0;
2524 			pfxdata->validity.hyper_pav = 0;
2525 		}
2526 	}
2527 }
2528 
2529 #define DASD_ECKD_CHANQ_MAX_SIZE 4
2530 
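/*
 * Pick a start device via PAV alias management and build the channel
 * program on it; the per-device counter limits the number of requests
 * queued on one start device to DASD_ECKD_CHANQ_MAX_SIZE.
 */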
2531 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2532 						     struct dasd_block *block,
2533 						     struct request *req)
2534 {
2535 	struct dasd_eckd_private *private;
2536 	struct dasd_device *startdev;
2537 	unsigned long flags;
2538 	struct dasd_ccw_req *cqr;
2539 
2540 	startdev = dasd_alias_get_start_dev(base);
2541 	if (!startdev)
2542 		startdev = base;
2543 	private = (struct dasd_eckd_private *) startdev->private;
2544 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
2545 		return ERR_PTR(-EBUSY);
2546 
2547 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2548 	private->count++;
2549 	cqr = dasd_eckd_build_cp(startdev, block, req);
2550 	if (IS_ERR(cqr))
2551 		private->count--;
2552 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
2553 	return cqr;
2554 }
2555 
2556 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
2557 				   struct request *req)
2558 {
2559 	struct dasd_eckd_private *private;
2560 	unsigned long flags;
2561 
2562 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
2563 	private = (struct dasd_eckd_private *) cqr->memdev->private;
2564 	private->count--;
2565 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
2566 	return dasd_eckd_free_cp(cqr, req);
2567 }
2568 
2569 static int
2570 dasd_eckd_fill_info(struct dasd_device * device,
2571 		    struct dasd_information2_t * info)
2572 {
2573 	struct dasd_eckd_private *private;
2574 
2575 	private = (struct dasd_eckd_private *) device->private;
2576 	info->label_block = 2;
2577 	info->FBA_layout = private->uses_cdl ? 0 : 1;
2578 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
2579 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
2580 	memcpy(info->characteristics, &private->rdc_data,
2581 	       sizeof(struct dasd_eckd_characteristics));
2582 	info->confdata_size = min((unsigned long)private->conf_len,
2583 				  sizeof(info->configuration_data));
2584 	memcpy(info->configuration_data, private->conf_data,
2585 	       info->confdata_size);
2586 	return 0;
2587 }
2588 
2589 /*
2590  * SECTION: ioctl functions for eckd devices.
2591  */
2592 
2593 /*
2594  * Release device ioctl.
2595  * Builds a channel program to release a previously reserved
2596  * (see dasd_eckd_reserve) device.
2597  */
2598 static int
2599 dasd_eckd_release(struct dasd_device *device)
2600 {
2601 	struct dasd_ccw_req *cqr;
2602 	int rc;
2603 	struct ccw1 *ccw;
2604 
2605 	if (!capable(CAP_SYS_ADMIN))
2606 		return -EACCES;
2607 
2608 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2609 	if (IS_ERR(cqr)) {
2610 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2611 			    "Could not allocate initialization request");
2612 		return PTR_ERR(cqr);
2613 	}
2614 	ccw = cqr->cpaddr;
2615 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
2616 	ccw->flags |= CCW_FLAG_SLI;
2617 	ccw->count = 32;
2618 	ccw->cda = (__u32)(addr_t) cqr->data;
2619 	cqr->startdev = device;
2620 	cqr->memdev = device;
2621 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2622 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2623 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2624 	cqr->expires = 2 * HZ;
2625 	cqr->buildclk = get_clock();
2626 	cqr->status = DASD_CQR_FILLED;
2627 
2628 	rc = dasd_sleep_on_immediatly(cqr);
2629 
2630 	dasd_sfree_request(cqr, cqr->memdev);
2631 	return rc;
2632 }
2633 
2634 /*
2635  * Reserve device ioctl.
2636  * Options are set to 'synchronous wait for interrupt' and
2637  * 'timeout the request'. This leads to a terminate IO if
2638  * the interrupt is outstanding for a certain time.
2639  */
2640 static int
2641 dasd_eckd_reserve(struct dasd_device *device)
2642 {
2643 	struct dasd_ccw_req *cqr;
2644 	int rc;
2645 	struct ccw1 *ccw;
2646 
2647 	if (!capable(CAP_SYS_ADMIN))
2648 		return -EACCES;
2649 
2650 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2651 	if (IS_ERR(cqr)) {
2652 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2653 			    "Could not allocate initialization request");
2654 		return PTR_ERR(cqr);
2655 	}
2656 	ccw = cqr->cpaddr;
2657 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
2658 	ccw->flags |= CCW_FLAG_SLI;
2659 	ccw->count = 32;
2660 	ccw->cda = (__u32)(addr_t) cqr->data;
2661 	cqr->startdev = device;
2662 	cqr->memdev = device;
2663 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2664 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2665 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2666 	cqr->expires = 2 * HZ;
2667 	cqr->buildclk = get_clock();
2668 	cqr->status = DASD_CQR_FILLED;
2669 
2670 	rc = dasd_sleep_on_immediatly(cqr);
2671 
2672 	dasd_sfree_request(cqr, cqr->memdev);
2673 	return rc;
2674 }
2675 
2676 /*
2677  * Steal lock ioctl - unconditional reserve device.
2678  * Buils a channel programm to break a device's reservation.
2679  * Builds a channel program to break a device's reservation.
2680  */
2681 static int
2682 dasd_eckd_steal_lock(struct dasd_device *device)
2683 {
2684 	struct dasd_ccw_req *cqr;
2685 	int rc;
2686 	struct ccw1 *ccw;
2687 
2688 	if (!capable(CAP_SYS_ADMIN))
2689 		return -EACCES;
2690 
2691 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2692 	if (IS_ERR(cqr)) {
2693 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2694 			    "Could not allocate initialization request");
2695 		return PTR_ERR(cqr);
2696 	}
2697 	ccw = cqr->cpaddr;
2698 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
2699 	ccw->flags |= CCW_FLAG_SLI;
2700 	ccw->count = 32;
2701 	ccw->cda = (__u32)(addr_t) cqr->data;
2702 	cqr->startdev = device;
2703 	cqr->memdev = device;
2704 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2705 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2706 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2707 	cqr->expires = 2 * HZ;
2708 	cqr->buildclk = get_clock();
2709 	cqr->status = DASD_CQR_FILLED;
2710 
2711 	rc = dasd_sleep_on_immediatly(cqr);
2712 
2713 	dasd_sfree_request(cqr, cqr->memdev);
2714 	return rc;
2715 }
2716 
2717 /*
2718  * Read performance statistics
2719  */
2720 static int
2721 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2722 {
2723 	struct dasd_psf_prssd_data *prssdp;
2724 	struct dasd_rssd_perf_stats_t *stats;
2725 	struct dasd_ccw_req *cqr;
2726 	struct ccw1 *ccw;
2727 	int rc;
2728 
2729 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
2730 				   (sizeof(struct dasd_psf_prssd_data) +
2731 				    sizeof(struct dasd_rssd_perf_stats_t)),
2732 				   device);
2733 	if (IS_ERR(cqr)) {
2734 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2735 			    "Could not allocate initialization request");
2736 		return PTR_ERR(cqr);
2737 	}
2738 	cqr->startdev = device;
2739 	cqr->memdev = device;
2740 	cqr->retries = 0;
2741 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2742 	cqr->expires = 10 * HZ;
2743 
2744 	/* Prepare for Read Subsystem Data */
2745 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2746 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
2747 	prssdp->order = PSF_ORDER_PRSSD;
2748 	prssdp->suborder = 0x01;	/* Performance Statistics */
2749 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
2750 
2751 	ccw = cqr->cpaddr;
2752 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2753 	ccw->count = sizeof(struct dasd_psf_prssd_data);
2754 	ccw->flags |= CCW_FLAG_CC;
2755 	ccw->cda = (__u32)(addr_t) prssdp;
2756 
2757 	/* Read Subsystem Data - Performance Statistics */
2758 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2759 	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
2760 
2761 	ccw++;
2762 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2763 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
2764 	ccw->cda = (__u32)(addr_t) stats;
2765 
2766 	cqr->buildclk = get_clock();
2767 	cqr->status = DASD_CQR_FILLED;
2768 	rc = dasd_sleep_on(cqr);
2769 	if (rc == 0) {
2770 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2771 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2772 		if (copy_to_user(argp, stats,
2773 				 sizeof(struct dasd_rssd_perf_stats_t)))
2774 			rc = -EFAULT;
2775 	}
2776 	dasd_sfree_request(cqr, cqr->memdev);
2777 	return rc;
2778 }
2779 
2780 /*
2781  * Get attributes (cache operations)
2782  * Returns the cache attributes used in Define Extent (DE).
2783  */
2784 static int
2785 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
2786 {
2787 	struct dasd_eckd_private *private =
2788 		(struct dasd_eckd_private *)device->private;
2789 	struct attrib_data_t attrib = private->attrib;
2790 	int rc;
2791 
2792 	if (!capable(CAP_SYS_ADMIN))
2793 		return -EACCES;
2794 	if (!argp)
2795 		return -EINVAL;
2796 
2797 	rc = 0;
2798 	if (copy_to_user(argp, (long *) &attrib,
2799 			 sizeof(struct attrib_data_t)))
2800 		rc = -EFAULT;
2801 
2802 	return rc;
2803 }
2804 
2805 /*
2806  * Set attributes (cache operations)
2807  * Stores the attributes for cache operations to be used in Define Extent (DE).
2808  */
2809 static int
2810 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2811 {
2812 	struct dasd_eckd_private *private =
2813 		(struct dasd_eckd_private *)device->private;
2814 	struct attrib_data_t attrib;
2815 
2816 	if (!capable(CAP_SYS_ADMIN))
2817 		return -EACCES;
2818 	if (!argp)
2819 		return -EINVAL;
2820 
2821 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
2822 		return -EFAULT;
2823 	private->attrib = attrib;
2824 
2825 	dev_info(&device->cdev->dev,
2826 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
2827 		 private->attrib.operation, private->attrib.nr_cyl);
2828 	return 0;
2829 }
2830 
2831 /*
2832  * Issue syscall I/O to EMC Symmetrix array.
2833  * CCWs are PSF and RSSD
2834  */
2835 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2836 {
2837 	struct dasd_symmio_parms usrparm;
2838 	char *psf_data, *rssd_result;
2839 	struct dasd_ccw_req *cqr;
2840 	struct ccw1 *ccw;
2841 	int rc;
2842 
2843 	/* Copy parms from caller */
2844 	rc = -EFAULT;
2845 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2846 		goto out;
2847 #ifndef CONFIG_64BIT
2848 	/* Make sure pointers are sane even on 31 bit. */
2849 	if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
2850 		rc = -EINVAL;
2851 		goto out;
2852 	}
2853 #endif
2854 	/* alloc I/O data area */
2855 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2856 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
2857 	if (!psf_data || !rssd_result) {
2858 		rc = -ENOMEM;
2859 		goto out_free;
2860 	}
2861 
2862 	/* get syscall header from user space */
2863 	rc = -EFAULT;
2864 	if (copy_from_user(psf_data,
2865 			   (void __user *)(unsigned long) usrparm.psf_data,
2866 			   usrparm.psf_data_len))
2867 		goto out_free;
2868 
2869 	/* sanity check on syscall header */
2870 	if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
2871 		rc = -EINVAL;
2872 		goto out_free;
2873 	}
2874 
2875 	/* setup CCWs for PSF + RSSD */
2876 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
2877 	if (IS_ERR(cqr)) {
2878 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2879 			"Could not allocate initialization request");
2880 		rc = PTR_ERR(cqr);
2881 		goto out_free;
2882 	}
2883 
2884 	cqr->startdev = device;
2885 	cqr->memdev = device;
2886 	cqr->retries = 3;
2887 	cqr->expires = 10 * HZ;
2888 	cqr->buildclk = get_clock();
2889 	cqr->status = DASD_CQR_FILLED;
2890 
2891 	/* Build the ccws */
2892 	ccw = cqr->cpaddr;
2893 
2894 	/* PSF ccw */
2895 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2896 	ccw->count = usrparm.psf_data_len;
2897 	ccw->flags |= CCW_FLAG_CC;
2898 	ccw->cda = (__u32)(addr_t) psf_data;
2899 
2900 	ccw++;
2901 
2902 	/* RSSD ccw  */
2903 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2904 	ccw->count = usrparm.rssd_result_len;
2905 	ccw->flags = CCW_FLAG_SLI ;
2906 	ccw->cda = (__u32)(addr_t) rssd_result;
2907 
2908 	rc = dasd_sleep_on(cqr);
2909 	if (rc)
2910 		goto out_sfree;
2911 
2912 	rc = -EFAULT;
2913 	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
2914 			   rssd_result, usrparm.rssd_result_len))
2915 		goto out_sfree;
2916 	rc = 0;
2917 
2918 out_sfree:
2919 	dasd_sfree_request(cqr, cqr->memdev);
2920 out_free:
2921 	kfree(rssd_result);
2922 	kfree(psf_data);
2923 out:
2924 	DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
2925 	return rc;
2926 }
2927 
2928 static int
2929 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
2930 {
2931 	struct dasd_device *device = block->base;
2932 
2933 	switch (cmd) {
2934 	case BIODASDGATTR:
2935 		return dasd_eckd_get_attrib(device, argp);
2936 	case BIODASDSATTR:
2937 		return dasd_eckd_set_attrib(device, argp);
2938 	case BIODASDPSRD:
2939 		return dasd_eckd_performance(device, argp);
2940 	case BIODASDRLSE:
2941 		return dasd_eckd_release(device);
2942 	case BIODASDRSRV:
2943 		return dasd_eckd_reserve(device);
2944 	case BIODASDSLCK:
2945 		return dasd_eckd_steal_lock(device);
2946 	case BIODASDSYMMIO:
2947 		return dasd_symm_io(device, argp);
2948 	default:
2949 		return -ENOIOCTLCMD;
2950 	}
2951 }
2952 
2953 /*
2954  * Dump the range of CCWs into 'page' buffer
2955  * and return number of printed chars.
2956  */
2957 static int
2958 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
2959 {
2960 	int len, count;
2961 	char *datap;
2962 
2963 	len = 0;
2964 	while (from <= to) {
2965 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2966 			       " CCW %p: %08X %08X DAT:",
2967 			       from, ((int *) from)[0], ((int *) from)[1]);
2968 
2969 		/* get pointer to data (consider IDALs) */
2970 		if (from->flags & CCW_FLAG_IDA)
2971 			datap = (char *) *((addr_t *) (addr_t) from->cda);
2972 		else
2973 			datap = (char *) ((addr_t) from->cda);
2974 
2975 		/* dump data (max 32 bytes) */
2976 		for (count = 0; count < from->count && count < 32; count++) {
2977 			if (count % 8 == 0) len += sprintf(page + len, " ");
2978 			if (count % 4 == 0) len += sprintf(page + len, " ");
2979 			len += sprintf(page + len, "%02x", datap[count]);
2980 		}
2981 		len += sprintf(page + len, "\n");
2982 		from++;
2983 	}
2984 	return len;
2985 }
2986 
2987 static void
2988 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
2989 			 char *reason)
2990 {
2991 	u64 *sense;
2992 
2993 	sense = (u64 *) dasd_get_sense(irb);
2994 	if (sense) {
2995 		DBF_DEV_EVENT(DBF_EMERG, device,
2996 			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
2997 			      "%016llx", reason,
2998 			      scsw_is_tm(&irb->scsw) ? "t" : "c",
2999 			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
3000 			      scsw_dstat(&irb->scsw), sense[0], sense[1],
3001 			      sense[2], sense[3]);
3002 	} else {
3003 		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
3004 			      "SORRY - NO VALID SENSE AVAILABLE\n");
3005 	}
3006 }
3007 
3008 /*
3009  * Print sense data and related channel program.
3010  * Parts are printed because printk buffer is only 1024 bytes.
3011  */
3012 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3013 				 struct dasd_ccw_req *req, struct irb *irb)
3014 {
3015 	char *page;
3016 	struct ccw1 *first, *last, *fail, *from, *to;
3017 	int len, sl, sct;
3018 
3019 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3020 	if (page == NULL) {
3021 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3022 			      "No memory to dump sense data\n");
3023 		return;
3024 	}
3025 	/* dump the sense data */
3026 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3027 		      " I/O status report for device %s:\n",
3028 		      dev_name(&device->cdev->dev));
3029 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3030 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3031 		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3032 		       scsw_cc(&irb->scsw), req->intrc);
3033 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3034 		       " device %s: Failing CCW: %p\n",
3035 		       dev_name(&device->cdev->dev),
3036 		       (void *) (addr_t) irb->scsw.cmd.cpa);
3037 	if (irb->esw.esw0.erw.cons) {
3038 		for (sl = 0; sl < 4; sl++) {
3039 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3040 				       " Sense(hex) %2d-%2d:",
3041 				       (8 * sl), ((8 * sl) + 7));
3042 
3043 			for (sct = 0; sct < 8; sct++) {
3044 				len += sprintf(page + len, " %02x",
3045 					       irb->ecw[8 * sl + sct]);
3046 			}
3047 			len += sprintf(page + len, "\n");
3048 		}
3049 
3050 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3051 			/* 24 Byte Sense Data */
3052 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3053 				" 24 Byte: %x MSG %x, "
3054 				"%s MSGb to SYSOP\n",
3055 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3056 				irb->ecw[1] & 0x10 ? "" : "no");
3057 		} else {
3058 			/* 32 Byte Sense Data */
3059 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3060 				" 32 Byte: Format: %x "
3061 				"Exception class %x\n",
3062 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3063 		}
3064 	} else {
3065 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3066 			" SORRY - NO VALID SENSE AVAILABLE\n");
3067 	}
3068 	printk("%s", page);
3069 
3070 	if (req) {
3071 		/* req == NULL for unsolicited interrupts */
3072 		/* dump the Channel Program (max 140 Bytes per line) */
3073 		/* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
3074 		first = req->cpaddr;
3075 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3076 		to = min(first + 6, last);
3077 		len = sprintf(page,  KERN_ERR PRINTK_HEADER
3078 			      " Related CP in req: %p\n", req);
3079 		dasd_eckd_dump_ccw_range(first, to, page + len);
3080 		printk("%s", page);
3081 
3082 		/* print failing CCW area (maximum 4) */
3083 		/* scsw->cda is either valid or zero  */
3084 		len = 0;
3085 		from = ++to;
3086 		fail = (struct ccw1 *)(addr_t)
3087 				irb->scsw.cmd.cpa; /* failing CCW */
3088 		if (from <  fail - 2) {
3089 			from = fail - 2;     /* there is a gap - print header */
3090 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3091 		}
3092 		to = min(fail + 1, last);
3093 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
3094 
3095 		/* print last CCWs (maximum 2) */
3096 		from = max(from, ++to);
3097 		if (from < last - 1) {
3098 			from = last - 1;     /* there is a gap - print header */
3099 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3100 		}
3101 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
3102 		if (len > 0)
3103 			printk("%s", page);
3104 	}
3105 	free_page((unsigned long) page);
3106 }
3107 
3108 
3109 /*
3110  * Print sense data from a tcw.
3111  */
3112 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3113 				 struct dasd_ccw_req *req, struct irb *irb)
3114 {
3115 	char *page;
3116 	int len, sl, sct, residual;
3117 
3118 	struct tsb *tsb;
3119 	u8 *sense;
3120 
3121 
3122 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3123 	if (page == NULL) {
3124 		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
3125 			    "No memory to dump sense data");
3126 		return;
3127 	}
3128 	/* dump the sense data */
3129 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3130 		      " I/O status report for device %s:\n",
3131 		      dev_name(&device->cdev->dev));
3132 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3133 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
3134 		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
3135 		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3136 		       scsw_cc(&irb->scsw), req->intrc,
3137 		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3138 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3139 		       " device %s: Failing TCW: %p\n",
3140 		       dev_name(&device->cdev->dev),
3141 		       (void *) (addr_t) irb->scsw.tm.tcw);
3142 
3143 	tsb = NULL;
3144 	sense = NULL;
3145 	if (irb->scsw.tm.tcw)
3146 		tsb = tcw_get_tsb(
3147 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3148 
3149 	if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
3150 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3151 			       " tsb->length %d\n", tsb->length);
3152 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3153 			       " tsb->flags %x\n", tsb->flags);
3154 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3155 			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
3156 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3157 			       " tsb->count %d\n", tsb->count);
3158 		residual = tsb->count - 28;
3159 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3160 			       " residual %d\n", residual);
3161 
3162 		switch (tsb->flags & 0x07) {
3163 		case 1:	/* tsa_iostat */
3164 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3165 			       " tsb->tsa.iostat.dev_time %d\n",
3166 				       tsb->tsa.iostat.dev_time);
3167 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3168 			       " tsb->tsa.iostat.def_time %d\n",
3169 				       tsb->tsa.iostat.def_time);
3170 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3171 			       " tsb->tsa.iostat.queue_time %d\n",
3172 				       tsb->tsa.iostat.queue_time);
3173 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3174 			       " tsb->tsa.iostat.dev_busy_time %d\n",
3175 				       tsb->tsa.iostat.dev_busy_time);
3176 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3177 			       " tsb->tsa.iostat.dev_act_time %d\n",
3178 				       tsb->tsa.iostat.dev_act_time);
3179 			sense = tsb->tsa.iostat.sense;
3180 			break;
3181 		case 2: /* ts_ddpc */
3182 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3183 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3184 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3185 			       " tsb->tsa.ddpc.rcq:  ");
3186 			for (sl = 0; sl < 16; sl++) {
3187 				for (sct = 0; sct < 8; sct++) {
3188 					len += sprintf(page + len, " %02x",
3189 						       tsb->tsa.ddpc.rcq[sl]);
3190 				}
3191 				len += sprintf(page + len, "\n");
3192 			}
3193 			sense = tsb->tsa.ddpc.sense;
3194 			break;
3195 		case 3: /* tsa_intrg */
3196 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3197 				      " tsb->tsa.intrg.: not supported yet\n");
3198 			break;
3199 		}
3200 
3201 		if (sense) {
3202 			for (sl = 0; sl < 4; sl++) {
3203 				len += sprintf(page + len,
3204 					       KERN_ERR PRINTK_HEADER
3205 					       " Sense(hex) %2d-%2d:",
3206 					       (8 * sl), ((8 * sl) + 7));
3207 				for (sct = 0; sct < 8; sct++) {
3208 					len += sprintf(page + len, " %02x",
3209 						       sense[8 * sl + sct]);
3210 				}
3211 				len += sprintf(page + len, "\n");
3212 			}
3213 
3214 			if (sense[27] & DASD_SENSE_BIT_0) {
3215 				/* 24 Byte Sense Data */
3216 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3217 					" 24 Byte: %x MSG %x, "
3218 					"%s MSGb to SYSOP\n",
3219 					sense[7] >> 4, sense[7] & 0x0f,
3220 					sense[1] & 0x10 ? "" : "no");
3221 			} else {
3222 				/* 32 Byte Sense Data */
3223 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3224 					" 32 Byte: Format: %x "
3225 					"Exception class %x\n",
3226 					sense[6] & 0x0f, sense[22] >> 4);
3227 			}
3228 		} else {
3229 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3230 				" SORRY - NO VALID SENSE AVAILABLE\n");
3231 		}
3232 	} else {
3233 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3234 			" SORRY - NO TSB DATA AVAILABLE\n");
3235 	}
3236 	printk("%s", page);
3237 	free_page((unsigned long) page);
3238 }
3239 
3240 static void dasd_eckd_dump_sense(struct dasd_device *device,
3241 				 struct dasd_ccw_req *req, struct irb *irb)
3242 {
3243 	if (req && scsw_is_tm(&req->irb.scsw))
3244 		dasd_eckd_dump_sense_tcw(device, req, irb);
3245 	else
3246 		dasd_eckd_dump_sense_ccw(device, req, irb);
3247 }
3248 
3249 int dasd_eckd_pm_freeze(struct dasd_device *device)
3250 {
3251 	/*
3252 	 * the device should be disconnected from our LCU structure
3253 	 * on restore we will reconnect it and reread LCU specific
3254 	 * information like PAV support that might have changed
3255 	 */
3256 	dasd_alias_remove_device(device);
3257 	dasd_alias_disconnect_device_from_lcu(device);
3258 
3259 	return 0;
3260 }
3261 
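/*
 * Re-initialize a device after hibernation: reread the configuration
 * data, check that the UID is unchanged, reconnect to the LCU and
 * alias management and refresh feature codes and device
 * characteristics.
 */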
3262 int dasd_eckd_restore_device(struct dasd_device *device)
3263 {
3264 	struct dasd_eckd_private *private;
3265 	struct dasd_eckd_characteristics temp_rdc_data;
3266 	int is_known, rc;
3267 	struct dasd_uid temp_uid;
3268 	unsigned long flags;
3269 
3270 	private = (struct dasd_eckd_private *) device->private;
3271 
3272 	/* Read Configuration Data */
3273 	rc = dasd_eckd_read_conf(device);
3274 	if (rc)
3275 		goto out_err;
3276 
3277 	/* Generate device unique id and register in devmap */
3278 	rc = dasd_eckd_generate_uid(device, &private->uid);
3279 	dasd_get_uid(device->cdev, &temp_uid);
3280 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3281 		dev_err(&device->cdev->dev, "The UID of the DASD has "
3282 			"changed\n");
3283 	if (rc)
3284 		goto out_err;
3285 	dasd_set_uid(device->cdev, &private->uid);
3286 
3287 	/* register lcu with alias handling, enable PAV if this is a new lcu */
3288 	is_known = dasd_alias_make_device_known_to_lcu(device);
3289 	if (is_known < 0)
3290 		return is_known;
3291 	if (!is_known) {
3292 		dasd_eckd_validate_server(device);
3293 		dasd_alias_lcu_setup_complete(device);
3294 	} else
3295 		dasd_alias_wait_for_lcu_setup(device);
3296 
3297 	/* RE-Read Configuration Data */
3298 	rc = dasd_eckd_read_conf(device);
3299 	if (rc)
3300 		goto out_err;
3301 
3302 	/* Read Feature Codes */
3303 	dasd_eckd_read_features(device);
3304 
3305 	/* Read Device Characteristics */
3306 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3307 					 &temp_rdc_data, 64);
3308 	if (rc) {
3309 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3310 				"Read device characteristic failed, rc=%d", rc);
3311 		goto out_err;
3312 	}
3313 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3314 	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3315 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3316 
3317 	/* add device to alias management */
3318 	dasd_alias_add_device(device);
3319 
3320 	return 0;
3321 
3322 out_err:
3323 	return -1;
3324 }
3325 
3326 static struct ccw_driver dasd_eckd_driver = {
3327 	.name	     = "dasd-eckd",
3328 	.owner	     = THIS_MODULE,
3329 	.ids	     = dasd_eckd_ids,
3330 	.probe	     = dasd_eckd_probe,
3331 	.remove      = dasd_generic_remove,
3332 	.set_offline = dasd_generic_set_offline,
3333 	.set_online  = dasd_eckd_set_online,
3334 	.notify      = dasd_generic_notify,
3335 	.freeze      = dasd_generic_pm_freeze,
3336 	.thaw	     = dasd_generic_restore_device,
3337 	.restore     = dasd_generic_restore_device,
3338 };
3339 
3340 /*
3341  * max_blocks is dependent on the amount of storage that is available
3342  * in the static io buffer for each device. Currently each device has
3343  * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
3344  * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
3345  * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
3346  * addition we have one define extent ccw + 16 bytes of data and one
3347  * locate record ccw + 16 bytes of data. That makes:
3348  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
3349  * We want to fit two into the available memory so that we can immediately
3350  * start the next request if one finishes off. That makes 249.5 blocks
3351  * for one request. Give a little safety and the result is 240.
3352  */
3353 static struct dasd_discipline dasd_eckd_discipline = {
3354 	.owner = THIS_MODULE,
3355 	.name = "ECKD",
3356 	.ebcname = "ECKD",
3357 	.max_blocks = 240,
3358 	.check_device = dasd_eckd_check_characteristics,
3359 	.uncheck_device = dasd_eckd_uncheck_device,
3360 	.do_analysis = dasd_eckd_do_analysis,
3361 	.ready_to_online = dasd_eckd_ready_to_online,
3362 	.online_to_ready = dasd_eckd_online_to_ready,
3363 	.fill_geometry = dasd_eckd_fill_geometry,
3364 	.start_IO = dasd_start_IO,
3365 	.term_IO = dasd_term_IO,
3366 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
3367 	.format_device = dasd_eckd_format_device,
3368 	.erp_action = dasd_eckd_erp_action,
3369 	.erp_postaction = dasd_eckd_erp_postaction,
3370 	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
3371 	.build_cp = dasd_eckd_build_alias_cp,
3372 	.free_cp = dasd_eckd_free_alias_cp,
3373 	.dump_sense = dasd_eckd_dump_sense,
3374 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
3375 	.fill_info = dasd_eckd_fill_info,
3376 	.ioctl = dasd_eckd_ioctl,
3377 	.freeze = dasd_eckd_pm_freeze,
3378 	.restore = dasd_eckd_restore_device,
3379 };
3380 
3381 static int __init
3382 dasd_eckd_init(void)
3383 {
3384 	int ret;
3385 
3386 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
3387 	ret = ccw_driver_register(&dasd_eckd_driver);
3388 	if (!ret)
3389 		wait_for_device_probe();
3390 
3391 	return ret;
3392 }
3393 
3394 static void __exit
3395 dasd_eckd_cleanup(void)
3396 {
3397 	ccw_driver_unregister(&dasd_eckd_driver);
3398 }
3399 
3400 module_init(dasd_eckd_init);
3401 module_exit(dasd_eckd_cleanup);
3402