xref: /openbmc/linux/drivers/s390/block/dasd_eckd.c (revision eb6e199b)
1 /*
2  * File...........: linux/drivers/s390/block/dasd_eckd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 
23 #include <asm/debug.h>
24 #include <asm/idals.h>
25 #include <asm/ebcdic.h>
26 #include <asm/io.h>
27 #include <asm/uaccess.h>
28 #include <asm/cio.h>
29 #include <asm/ccwdev.h>
30 #include <asm/itcw.h>
31 
32 #include "dasd_int.h"
33 #include "dasd_eckd.h"
34 #include "../cio/chsc.h"
35 
36 
37 #ifdef PRINTK_HEADER
38 #undef PRINTK_HEADER
39 #endif				/* PRINTK_HEADER */
40 #define PRINTK_HEADER "dasd(eckd):"
41 
42 #define ECKD_C0(i) (i->home_bytes)
43 #define ECKD_F(i) (i->formula)
44 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
45 		    (i->factors.f_0x02.f1))
46 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
47 		    (i->factors.f_0x02.f2))
48 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
49 		    (i->factors.f_0x02.f3))
50 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
51 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
52 #define ECKD_F6(i) (i->factor6)
53 #define ECKD_F7(i) (i->factor7)
54 #define ECKD_F8(i) (i->factor8)
55 
56 MODULE_LICENSE("GPL");
57 
58 static struct dasd_discipline dasd_eckd_discipline;
59 
60 /* The ccw bus type uses this table to find devices that it sends to
61  * dasd_eckd_probe */
62 static struct ccw_device_id dasd_eckd_ids[] = {
63 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
64 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
65 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
66 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
67 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
68 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
69 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
70 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
71 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
72 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
73 	{ /* end of list */ },
74 };
75 
76 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
77 
78 static struct ccw_driver dasd_eckd_driver; /* see below */
79 
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83 
84 
85 /* initial attempt at a probe function. this can be simplified once
86  * the other detection code is gone */
87 static int
88 dasd_eckd_probe (struct ccw_device *cdev)
89 {
90 	int ret;
91 
92 	/* set ECKD specific ccw-device options */
93 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
94 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
95 	if (ret) {
96 		DBF_EVENT(DBF_WARNING,
97 		       "dasd_eckd_probe: could not set ccw-device options "
98 		       "for %s\n", dev_name(&cdev->dev));
99 		return ret;
100 	}
101 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
102 	return ret;
103 }
104 
105 static int
106 dasd_eckd_set_online(struct ccw_device *cdev)
107 {
108 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
109 }
110 
111 static const int sizes_trk0[] = { 28, 148, 84 };
112 #define LABEL_SIZE 140
113 
114 static inline unsigned int
115 round_up_multiple(unsigned int no, unsigned int mult)
116 {
117 	int rem = no % mult;
118 	return (rem ? no - rem + mult : no);
119 }
120 
121 static inline unsigned int
122 ceil_quot(unsigned int d1, unsigned int d2)
123 {
124 	return (d1 + (d2 - 1)) / d2;
125 }
126 
127 static unsigned int
128 recs_per_track(struct dasd_eckd_characteristics * rdc,
129 	       unsigned int kl, unsigned int dl)
130 {
131 	int dn, kn;
132 
133 	switch (rdc->dev_type) {
134 	case 0x3380:
135 		if (kl)
136 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
137 				       ceil_quot(dl + 12, 32));
138 		else
139 			return 1499 / (15 + ceil_quot(dl + 12, 32));
140 	case 0x3390:
141 		dn = ceil_quot(dl + 6, 232) + 1;
142 		if (kl) {
143 			kn = ceil_quot(kl + 6, 232) + 1;
144 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
145 				       9 + ceil_quot(dl + 6 * dn, 34));
146 		} else
147 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
148 	case 0x9345:
149 		dn = ceil_quot(dl + 6, 232) + 1;
150 		if (kl) {
151 			kn = ceil_quot(kl + 6, 232) + 1;
152 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
153 				       ceil_quot(dl + 6 * dn, 34));
154 		} else
155 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
156 	}
157 	return 0;
158 }
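
/*
 * Worked example (for illustration): on a 3390 with 4096 byte data
 * blocks and no key field the formula above gives
 * dn = ceil_quot(4096 + 6, 232) + 1 = 19 and
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12,
 * i.e. twelve 4 KB records per track.
 */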
159 
160 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
161 {
162 	geo->cyl = (__u16) cyl;
163 	geo->head = cyl >> 16;
164 	geo->head <<= 4;
165 	geo->head |= head;
166 }
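
/*
 * set_ch_t packs a track address into the 16-bit cylinder / 16-bit head
 * format used in the channel program data: the low 16 bits of the
 * cylinder go into cyl, cylinder bits above that are shifted into the
 * upper 12 bits of head, and the real head number occupies the low
 * 4 bits.  E.g. cyl = 0x12345, head = 3 yields cyl = 0x2345 and
 * head = 0x0013.
 */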
167 
168 static int
169 check_XRC (struct ccw1         *de_ccw,
170            struct DE_eckd_data *data,
171            struct dasd_device  *device)
172 {
173 	struct dasd_eckd_private *private;
174 	int rc;
175 
176 	private = (struct dasd_eckd_private *) device->private;
177 	if (!private->rdc_data.facilities.XRC_supported)
178 		return 0;
179 
180 	/* switch on System Time Stamp - needed for XRC Support */
181 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
182 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
183 
184 	rc = get_sync_clock(&data->ep_sys_time);
185 	/* Ignore return code if sync clock is switched off. */
186 	if (rc == -ENOSYS || rc == -EACCES)
187 		rc = 0;
188 
189 	de_ccw->count = sizeof(struct DE_eckd_data);
190 	de_ccw->flags |= CCW_FLAG_SLI;
191 	return rc;
192 }
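
/*
 * Note: -ENOSYS/-EACCES from get_sync_clock are ignored above, but any
 * other return value (e.g. -EAGAIN while the clock is not yet in sync)
 * is handed back to the caller; the request builders below check for
 * -EAGAIN and defer the request in that case.
 */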
193 
194 static int
195 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
196 	      unsigned int totrk, int cmd, struct dasd_device *device)
197 {
198 	struct dasd_eckd_private *private;
199 	u32 begcyl, endcyl;
200 	u16 heads, beghead, endhead;
201 	int rc = 0;
202 
203 	private = (struct dasd_eckd_private *) device->private;
204 
205 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
206 	ccw->flags = 0;
207 	ccw->count = 16;
208 	ccw->cda = (__u32) __pa(data);
209 
210 	memset(data, 0, sizeof(struct DE_eckd_data));
211 	switch (cmd) {
212 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
213 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
214 	case DASD_ECKD_CCW_READ:
215 	case DASD_ECKD_CCW_READ_MT:
216 	case DASD_ECKD_CCW_READ_CKD:
217 	case DASD_ECKD_CCW_READ_CKD_MT:
218 	case DASD_ECKD_CCW_READ_KD:
219 	case DASD_ECKD_CCW_READ_KD_MT:
220 	case DASD_ECKD_CCW_READ_COUNT:
221 		data->mask.perm = 0x1;
222 		data->attributes.operation = private->attrib.operation;
223 		break;
224 	case DASD_ECKD_CCW_WRITE:
225 	case DASD_ECKD_CCW_WRITE_MT:
226 	case DASD_ECKD_CCW_WRITE_KD:
227 	case DASD_ECKD_CCW_WRITE_KD_MT:
228 		data->mask.perm = 0x02;
229 		data->attributes.operation = private->attrib.operation;
230 		rc = check_XRC (ccw, data, device);
231 		break;
232 	case DASD_ECKD_CCW_WRITE_CKD:
233 	case DASD_ECKD_CCW_WRITE_CKD_MT:
234 		data->attributes.operation = DASD_BYPASS_CACHE;
235 		rc = check_XRC (ccw, data, device);
236 		break;
237 	case DASD_ECKD_CCW_ERASE:
238 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
239 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
240 		data->mask.perm = 0x3;
241 		data->mask.auth = 0x1;
242 		data->attributes.operation = DASD_BYPASS_CACHE;
243 		rc = check_XRC (ccw, data, device);
244 		break;
245 	default:
246 		dev_err(&device->cdev->dev,
247 			"0x%x is not a known command\n", cmd);
248 		break;
249 	}
250 
251 	data->attributes.mode = 0x3;	/* ECKD */
252 
253 	if ((private->rdc_data.cu_type == 0x2105 ||
254 	     private->rdc_data.cu_type == 0x2107 ||
255 	     private->rdc_data.cu_type == 0x1750)
256 	    && !(private->uses_cdl && trk < 2))
257 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
258 
259 	heads = private->rdc_data.trk_per_cyl;
260 	begcyl = trk / heads;
261 	beghead = trk % heads;
262 	endcyl = totrk / heads;
263 	endhead = totrk % heads;
264 
265 	/* check for sequential prestage - enhance cylinder range */
266 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
267 	    data->attributes.operation == DASD_SEQ_ACCESS) {
268 
269 		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
270 			endcyl += private->attrib.nr_cyl;
271 		else
272 			endcyl = (private->real_cyl - 1);
273 	}
274 
275 	set_ch_t(&data->beg_ext, begcyl, beghead);
276 	set_ch_t(&data->end_ext, endcyl, endhead);
277 	return rc;
278 }
279 
280 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
281 			       struct dasd_device  *device)
282 {
283 	struct dasd_eckd_private *private;
284 	int rc;
285 
286 	private = (struct dasd_eckd_private *) device->private;
287 	if (!private->rdc_data.facilities.XRC_supported)
288 		return 0;
289 
290 	/* switch on System Time Stamp - needed for XRC Support */
291 	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
292 	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
293 	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
294 
295 	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
296 	/* Ignore return code if sync clock is switched off. */
297 	if (rc == -ENOSYS || rc == -EACCES)
298 		rc = 0;
299 	return rc;
300 }
301 
302 static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
303 			  unsigned int rec_on_trk, int count, int cmd,
304 			  struct dasd_device *device, unsigned int reclen,
305 			  unsigned int tlf)
306 {
307 	struct dasd_eckd_private *private;
308 	int sector;
309 	int dn, d;
310 
311 	private = (struct dasd_eckd_private *) device->private;
312 
313 	memset(data, 0, sizeof(*data));
314 	sector = 0;
315 	if (rec_on_trk) {
316 		switch (private->rdc_data.dev_type) {
317 		case 0x3390:
318 			dn = ceil_quot(reclen + 6, 232);
319 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
320 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
321 			break;
322 		case 0x3380:
323 			d = 7 + ceil_quot(reclen + 12, 32);
324 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
325 			break;
326 		}
327 	}
328 	data->sector = sector;
329 	/* note: the meaning of count depends on the operation:
330 	 *	 for record-based I/O it's the number of records, but for
331 	 *	 track-based I/O it's the number of tracks
332 	 */
333 	data->count = count;
334 	switch (cmd) {
335 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
336 		data->operation.orientation = 0x3;
337 		data->operation.operation = 0x03;
338 		break;
339 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
340 		data->operation.orientation = 0x3;
341 		data->operation.operation = 0x16;
342 		break;
343 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
344 		data->operation.orientation = 0x1;
345 		data->operation.operation = 0x03;
346 		data->count++;
347 		break;
348 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
349 		data->operation.orientation = 0x3;
350 		data->operation.operation = 0x16;
351 		data->count++;
352 		break;
353 	case DASD_ECKD_CCW_WRITE:
354 	case DASD_ECKD_CCW_WRITE_MT:
355 	case DASD_ECKD_CCW_WRITE_KD:
356 	case DASD_ECKD_CCW_WRITE_KD_MT:
357 		data->auxiliary.length_valid = 0x1;
358 		data->length = reclen;
359 		data->operation.operation = 0x01;
360 		break;
361 	case DASD_ECKD_CCW_WRITE_CKD:
362 	case DASD_ECKD_CCW_WRITE_CKD_MT:
363 		data->auxiliary.length_valid = 0x1;
364 		data->length = reclen;
365 		data->operation.operation = 0x03;
366 		break;
367 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
368 		data->auxiliary.length_valid = 0x1;
369 		data->length = reclen;	/* not tlf, as one might think */
370 		data->operation.operation = 0x3F;
371 		data->extended_operation = 0x23;
372 		break;
373 	case DASD_ECKD_CCW_READ:
374 	case DASD_ECKD_CCW_READ_MT:
375 	case DASD_ECKD_CCW_READ_KD:
376 	case DASD_ECKD_CCW_READ_KD_MT:
377 		data->auxiliary.length_valid = 0x1;
378 		data->length = reclen;
379 		data->operation.operation = 0x06;
380 		break;
381 	case DASD_ECKD_CCW_READ_CKD:
382 	case DASD_ECKD_CCW_READ_CKD_MT:
383 		data->auxiliary.length_valid = 0x1;
384 		data->length = reclen;
385 		data->operation.operation = 0x16;
386 		break;
387 	case DASD_ECKD_CCW_READ_COUNT:
388 		data->operation.operation = 0x06;
389 		break;
390 	case DASD_ECKD_CCW_READ_TRACK_DATA:
391 		data->auxiliary.length_valid = 0x1;
392 		data->length = tlf;
393 		data->operation.operation = 0x0C;
394 		break;
395 	case DASD_ECKD_CCW_ERASE:
396 		data->length = reclen;
397 		data->auxiliary.length_valid = 0x1;
398 		data->operation.operation = 0x0b;
399 		break;
400 	default:
401 		DBF_DEV_EVENT(DBF_ERR, device,
402 			    "fill LRE unknown opcode 0x%x", cmd);
403 		BUG();
404 	}
405 	set_ch_t(&data->seek_addr,
406 		 trk / private->rdc_data.trk_per_cyl,
407 		 trk % private->rdc_data.trk_per_cyl);
408 	data->search_arg.cyl = data->seek_addr.cyl;
409 	data->search_arg.head = data->seek_addr.head;
410 	data->search_arg.record = rec_on_trk;
411 }
412 
413 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
414 		      unsigned int trk, unsigned int totrk, int cmd,
415 		      struct dasd_device *basedev, struct dasd_device *startdev,
416 		      unsigned char format, unsigned int rec_on_trk, int count,
417 		      unsigned int blksize, unsigned int tlf)
418 {
419 	struct dasd_eckd_private *basepriv, *startpriv;
420 	struct DE_eckd_data *dedata;
421 	struct LRE_eckd_data *lredata;
422 	u32 begcyl, endcyl;
423 	u16 heads, beghead, endhead;
424 	int rc = 0;
425 
426 	basepriv = (struct dasd_eckd_private *) basedev->private;
427 	startpriv = (struct dasd_eckd_private *) startdev->private;
428 	dedata = &pfxdata->define_extent;
429 	lredata = &pfxdata->locate_record;
430 
431 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
432 	ccw->flags = 0;
433 	ccw->count = sizeof(*pfxdata);
434 	ccw->cda = (__u32) __pa(pfxdata);
435 
436 	memset(pfxdata, 0, sizeof(*pfxdata));
437 	/* prefix data */
438 	if (format > 1) {
439 		DBF_DEV_EVENT(DBF_ERR, basedev,
440 			      "PFX LRE unknown format 0x%x", format);
441 		BUG();
442 		return -EINVAL;
443 	}
444 	pfxdata->format = format;
445 	pfxdata->base_address = basepriv->ned->unit_addr;
446 	pfxdata->base_lss = basepriv->ned->ID;
447 	pfxdata->validity.define_extent = 1;
448 
449 	/* private uid is kept up to date, conf_data may be outdated */
450 	if (startpriv->uid.type != UA_BASE_DEVICE) {
451 		pfxdata->validity.verify_base = 1;
452 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
453 			pfxdata->validity.hyper_pav = 1;
454 	}
455 
456 	/* define extent data (mostly) */
457 	switch (cmd) {
458 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
459 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
460 	case DASD_ECKD_CCW_READ:
461 	case DASD_ECKD_CCW_READ_MT:
462 	case DASD_ECKD_CCW_READ_CKD:
463 	case DASD_ECKD_CCW_READ_CKD_MT:
464 	case DASD_ECKD_CCW_READ_KD:
465 	case DASD_ECKD_CCW_READ_KD_MT:
466 	case DASD_ECKD_CCW_READ_COUNT:
467 		dedata->mask.perm = 0x1;
468 		dedata->attributes.operation = basepriv->attrib.operation;
469 		break;
470 	case DASD_ECKD_CCW_READ_TRACK_DATA:
471 		dedata->mask.perm = 0x1;
472 		dedata->attributes.operation = basepriv->attrib.operation;
473 		dedata->blk_size = 0;
474 		break;
475 	case DASD_ECKD_CCW_WRITE:
476 	case DASD_ECKD_CCW_WRITE_MT:
477 	case DASD_ECKD_CCW_WRITE_KD:
478 	case DASD_ECKD_CCW_WRITE_KD_MT:
479 		dedata->mask.perm = 0x02;
480 		dedata->attributes.operation = basepriv->attrib.operation;
481 		rc = check_XRC_on_prefix(pfxdata, basedev);
482 		break;
483 	case DASD_ECKD_CCW_WRITE_CKD:
484 	case DASD_ECKD_CCW_WRITE_CKD_MT:
485 		dedata->attributes.operation = DASD_BYPASS_CACHE;
486 		rc = check_XRC_on_prefix(pfxdata, basedev);
487 		break;
488 	case DASD_ECKD_CCW_ERASE:
489 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
490 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
491 		dedata->mask.perm = 0x3;
492 		dedata->mask.auth = 0x1;
493 		dedata->attributes.operation = DASD_BYPASS_CACHE;
494 		rc = check_XRC_on_prefix(pfxdata, basedev);
495 		break;
496 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
497 		dedata->mask.perm = 0x02;
498 		dedata->attributes.operation = basepriv->attrib.operation;
499 		dedata->blk_size = blksize;
500 		rc = check_XRC_on_prefix(pfxdata, basedev);
501 		break;
502 	default:
503 		DBF_DEV_EVENT(DBF_ERR, basedev,
504 			    "PFX LRE unknown opcode 0x%x", cmd);
505 		BUG();
506 		return -EINVAL;
507 	}
508 
509 	dedata->attributes.mode = 0x3;	/* ECKD */
510 
511 	if ((basepriv->rdc_data.cu_type == 0x2105 ||
512 	     basepriv->rdc_data.cu_type == 0x2107 ||
513 	     basepriv->rdc_data.cu_type == 0x1750)
514 	    && !(basepriv->uses_cdl && trk < 2))
515 		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
516 
517 	heads = basepriv->rdc_data.trk_per_cyl;
518 	begcyl = trk / heads;
519 	beghead = trk % heads;
520 	endcyl = totrk / heads;
521 	endhead = totrk % heads;
522 
523 	/* check for sequential prestage - enhance cylinder range */
524 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
525 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
526 
527 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
528 			endcyl += basepriv->attrib.nr_cyl;
529 		else
530 			endcyl = (basepriv->real_cyl - 1);
531 	}
532 
533 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
534 	set_ch_t(&dedata->end_ext, endcyl, endhead);
535 
536 	if (format == 1) {
537 		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
538 			      basedev, blksize, tlf);
539 	}
540 
541 	return rc;
542 }
543 
544 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
545 		  unsigned int trk, unsigned int totrk, int cmd,
546 		  struct dasd_device *basedev, struct dasd_device *startdev)
547 {
548 	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
549 			  0, 0, 0, 0, 0);
550 }
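
/*
 * prefix() is the simple variant of prefix_LRE: with format 0 the PFX
 * data carries only the define extent part, whereas format 1 would also
 * fill in the locate record extended data via fill_LRE_data above.
 */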
551 
552 static void
553 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
554 	      unsigned int rec_on_trk, int no_rec, int cmd,
555 	      struct dasd_device * device, int reclen)
556 {
557 	struct dasd_eckd_private *private;
558 	int sector;
559 	int dn, d;
560 
561 	private = (struct dasd_eckd_private *) device->private;
562 
563 	DBF_DEV_EVENT(DBF_INFO, device,
564 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
565 		  trk, rec_on_trk, no_rec, cmd, reclen);
566 
567 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
568 	ccw->flags = 0;
569 	ccw->count = 16;
570 	ccw->cda = (__u32) __pa(data);
571 
572 	memset(data, 0, sizeof(struct LO_eckd_data));
573 	sector = 0;
574 	if (rec_on_trk) {
575 		switch (private->rdc_data.dev_type) {
576 		case 0x3390:
577 			dn = ceil_quot(reclen + 6, 232);
578 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
579 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
580 			break;
581 		case 0x3380:
582 			d = 7 + ceil_quot(reclen + 12, 32);
583 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
584 			break;
585 		}
586 	}
587 	data->sector = sector;
588 	data->count = no_rec;
589 	switch (cmd) {
590 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
591 		data->operation.orientation = 0x3;
592 		data->operation.operation = 0x03;
593 		break;
594 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
595 		data->operation.orientation = 0x3;
596 		data->operation.operation = 0x16;
597 		break;
598 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
599 		data->operation.orientation = 0x1;
600 		data->operation.operation = 0x03;
601 		data->count++;
602 		break;
603 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
604 		data->operation.orientation = 0x3;
605 		data->operation.operation = 0x16;
606 		data->count++;
607 		break;
608 	case DASD_ECKD_CCW_WRITE:
609 	case DASD_ECKD_CCW_WRITE_MT:
610 	case DASD_ECKD_CCW_WRITE_KD:
611 	case DASD_ECKD_CCW_WRITE_KD_MT:
612 		data->auxiliary.last_bytes_used = 0x1;
613 		data->length = reclen;
614 		data->operation.operation = 0x01;
615 		break;
616 	case DASD_ECKD_CCW_WRITE_CKD:
617 	case DASD_ECKD_CCW_WRITE_CKD_MT:
618 		data->auxiliary.last_bytes_used = 0x1;
619 		data->length = reclen;
620 		data->operation.operation = 0x03;
621 		break;
622 	case DASD_ECKD_CCW_READ:
623 	case DASD_ECKD_CCW_READ_MT:
624 	case DASD_ECKD_CCW_READ_KD:
625 	case DASD_ECKD_CCW_READ_KD_MT:
626 		data->auxiliary.last_bytes_used = 0x1;
627 		data->length = reclen;
628 		data->operation.operation = 0x06;
629 		break;
630 	case DASD_ECKD_CCW_READ_CKD:
631 	case DASD_ECKD_CCW_READ_CKD_MT:
632 		data->auxiliary.last_bytes_used = 0x1;
633 		data->length = reclen;
634 		data->operation.operation = 0x16;
635 		break;
636 	case DASD_ECKD_CCW_READ_COUNT:
637 		data->operation.operation = 0x06;
638 		break;
639 	case DASD_ECKD_CCW_ERASE:
640 		data->length = reclen;
641 		data->auxiliary.last_bytes_used = 0x1;
642 		data->operation.operation = 0x0b;
643 		break;
644 	default:
645 		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
646 			      "opcode 0x%x", cmd);
647 	}
648 	set_ch_t(&data->seek_addr,
649 		 trk / private->rdc_data.trk_per_cyl,
650 		 trk % private->rdc_data.trk_per_cyl);
651 	data->search_arg.cyl = data->seek_addr.cyl;
652 	data->search_arg.head = data->seek_addr.head;
653 	data->search_arg.record = rec_on_trk;
654 }
655 
656 /*
657  * Returns 1 if the block is one of the special blocks that needs
658  * to get read/written with the KD variant of the command.
659  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
660  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
661  * Luckily the KD variants differ only by one bit (0x08) from the
662  * normal variant. So don't wonder about code like:
663  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
664  *         ccw->cmd_code |= 0x8;
665  */
666 static inline int
667 dasd_eckd_cdl_special(int blk_per_trk, int recid)
668 {
669 	if (recid < 3)
670 		return 1;
671 	if (recid < blk_per_trk)
672 		return 0;
673 	if (recid < 2 * blk_per_trk)
674 		return 1;
675 	return 0;
676 }
677 
678 /*
679  * Returns the record size for the special blocks of the cdl format.
680  * Only returns something useful if dasd_eckd_cdl_special is true
681  * for the recid.
682  */
683 static inline int
684 dasd_eckd_cdl_reclen(int recid)
685 {
686 	if (recid < 3)
687 		return sizes_trk0[recid];
688 	return LABEL_SIZE;
689 }
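
/*
 * For illustration: with the compatible disk layout the first three
 * records on track 0 carry 28, 148 and 84 bytes of data (sizes_trk0),
 * and the special records on track 1 use LABEL_SIZE (140).  A request
 * builder would then do something like
 *	if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *		count = dasd_eckd_cdl_reclen(recid);
 */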
690 
691 /*
692  * Generate device unique id that specifies the physical device.
693  */
694 static int dasd_eckd_generate_uid(struct dasd_device *device,
695 				  struct dasd_uid *uid)
696 {
697 	struct dasd_eckd_private *private;
698 	int count;
699 
700 	private = (struct dasd_eckd_private *) device->private;
701 	if (!private)
702 		return -ENODEV;
703 	if (!private->ned || !private->gneq)
704 		return -ENODEV;
705 
706 	memset(uid, 0, sizeof(struct dasd_uid));
707 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
708 	       sizeof(uid->vendor) - 1);
709 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
710 	memcpy(uid->serial, private->ned->HDA_location,
711 	       sizeof(uid->serial) - 1);
712 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
713 	uid->ssid = private->gneq->subsystemID;
714 	uid->real_unit_addr = private->ned->unit_addr;
715 	if (private->sneq) {
716 		uid->type = private->sneq->sua_flags;
717 		if (uid->type == UA_BASE_PAV_ALIAS)
718 			uid->base_unit_addr = private->sneq->base_unit_addr;
719 	} else {
720 		uid->type = UA_BASE_DEVICE;
721 	}
722 	if (private->vdsneq) {
723 		for (count = 0; count < 16; count++) {
724 			sprintf(uid->vduit+2*count, "%02x",
725 				private->vdsneq->uit[count]);
726 		}
727 	}
728 	return 0;
729 }
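
/*
 * For illustration only: the fields collected here are what the DASD
 * device map later exposes as the dotted uid string, roughly of the form
 * vendor.serial.ssid.unit_addr (e.g. something like
 * "IBM.750000000ABC01.1234.0a"); the vduit hex string is appended when a
 * virtual device SNEQ is present.
 */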
730 
731 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
732 						    void *rcd_buffer,
733 						    struct ciw *ciw, __u8 lpm)
734 {
735 	struct dasd_ccw_req *cqr;
736 	struct ccw1 *ccw;
737 
738 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
739 				   device);
740 
741 	if (IS_ERR(cqr)) {
742 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
743 			      "Could not allocate RCD request");
744 		return cqr;
745 	}
746 
747 	ccw = cqr->cpaddr;
748 	ccw->cmd_code = ciw->cmd;
749 	ccw->cda = (__u32)(addr_t)rcd_buffer;
750 	ccw->count = ciw->count;
751 
752 	cqr->startdev = device;
753 	cqr->memdev = device;
754 	cqr->block = NULL;
755 	cqr->expires = 10*HZ;
756 	cqr->lpm = lpm;
757 	cqr->retries = 256;
758 	cqr->buildclk = get_clock();
759 	cqr->status = DASD_CQR_FILLED;
760 	return cqr;
761 }
762 
763 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
764 				   void **rcd_buffer,
765 				   int *rcd_buffer_size, __u8 lpm)
766 {
767 	struct ciw *ciw;
768 	char *rcd_buf = NULL;
769 	int ret;
770 	struct dasd_ccw_req *cqr;
771 
772 	/*
773 	 * scan for RCD command in extended SenseID data
774 	 */
775 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
776 	if (!ciw || ciw->cmd == 0) {
777 		ret = -EOPNOTSUPP;
778 		goto out_error;
779 	}
780 	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
781 	if (!rcd_buf) {
782 		ret = -ENOMEM;
783 		goto out_error;
784 	}
785 
786 	/*
787 	 * buffer has to start with EBCDIC "V1.0" to show
788 	 * support for virtual device SNEQ
789 	 */
790 	rcd_buf[0] = 0xE5;
791 	rcd_buf[1] = 0xF1;
792 	rcd_buf[2] = 0x4B;
793 	rcd_buf[3] = 0xF0;
794 	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
795 	if (IS_ERR(cqr)) {
796 		ret =  PTR_ERR(cqr);
797 		goto out_error;
798 	}
799 	ret = dasd_sleep_on(cqr);
800 	/*
801 	 * on success we update the user input parms
802 	 */
803 	dasd_sfree_request(cqr, cqr->memdev);
804 	if (ret)
805 		goto out_error;
806 
807 	*rcd_buffer_size = ciw->count;
808 	*rcd_buffer = rcd_buf;
809 	return 0;
810 out_error:
811 	kfree(rcd_buf);
812 	*rcd_buffer = NULL;
813 	*rcd_buffer_size = 0;
814 	return ret;
815 }
816 
817 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
818 {
819 
820 	struct dasd_sneq *sneq;
821 	int i, count;
822 
823 	private->ned = NULL;
824 	private->sneq = NULL;
825 	private->vdsneq = NULL;
826 	private->gneq = NULL;
827 	count = private->conf_len / sizeof(struct dasd_sneq);
828 	sneq = (struct dasd_sneq *)private->conf_data;
829 	for (i = 0; i < count; ++i) {
830 		if (sneq->flags.identifier == 1 && sneq->format == 1)
831 			private->sneq = sneq;
832 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
833 			private->vdsneq = (struct vd_sneq *)sneq;
834 		else if (sneq->flags.identifier == 2)
835 			private->gneq = (struct dasd_gneq *)sneq;
836 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
837 			private->ned = (struct dasd_ned *)sneq;
838 		sneq++;
839 	}
840 	if (!private->ned || !private->gneq) {
841 		private->ned = NULL;
842 		private->sneq = NULL;
843 		private->vdsneq = NULL;
844 		private->gneq = NULL;
845 		return -EINVAL;
846 	}
847 	return 0;
848 
849 };
850 
851 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
852 {
853 	struct dasd_gneq *gneq;
854 	int i, count, found;
855 
856 	count = conf_len / sizeof(*gneq);
857 	gneq = (struct dasd_gneq *)conf_data;
858 	found = 0;
859 	for (i = 0; i < count; ++i) {
860 		if (gneq->flags.identifier == 2) {
861 			found = 1;
862 			break;
863 		}
864 		gneq++;
865 	}
866 	if (found)
867 		return ((char *)gneq)[18] & 0x07;
868 	else
869 		return 0;
870 }
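
/*
 * Byte 18 of the generic NEQ describes how the path may be used;
 * dasd_eckd_read_conf below adds the path to npm for a value of 0x02
 * and to ppm for 0x03 (presumably the non-preferred and preferred
 * path masks).
 */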
871 
872 static int dasd_eckd_read_conf(struct dasd_device *device)
873 {
874 	void *conf_data;
875 	int conf_len, conf_data_saved;
876 	int rc;
877 	__u8 lpm;
878 	struct dasd_eckd_private *private;
879 	struct dasd_eckd_path *path_data;
880 
881 	private = (struct dasd_eckd_private *) device->private;
882 	path_data = (struct dasd_eckd_path *) &private->path_data;
883 	path_data->opm = ccw_device_get_path_mask(device->cdev);
884 	lpm = 0x80;
885 	conf_data_saved = 0;
886 	/* get configuration data per operational path */
887 	for (lpm = 0x80; lpm; lpm>>= 1) {
888 		if (lpm & path_data->opm){
889 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
890 						     &conf_len, lpm);
891 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
892 				DBF_EVENT(DBF_WARNING,
893 					  "Read configuration data returned "
894 					  "error %d for device: %s", rc,
895 					  dev_name(&device->cdev->dev));
896 				return rc;
897 			}
898 			if (conf_data == NULL) {
899 				DBF_EVENT(DBF_WARNING, "No configuration "
900 					  "data retrieved for device: %s",
901 					  dev_name(&device->cdev->dev));
902 				continue;	/* no error */
903 			}
904 			/* save first valid configuration data */
905 			if (!conf_data_saved) {
906 				kfree(private->conf_data);
907 				private->conf_data = conf_data;
908 				private->conf_len = conf_len;
909 				if (dasd_eckd_identify_conf_parts(private)) {
910 					private->conf_data = NULL;
911 					private->conf_len = 0;
912 					kfree(conf_data);
913 					continue;
914 				}
915 				conf_data_saved++;
916 			}
917 			switch (dasd_eckd_path_access(conf_data, conf_len)) {
918 			case 0x02:
919 				path_data->npm |= lpm;
920 				break;
921 			case 0x03:
922 				path_data->ppm |= lpm;
923 				break;
924 			}
925 			if (conf_data != private->conf_data)
926 				kfree(conf_data);
927 		}
928 	}
929 	return 0;
930 }
931 
932 static int dasd_eckd_read_features(struct dasd_device *device)
933 {
934 	struct dasd_psf_prssd_data *prssdp;
935 	struct dasd_rssd_features *features;
936 	struct dasd_ccw_req *cqr;
937 	struct ccw1 *ccw;
938 	int rc;
939 	struct dasd_eckd_private *private;
940 
941 	private = (struct dasd_eckd_private *) device->private;
942 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
943 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
944 				   (sizeof(struct dasd_psf_prssd_data) +
945 				    sizeof(struct dasd_rssd_features)),
946 				   device);
947 	if (IS_ERR(cqr)) {
948 		DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
949 			  "request for device: %s",
950 			  dev_name(&device->cdev->dev));
951 		return PTR_ERR(cqr);
952 	}
953 	cqr->startdev = device;
954 	cqr->memdev = device;
955 	cqr->block = NULL;
956 	cqr->retries = 256;
957 	cqr->expires = 10 * HZ;
958 
959 	/* Prepare for Read Subsystem Data */
960 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
961 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
962 	prssdp->order = PSF_ORDER_PRSSD;
963 	prssdp->suborder = 0x41;	/* Read Feature Codes */
964 	/* all other bytes of prssdp must be zero */
965 
966 	ccw = cqr->cpaddr;
967 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
968 	ccw->count = sizeof(struct dasd_psf_prssd_data);
969 	ccw->flags |= CCW_FLAG_CC;
970 	ccw->cda = (__u32)(addr_t) prssdp;
971 
972 	/* Read Subsystem Data - feature codes */
973 	features = (struct dasd_rssd_features *) (prssdp + 1);
974 	memset(features, 0, sizeof(struct dasd_rssd_features));
975 
976 	ccw++;
977 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
978 	ccw->count = sizeof(struct dasd_rssd_features);
979 	ccw->cda = (__u32)(addr_t) features;
980 
981 	cqr->buildclk = get_clock();
982 	cqr->status = DASD_CQR_FILLED;
983 	rc = dasd_sleep_on(cqr);
984 	if (rc == 0) {
985 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
986 		features = (struct dasd_rssd_features *) (prssdp + 1);
987 		memcpy(&private->features, features,
988 		       sizeof(struct dasd_rssd_features));
989 	} else
990 		dev_warn(&device->cdev->dev, "Reading device feature codes"
991 			 " failed with rc=%d\n", rc);
992 	dasd_sfree_request(cqr, cqr->memdev);
993 	return rc;
994 }
995 
996 
997 /*
998  * Build CP for Perform Subsystem Function - SSC.
999  */
1000 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1001 						    int enable_pav)
1002 {
1003 	struct dasd_ccw_req *cqr;
1004 	struct dasd_psf_ssc_data *psf_ssc_data;
1005 	struct ccw1 *ccw;
1006 
1007 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1008 				  sizeof(struct dasd_psf_ssc_data),
1009 				  device);
1010 
1011 	if (IS_ERR(cqr)) {
1012 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1013 			   "Could not allocate PSF-SSC request");
1014 		return cqr;
1015 	}
1016 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1017 	psf_ssc_data->order = PSF_ORDER_SSC;
1018 	psf_ssc_data->suborder = 0xc0;
1019 	if (enable_pav) {
1020 		psf_ssc_data->suborder |= 0x08;
1021 		psf_ssc_data->reserved[0] = 0x88;
1022 	}
1023 	ccw = cqr->cpaddr;
1024 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1025 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1026 	ccw->count = 66;
1027 
1028 	cqr->startdev = device;
1029 	cqr->memdev = device;
1030 	cqr->block = NULL;
1031 	cqr->retries = 256;
1032 	cqr->expires = 10*HZ;
1033 	cqr->buildclk = get_clock();
1034 	cqr->status = DASD_CQR_FILLED;
1035 	return cqr;
1036 }
1037 
1038 /*
1039  * Perform Subsystem Function.
1040  * It is necessary to trigger CIO for channel revalidation since this
1041  * call might change behaviour of DASD devices.
1042  */
1043 static int
1044 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1045 {
1046 	struct dasd_ccw_req *cqr;
1047 	int rc;
1048 
1049 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1050 	if (IS_ERR(cqr))
1051 		return PTR_ERR(cqr);
1052 
1053 	rc = dasd_sleep_on(cqr);
1054 	if (!rc)
1055 		/* trigger CIO to reprobe devices */
1056 		css_schedule_reprobe();
1057 	dasd_sfree_request(cqr, cqr->memdev);
1058 	return rc;
1059 }
1060 
1061 /*
1062  * Validate storage server of current device.
1063  */
1064 static int dasd_eckd_validate_server(struct dasd_device *device)
1065 {
1066 	int rc;
1067 	struct dasd_eckd_private *private;
1068 	int enable_pav;
1069 
1070 	if (dasd_nopav || MACHINE_IS_VM)
1071 		enable_pav = 0;
1072 	else
1073 		enable_pav = 1;
1074 	rc = dasd_eckd_psf_ssc(device, enable_pav);
1075 
1076 	/* may be requested feature is not available on server,
1077 	/* the requested feature may not be available on the server,
1078 	 * therefore just report the error and go ahead */
1079 	DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
1080 		  "returned rc=%d for device: %s",
1081 		  private->uid.vendor, private->uid.serial,
1082 		  private->uid.ssid, rc, dev_name(&device->cdev->dev));
1083 	/* RE-Read Configuration Data */
1084 	return dasd_eckd_read_conf(device);
1085 }
1086 
1087 /*
1088  * Check device characteristics.
1089  * If the device is accessible using ECKD discipline, the device is enabled.
1090  */
1091 static int
1092 dasd_eckd_check_characteristics(struct dasd_device *device)
1093 {
1094 	struct dasd_eckd_private *private;
1095 	struct dasd_block *block;
1096 	int is_known, rc;
1097 
1098 	if (!ccw_device_is_pathgroup(device->cdev)) {
1099 		dev_warn(&device->cdev->dev,
1100 			 "A channel path group could not be established\n");
1101 		return -EIO;
1102 	}
1103 	if (!ccw_device_is_multipath(device->cdev)) {
1104 		dev_info(&device->cdev->dev,
1105 			 "The DASD is not operating in multipath mode\n");
1106 	}
1107 	private = (struct dasd_eckd_private *) device->private;
1108 	if (!private) {
1109 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1110 		if (!private) {
1111 			dev_warn(&device->cdev->dev,
1112 				 "Allocating memory for private DASD data "
1113 				 "failed\n");
1114 			return -ENOMEM;
1115 		}
1116 		device->private = (void *) private;
1117 	} else {
1118 		memset(private, 0, sizeof(*private));
1119 	}
1120 	/* Invalidate status of initial analysis. */
1121 	private->init_cqr_status = -1;
1122 	/* Set default cache operations. */
1123 	private->attrib.operation = DASD_NORMAL_CACHE;
1124 	private->attrib.nr_cyl = 0;
1125 
1126 	/* Read Configuration Data */
1127 	rc = dasd_eckd_read_conf(device);
1128 	if (rc)
1129 		goto out_err1;
1130 
1131 	/* Generate device unique id and register in devmap */
1132 	rc = dasd_eckd_generate_uid(device, &private->uid);
1133 	if (rc)
1134 		goto out_err1;
1135 	dasd_set_uid(device->cdev, &private->uid);
1136 
1137 	if (private->uid.type == UA_BASE_DEVICE) {
1138 		block = dasd_alloc_block();
1139 		if (IS_ERR(block)) {
1140 			DBF_EVENT(DBF_WARNING, "could not allocate dasd "
1141 				  "block structure for device: %s",
1142 				  dev_name(&device->cdev->dev));
1143 			rc = PTR_ERR(block);
1144 			goto out_err1;
1145 		}
1146 		device->block = block;
1147 		block->base = device;
1148 	}
1149 
1150 	/* register lcu with alias handling, enable PAV if this is a new lcu */
1151 	is_known = dasd_alias_make_device_known_to_lcu(device);
1152 	if (is_known < 0) {
1153 		rc = is_known;
1154 		goto out_err2;
1155 	}
1156 	if (!is_known) {
1157 		/* new lcu found */
1158 		rc = dasd_eckd_validate_server(device); /* will switch pav on */
1159 		if (rc)
1160 			goto out_err3;
1161 	}
1162 
1163 	/* Read Feature Codes */
1164 	dasd_eckd_read_features(device);
1165 
1166 	/* Read Device Characteristics */
1167 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1168 					 &private->rdc_data, 64);
1169 	if (rc) {
1170 		DBF_EVENT(DBF_WARNING,
1171 			  "Read device characteristics failed, rc=%d for "
1172 			  "device: %s", rc, dev_name(&device->cdev->dev));
1173 		goto out_err3;
1174 	}
1175 	/* find the valid cylinder size */
1176 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1177 	    private->rdc_data.long_no_cyl)
1178 		private->real_cyl = private->rdc_data.long_no_cyl;
1179 	else
1180 		private->real_cyl = private->rdc_data.no_cyl;
1181 
1182 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1183 		 "with %d cylinders, %d heads, %d sectors\n",
1184 		 private->rdc_data.dev_type,
1185 		 private->rdc_data.dev_model,
1186 		 private->rdc_data.cu_type,
1187 		 private->rdc_data.cu_model.model,
1188 		 private->real_cyl,
1189 		 private->rdc_data.trk_per_cyl,
1190 		 private->rdc_data.sec_per_trk);
1191 	return 0;
1192 
1193 out_err3:
1194 	dasd_alias_disconnect_device_from_lcu(device);
1195 out_err2:
1196 	dasd_free_block(device->block);
1197 	device->block = NULL;
1198 out_err1:
1199 	kfree(private->conf_data);
1200 	kfree(device->private);
1201 	device->private = NULL;
1202 	return rc;
1203 }
1204 
1205 static void dasd_eckd_uncheck_device(struct dasd_device *device)
1206 {
1207 	struct dasd_eckd_private *private;
1208 
1209 	private = (struct dasd_eckd_private *) device->private;
1210 	dasd_alias_disconnect_device_from_lcu(device);
1211 	private->ned = NULL;
1212 	private->sneq = NULL;
1213 	private->vdsneq = NULL;
1214 	private->gneq = NULL;
1215 	private->conf_len = 0;
1216 	kfree(private->conf_data);
1217 	private->conf_data = NULL;
1218 }
1219 
1220 static struct dasd_ccw_req *
1221 dasd_eckd_analysis_ccw(struct dasd_device *device)
1222 {
1223 	struct dasd_eckd_private *private;
1224 	struct eckd_count *count_data;
1225 	struct LO_eckd_data *LO_data;
1226 	struct dasd_ccw_req *cqr;
1227 	struct ccw1 *ccw;
1228 	int cplength, datasize;
1229 	int i;
1230 
1231 	private = (struct dasd_eckd_private *) device->private;
1232 
1233 	cplength = 8;
1234 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1235 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1236 	if (IS_ERR(cqr))
1237 		return cqr;
1238 	ccw = cqr->cpaddr;
1239 	/* Define extent for the first 3 tracks. */
1240 	define_extent(ccw++, cqr->data, 0, 2,
1241 		      DASD_ECKD_CCW_READ_COUNT, device);
1242 	LO_data = cqr->data + sizeof(struct DE_eckd_data);
1243 	/* Locate record for the first 4 records on track 0. */
1244 	ccw[-1].flags |= CCW_FLAG_CC;
1245 	locate_record(ccw++, LO_data++, 0, 0, 4,
1246 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1247 
1248 	count_data = private->count_area;
1249 	for (i = 0; i < 4; i++) {
1250 		ccw[-1].flags |= CCW_FLAG_CC;
1251 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1252 		ccw->flags = 0;
1253 		ccw->count = 8;
1254 		ccw->cda = (__u32)(addr_t) count_data;
1255 		ccw++;
1256 		count_data++;
1257 	}
1258 
1259 	/* Locate record for the first record on track 2. */
1260 	ccw[-1].flags |= CCW_FLAG_CC;
1261 	locate_record(ccw++, LO_data++, 2, 0, 1,
1262 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1263 	/* Read count ccw. */
1264 	ccw[-1].flags |= CCW_FLAG_CC;
1265 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1266 	ccw->flags = 0;
1267 	ccw->count = 8;
1268 	ccw->cda = (__u32)(addr_t) count_data;
1269 
1270 	cqr->block = NULL;
1271 	cqr->startdev = device;
1272 	cqr->memdev = device;
1273 	cqr->retries = 255;
1274 	cqr->buildclk = get_clock();
1275 	cqr->status = DASD_CQR_FILLED;
1276 	return cqr;
1277 }
1278 
1279 /* differentiate between 'no record found' and any other error */
1280 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1281 {
1282 	char *sense;
1283 	if (init_cqr->status == DASD_CQR_DONE)
1284 		return INIT_CQR_OK;
1285 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1286 		 init_cqr->status == DASD_CQR_FAILED) {
1287 		sense = dasd_get_sense(&init_cqr->irb);
1288 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1289 			return INIT_CQR_UNFORMATTED;
1290 		else
1291 			return INIT_CQR_ERROR;
1292 	} else
1293 		return INIT_CQR_ERROR;
1294 }
1295 
1296 /*
1297  * This is the callback function for the init_analysis cqr. It saves
1298  * the status of the initial analysis ccw before it frees it and kicks
1299  * the device to continue the startup sequence. This will call
1300  * dasd_eckd_do_analysis again (if the device has not been marked
1301  * for deletion in the meantime).
1302  */
1303 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1304 					void *data)
1305 {
1306 	struct dasd_eckd_private *private;
1307 	struct dasd_device *device;
1308 
1309 	device = init_cqr->startdev;
1310 	private = (struct dasd_eckd_private *) device->private;
1311 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1312 	dasd_sfree_request(init_cqr, device);
1313 	dasd_kick_device(device);
1314 }
1315 
1316 static int dasd_eckd_start_analysis(struct dasd_block *block)
1317 {
1318 	struct dasd_eckd_private *private;
1319 	struct dasd_ccw_req *init_cqr;
1320 
1321 	private = (struct dasd_eckd_private *) block->base->private;
1322 	init_cqr = dasd_eckd_analysis_ccw(block->base);
1323 	if (IS_ERR(init_cqr))
1324 		return PTR_ERR(init_cqr);
1325 	init_cqr->callback = dasd_eckd_analysis_callback;
1326 	init_cqr->callback_data = NULL;
1327 	init_cqr->expires = 5*HZ;
1328 	/* first try without ERP, so we can later handle unformatted
1329 	 * devices as a special case
1330 	 */
1331 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1332 	init_cqr->retries = 0;
1333 	dasd_add_request_head(init_cqr);
1334 	return -EAGAIN;
1335 }
1336 
1337 static int dasd_eckd_end_analysis(struct dasd_block *block)
1338 {
1339 	struct dasd_device *device;
1340 	struct dasd_eckd_private *private;
1341 	struct eckd_count *count_area;
1342 	unsigned int sb, blk_per_trk;
1343 	int status, i;
1344 	struct dasd_ccw_req *init_cqr;
1345 
1346 	device = block->base;
1347 	private = (struct dasd_eckd_private *) device->private;
1348 	status = private->init_cqr_status;
1349 	private->init_cqr_status = -1;
1350 	if (status == INIT_CQR_ERROR) {
1351 		/* try again, this time with full ERP */
1352 		init_cqr = dasd_eckd_analysis_ccw(device);
1353 		dasd_sleep_on(init_cqr);
1354 		status = dasd_eckd_analysis_evaluation(init_cqr);
1355 		dasd_sfree_request(init_cqr, device);
1356 	}
1357 
1358 	if (status == INIT_CQR_UNFORMATTED) {
1359 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1360 		return -EMEDIUMTYPE;
1361 	} else if (status == INIT_CQR_ERROR) {
1362 		dev_err(&device->cdev->dev,
1363 			"Detecting the DASD disk layout failed because "
1364 			"of an I/O error\n");
1365 		return -EIO;
1366 	}
1367 
1368 	private->uses_cdl = 1;
1369 	/* Check Track 0 for Compatible Disk Layout */
1370 	count_area = NULL;
1371 	for (i = 0; i < 3; i++) {
1372 		if (private->count_area[i].kl != 4 ||
1373 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1374 			private->uses_cdl = 0;
1375 			break;
1376 		}
1377 	}
1378 	if (i == 3)
1379 		count_area = &private->count_area[4];
1380 
1381 	if (private->uses_cdl == 0) {
1382 		for (i = 0; i < 5; i++) {
1383 			if ((private->count_area[i].kl != 0) ||
1384 			    (private->count_area[i].dl !=
1385 			     private->count_area[0].dl))
1386 				break;
1387 		}
1388 		if (i == 5)
1389 			count_area = &private->count_area[0];
1390 	} else {
1391 		if (private->count_area[3].record == 1)
1392 			dev_warn(&device->cdev->dev,
1393 				 "Track 0 has no records following the VTOC\n");
1394 	}
1395 	if (count_area != NULL && count_area->kl == 0) {
1396 		/* we found nothing violating our disk layout */
1397 		if (dasd_check_blocksize(count_area->dl) == 0)
1398 			block->bp_block = count_area->dl;
1399 	}
1400 	if (block->bp_block == 0) {
1401 		dev_warn(&device->cdev->dev,
1402 			 "The disk layout of the DASD is not supported\n");
1403 		return -EMEDIUMTYPE;
1404 	}
1405 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
1406 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
1407 		block->s2b_shift++;
1408 
1409 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1410 	block->blocks = (private->real_cyl *
1411 			  private->rdc_data.trk_per_cyl *
1412 			  blk_per_trk);
1413 
1414 	dev_info(&device->cdev->dev,
1415 		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1416 		 "%s\n", (block->bp_block >> 10),
1417 		 ((private->real_cyl *
1418 		   private->rdc_data.trk_per_cyl *
1419 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
1420 		 ((blk_per_trk * block->bp_block) >> 10),
1421 		 private->uses_cdl ?
1422 		 "compatible disk layout" : "linux disk layout");
1423 
1424 	return 0;
1425 }
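
/*
 * Rough size sketch (assuming the classic 3390 model 3 geometry of 3339
 * cylinders with 15 tracks each): with 4096 byte blocks, 12 blocks fit
 * on a track, so block->blocks becomes 3339 * 15 * 12 = 601020, i.e.
 * roughly 2.3 GiB.
 */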
1426 
1427 static int dasd_eckd_do_analysis(struct dasd_block *block)
1428 {
1429 	struct dasd_eckd_private *private;
1430 
1431 	private = (struct dasd_eckd_private *) block->base->private;
1432 	if (private->init_cqr_status < 0)
1433 		return dasd_eckd_start_analysis(block);
1434 	else
1435 		return dasd_eckd_end_analysis(block);
1436 }
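
/*
 * In other words: while init_cqr_status is still -1 the first call only
 * queues the analysis channel program and returns -EAGAIN; after its
 * callback has stored a status and kicked the device, the next call
 * evaluates the count data and sets up the block geometry.
 */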
1437 
1438 static int dasd_eckd_ready_to_online(struct dasd_device *device)
1439 {
1440 	return dasd_alias_add_device(device);
1441 };
1442 
1443 static int dasd_eckd_online_to_ready(struct dasd_device *device)
1444 {
1445 	return dasd_alias_remove_device(device);
1446 };
1447 
1448 static int
1449 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1450 {
1451 	struct dasd_eckd_private *private;
1452 
1453 	private = (struct dasd_eckd_private *) block->base->private;
1454 	if (dasd_check_blocksize(block->bp_block) == 0) {
1455 		geo->sectors = recs_per_track(&private->rdc_data,
1456 					      0, block->bp_block);
1457 	}
1458 	geo->cylinders = private->rdc_data.no_cyl;
1459 	geo->heads = private->rdc_data.trk_per_cyl;
1460 	return 0;
1461 }
1462 
1463 static struct dasd_ccw_req *
1464 dasd_eckd_format_device(struct dasd_device * device,
1465 			struct format_data_t * fdata)
1466 {
1467 	struct dasd_eckd_private *private;
1468 	struct dasd_ccw_req *fcp;
1469 	struct eckd_count *ect;
1470 	struct ccw1 *ccw;
1471 	void *data;
1472 	int rpt;
1473 	struct ch_t address;
1474 	int cplength, datasize;
1475 	int i;
1476 	int intensity = 0;
1477 	int r0_perm;
1478 
1479 	private = (struct dasd_eckd_private *) device->private;
1480 	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1481 	set_ch_t(&address,
1482 		 fdata->start_unit / private->rdc_data.trk_per_cyl,
1483 		 fdata->start_unit % private->rdc_data.trk_per_cyl);
1484 
1485 	/* Sanity checks. */
1486 	if (fdata->start_unit >=
1487 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
1488 		dev_warn(&device->cdev->dev, "Start track number %d used in "
1489 			 "formatting is too big\n", fdata->start_unit);
1490 		return ERR_PTR(-EINVAL);
1491 	}
1492 	if (fdata->start_unit > fdata->stop_unit) {
1493 		dev_warn(&device->cdev->dev, "Start track %d used in "
1494 			 "formatting exceeds end track\n", fdata->start_unit);
1495 		return ERR_PTR(-EINVAL);
1496 	}
1497 	if (dasd_check_blocksize(fdata->blksize) != 0) {
1498 		dev_warn(&device->cdev->dev,
1499 			 "The DASD cannot be formatted with block size %d\n",
1500 			 fdata->blksize);
1501 		return ERR_PTR(-EINVAL);
1502 	}
1503 
1504 	/*
1505 	 * fdata->intensity is a bit string that tells us what to do:
1506 	 *   Bit 0: write record zero
1507 	 *   Bit 1: write home address, currently not supported
1508 	 *   Bit 2: invalidate tracks
1509 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
1510 	 *   Bit 4: do not allow storage subsystem to modify record zero
1511 	 * Only some bit combinations make sense.
1512 	 */
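	/*
	 * E.g. fdata->intensity == 0x09 requests "write record zero and
	 * format the track using the compatible disk layout", while 0x14
	 * requests "invalidate the track, but do not allow the storage
	 * subsystem to modify record zero" (handled via r0_perm below).
	 */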
1513 	if (fdata->intensity & 0x10) {
1514 		r0_perm = 0;
1515 		intensity = fdata->intensity & ~0x10;
1516 	} else {
1517 		r0_perm = 1;
1518 		intensity = fdata->intensity;
1519 	}
1520 	switch (intensity) {
1521 	case 0x00:	/* Normal format */
1522 	case 0x08:	/* Normal format, use cdl. */
1523 		cplength = 2 + rpt;
1524 		datasize = sizeof(struct DE_eckd_data) +
1525 			sizeof(struct LO_eckd_data) +
1526 			rpt * sizeof(struct eckd_count);
1527 		break;
1528 	case 0x01:	/* Write record zero and format track. */
1529 	case 0x09:	/* Write record zero and format track, use cdl. */
1530 		cplength = 3 + rpt;
1531 		datasize = sizeof(struct DE_eckd_data) +
1532 			sizeof(struct LO_eckd_data) +
1533 			sizeof(struct eckd_count) +
1534 			rpt * sizeof(struct eckd_count);
1535 		break;
1536 	case 0x04:	/* Invalidate track. */
1537 	case 0x0c:	/* Invalidate track, use cdl. */
1538 		cplength = 3;
1539 		datasize = sizeof(struct DE_eckd_data) +
1540 			sizeof(struct LO_eckd_data) +
1541 			sizeof(struct eckd_count);
1542 		break;
1543 	default:
1544 		dev_warn(&device->cdev->dev, "An I/O control call used "
1545 			 "incorrect flags 0x%x\n", fdata->intensity);
1546 		return ERR_PTR(-EINVAL);
1547 	}
1548 	/* Allocate the format ccw request. */
1549 	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1550 	if (IS_ERR(fcp))
1551 		return fcp;
1552 
1553 	data = fcp->data;
1554 	ccw = fcp->cpaddr;
1555 
1556 	switch (intensity & ~0x08) {
1557 	case 0x00: /* Normal format. */
1558 		define_extent(ccw++, (struct DE_eckd_data *) data,
1559 			      fdata->start_unit, fdata->start_unit,
1560 			      DASD_ECKD_CCW_WRITE_CKD, device);
1561 		/* grant subsystem permission to format R0 */
1562 		if (r0_perm)
1563 			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
1564 		data += sizeof(struct DE_eckd_data);
1565 		ccw[-1].flags |= CCW_FLAG_CC;
1566 		locate_record(ccw++, (struct LO_eckd_data *) data,
1567 			      fdata->start_unit, 0, rpt,
1568 			      DASD_ECKD_CCW_WRITE_CKD, device,
1569 			      fdata->blksize);
1570 		data += sizeof(struct LO_eckd_data);
1571 		break;
1572 	case 0x01: /* Write record zero + format track. */
1573 		define_extent(ccw++, (struct DE_eckd_data *) data,
1574 			      fdata->start_unit, fdata->start_unit,
1575 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1576 			      device);
1577 		data += sizeof(struct DE_eckd_data);
1578 		ccw[-1].flags |= CCW_FLAG_CC;
1579 		locate_record(ccw++, (struct LO_eckd_data *) data,
1580 			      fdata->start_unit, 0, rpt + 1,
1581 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1582 			      device->block->bp_block);
1583 		data += sizeof(struct LO_eckd_data);
1584 		break;
1585 	case 0x04: /* Invalidate track. */
1586 		define_extent(ccw++, (struct DE_eckd_data *) data,
1587 			      fdata->start_unit, fdata->start_unit,
1588 			      DASD_ECKD_CCW_WRITE_CKD, device);
1589 		data += sizeof(struct DE_eckd_data);
1590 		ccw[-1].flags |= CCW_FLAG_CC;
1591 		locate_record(ccw++, (struct LO_eckd_data *) data,
1592 			      fdata->start_unit, 0, 1,
1593 			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
1594 		data += sizeof(struct LO_eckd_data);
1595 		break;
1596 	}
1597 	if (intensity & 0x01) {	/* write record zero */
1598 		ect = (struct eckd_count *) data;
1599 		data += sizeof(struct eckd_count);
1600 		ect->cyl = address.cyl;
1601 		ect->head = address.head;
1602 		ect->record = 0;
1603 		ect->kl = 0;
1604 		ect->dl = 8;
1605 		ccw[-1].flags |= CCW_FLAG_CC;
1606 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1607 		ccw->flags = CCW_FLAG_SLI;
1608 		ccw->count = 8;
1609 		ccw->cda = (__u32)(addr_t) ect;
1610 		ccw++;
1611 	}
1612 	if ((intensity & ~0x08) & 0x04) {	/* erase track */
1613 		ect = (struct eckd_count *) data;
1614 		data += sizeof(struct eckd_count);
1615 		ect->cyl = address.cyl;
1616 		ect->head = address.head;
1617 		ect->record = 1;
1618 		ect->kl = 0;
1619 		ect->dl = 0;
1620 		ccw[-1].flags |= CCW_FLAG_CC;
1621 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1622 		ccw->flags = CCW_FLAG_SLI;
1623 		ccw->count = 8;
1624 		ccw->cda = (__u32)(addr_t) ect;
1625 	} else {		/* write remaining records */
1626 		for (i = 0; i < rpt; i++) {
1627 			ect = (struct eckd_count *) data;
1628 			data += sizeof(struct eckd_count);
1629 			ect->cyl = address.cyl;
1630 			ect->head = address.head;
1631 			ect->record = i + 1;
1632 			ect->kl = 0;
1633 			ect->dl = fdata->blksize;
1634 			/* Check for special tracks 0-1 when formatting CDL */
1635 			if ((intensity & 0x08) &&
1636 			    fdata->start_unit == 0) {
1637 				if (i < 3) {
1638 					ect->kl = 4;
1639 					ect->dl = sizes_trk0[i] - 4;
1640 				}
1641 			}
1642 			if ((intensity & 0x08) &&
1643 			    fdata->start_unit == 1) {
1644 				ect->kl = 44;
1645 				ect->dl = LABEL_SIZE - 44;
1646 			}
1647 			ccw[-1].flags |= CCW_FLAG_CC;
1648 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1649 			ccw->flags = CCW_FLAG_SLI;
1650 			ccw->count = 8;
1651 			ccw->cda = (__u32)(addr_t) ect;
1652 			ccw++;
1653 		}
1654 	}
1655 	fcp->startdev = device;
1656 	fcp->memdev = device;
1657 	fcp->retries = 256;
1658 	fcp->buildclk = get_clock();
1659 	fcp->status = DASD_CQR_FILLED;
1660 	return fcp;
1661 }
1662 
1663 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1664 {
1665 	cqr->status = DASD_CQR_FILLED;
1666 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
1667 		dasd_eckd_reset_ccw_to_base_io(cqr);
1668 		cqr->startdev = cqr->block->base;
1669 	}
1670 };
1671 
1672 static dasd_erp_fn_t
1673 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1674 {
1675 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1676 	struct ccw_device *cdev = device->cdev;
1677 
1678 	switch (cdev->id.cu_type) {
1679 	case 0x3990:
1680 	case 0x2105:
1681 	case 0x2107:
1682 	case 0x1750:
1683 		return dasd_3990_erp_action;
1684 	case 0x9343:
1685 	case 0x3880:
1686 	default:
1687 		return dasd_default_erp_action;
1688 	}
1689 }
1690 
1691 static dasd_erp_fn_t
1692 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1693 {
1694 	return dasd_default_erp_postaction;
1695 }
1696 
1697 
1698 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1699 						   struct irb *irb)
1700 {
1701 	char mask;
1702 	char *sense = NULL;
1703 
1704 	/* first of all check for state change pending interrupt */
1705 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1706 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
1707 		dasd_generic_handle_state_change(device);
1708 		return;
1709 	}
1710 
1711 	/* summary unit check */
1712 	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1713 	    (irb->ecw[7] == 0x0D)) {
1714 		dasd_alias_handle_summary_unit_check(device, irb);
1715 		return;
1716 	}
1717 
1718 	sense = dasd_get_sense(irb);
1719 	/* service information message SIM */
1720 	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1721 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1722 		dasd_3990_erp_handle_sim(device, sense);
1723 		dasd_schedule_device_bh(device);
1724 		return;
1725 	}
1726 
1727 	if ((scsw_cc(&irb->scsw) == 1) &&
1728 	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1729 	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
1730 	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
1731 		/* fake irb: do nothing, they are handled elsewhere */
1732 		dasd_schedule_device_bh(device);
1733 		return;
1734 	}
1735 
1736 	if (!sense) {
1737 		/* just report other unsolicited interrupts */
1738 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1739 			    "unsolicited interrupt received");
1740 	} else {
1741 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1742 			    "unsolicited interrupt received "
1743 			    "(sense available)");
1744 		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1745 	}
1746 
1747 	dasd_schedule_device_bh(device);
1748 	return;
1749 };
1750 
1751 
1752 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1753 					       struct dasd_device *startdev,
1754 					       struct dasd_block *block,
1755 					       struct request *req,
1756 					       sector_t first_rec,
1757 					       sector_t last_rec,
1758 					       sector_t first_trk,
1759 					       sector_t last_trk,
1760 					       unsigned int first_offs,
1761 					       unsigned int last_offs,
1762 					       unsigned int blk_per_trk,
1763 					       unsigned int blksize)
1764 {
1765 	struct dasd_eckd_private *private;
1766 	unsigned long *idaws;
1767 	struct LO_eckd_data *LO_data;
1768 	struct dasd_ccw_req *cqr;
1769 	struct ccw1 *ccw;
1770 	struct req_iterator iter;
1771 	struct bio_vec *bv;
1772 	char *dst;
1773 	unsigned int off;
1774 	int count, cidaw, cplength, datasize;
1775 	sector_t recid;
1776 	unsigned char cmd, rcmd;
1777 	int use_prefix;
1778 	struct dasd_device *basedev;
1779 
1780 	basedev = block->base;
1781 	private = (struct dasd_eckd_private *) basedev->private;
1782 	if (rq_data_dir(req) == READ)
1783 		cmd = DASD_ECKD_CCW_READ_MT;
1784 	else if (rq_data_dir(req) == WRITE)
1785 		cmd = DASD_ECKD_CCW_WRITE_MT;
1786 	else
1787 		return ERR_PTR(-EINVAL);
1788 
1789 	/* Check struct bio and count the number of blocks for the request. */
1790 	count = 0;
1791 	cidaw = 0;
1792 	rq_for_each_segment(bv, req, iter) {
1793 		if (bv->bv_len & (blksize - 1))
1794 			/* Eckd can only do full blocks. */
1795 			return ERR_PTR(-EINVAL);
1796 		count += bv->bv_len >> (block->s2b_shift + 9);
1797 #if defined(CONFIG_64BIT)
1798 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1799 			cidaw += bv->bv_len >> (block->s2b_shift + 9);
1800 #endif
1801 	}
1802 	/* Paranoia. */
1803 	if (count != last_rec - first_rec + 1)
1804 		return ERR_PTR(-EINVAL);
1805 
1806 	/* use the prefix command if available */
1807 	use_prefix = private->features.feature[8] & 0x01;
1808 	if (use_prefix) {
1809 		/* 1x prefix + number of blocks */
1810 		cplength = 2 + count;
1811 		/* 1x prefix + 1x locate record + cidaws*sizeof(long) */
1812 		datasize = sizeof(struct PFX_eckd_data) +
1813 			sizeof(struct LO_eckd_data) +
1814 			cidaw * sizeof(unsigned long);
1815 	} else {
1816 		/* 1x define extent + 1x locate record + number of blocks */
1817 		cplength = 2 + count;
1818 		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1819 		datasize = sizeof(struct DE_eckd_data) +
1820 			sizeof(struct LO_eckd_data) +
1821 			cidaw * sizeof(unsigned long);
1822 	}
1823 	/* Find out the number of additional locate record ccws for cdl. */
1824 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1825 		if (last_rec >= 2*blk_per_trk)
1826 			count = 2*blk_per_trk - first_rec;
1827 		cplength += count;
1828 		datasize += count*sizeof(struct LO_eckd_data);
1829 	}
1830 	/* Allocate the ccw request. */
1831 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1832 				   startdev);
1833 	if (IS_ERR(cqr))
1834 		return cqr;
1835 	ccw = cqr->cpaddr;
1836 	/* First ccw is define extent or prefix. */
1837 	if (use_prefix) {
1838 		if (prefix(ccw++, cqr->data, first_trk,
1839 			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
1840 			/* Clock not in sync and XRC is enabled.
1841 			 * Try again later.
1842 			 */
1843 			dasd_sfree_request(cqr, startdev);
1844 			return ERR_PTR(-EAGAIN);
1845 		}
1846 		idaws = (unsigned long *) (cqr->data +
1847 					   sizeof(struct PFX_eckd_data));
1848 	} else {
1849 		if (define_extent(ccw++, cqr->data, first_trk,
1850 				  last_trk, cmd, startdev) == -EAGAIN) {
1851 			/* Clock not in sync and XRC is enabled.
1852 			 * Try again later.
1853 			 */
1854 			dasd_sfree_request(cqr, startdev);
1855 			return ERR_PTR(-EAGAIN);
1856 		}
1857 		idaws = (unsigned long *) (cqr->data +
1858 					   sizeof(struct DE_eckd_data));
1859 	}
1860 	/* Build locate_record + read/write ccws. */
1861 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1862 	recid = first_rec;
1863 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1864 		/* Only standard blocks so there is just one locate record. */
1865 		ccw[-1].flags |= CCW_FLAG_CC;
1866 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1867 			      last_rec - recid + 1, cmd, basedev, blksize);
1868 	}
1869 	rq_for_each_segment(bv, req, iter) {
1870 		dst = page_address(bv->bv_page) + bv->bv_offset;
1871 		if (dasd_page_cache) {
1872 			char *copy = kmem_cache_alloc(dasd_page_cache,
1873 						      GFP_DMA | __GFP_NOWARN);
1874 			if (copy && rq_data_dir(req) == WRITE)
1875 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1876 			if (copy)
1877 				dst = copy + bv->bv_offset;
1878 		}
1879 		for (off = 0; off < bv->bv_len; off += blksize) {
1880 			sector_t trkid = recid;
1881 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
1882 			rcmd = cmd;
1883 			count = blksize;
1884 			/* Locate record for cdl special block ? */
1885 			if (private->uses_cdl && recid < 2*blk_per_trk) {
1886 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1887 					rcmd |= 0x8;
1888 					count = dasd_eckd_cdl_reclen(recid);
1889 					if (count < blksize &&
1890 					    rq_data_dir(req) == READ)
1891 						memset(dst + count, 0xe5,
1892 						       blksize - count);
1893 				}
1894 				ccw[-1].flags |= CCW_FLAG_CC;
1895 				locate_record(ccw++, LO_data++,
1896 					      trkid, recoffs + 1,
1897 					      1, rcmd, basedev, count);
1898 			}
1899 			/* Locate record for standard blocks ? */
1900 			if (private->uses_cdl && recid == 2*blk_per_trk) {
1901 				ccw[-1].flags |= CCW_FLAG_CC;
1902 				locate_record(ccw++, LO_data++,
1903 					      trkid, recoffs + 1,
1904 					      last_rec - recid + 1,
1905 					      cmd, basedev, count);
1906 			}
1907 			/* Read/write ccw. */
1908 			ccw[-1].flags |= CCW_FLAG_CC;
1909 			ccw->cmd_code = rcmd;
1910 			ccw->count = count;
1911 			if (idal_is_needed(dst, blksize)) {
1912 				ccw->cda = (__u32)(addr_t) idaws;
1913 				ccw->flags = CCW_FLAG_IDA;
1914 				idaws = idal_create_words(idaws, dst, blksize);
1915 			} else {
1916 				ccw->cda = (__u32)(addr_t) dst;
1917 				ccw->flags = 0;
1918 			}
1919 			ccw++;
1920 			dst += blksize;
1921 			recid++;
1922 		}
1923 	}
1924 	if (blk_noretry_request(req) ||
1925 	    block->base->features & DASD_FEATURE_FAILFAST)
1926 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1927 	cqr->startdev = startdev;
1928 	cqr->memdev = startdev;
1929 	cqr->block = block;
1930 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
1931 	cqr->lpm = private->path_data.ppm;
1932 	cqr->retries = 256;
1933 	cqr->buildclk = get_clock();
1934 	cqr->status = DASD_CQR_FILLED;
1935 	return cqr;
1936 }
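
/*
 * Illustrative sketch, not used by the driver: how a linear record id is
 * split into a track number and a record offset, which is what
 * sector_div(trkid, blk_per_trk) does in the loop above.  The numbers in
 * the comment assume blk_per_trk = 12 (a plausible value for 4KB blocks
 * on a 3390); the real value comes from recs_per_track().
 */
static inline void dasd_eckd_example_rec_to_trk(unsigned long long recid,
						unsigned int blk_per_trk,
						unsigned long long *trkid,
						unsigned int *recoffs)
{
	*trkid = recid / blk_per_trk;	/* track that holds the record */
	*recoffs = recid % blk_per_trk;	/* record offset on that track */
	/*
	 * Example: recid = 27, blk_per_trk = 12
	 *   -> trkid = 2, recoffs = 3, i.e. the 4th record on track 2
	 *      (locate_record() above is passed recoffs + 1).
	 */
}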
1937 
1938 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1939 					       struct dasd_device *startdev,
1940 					       struct dasd_block *block,
1941 					       struct request *req,
1942 					       sector_t first_rec,
1943 					       sector_t last_rec,
1944 					       sector_t first_trk,
1945 					       sector_t last_trk,
1946 					       unsigned int first_offs,
1947 					       unsigned int last_offs,
1948 					       unsigned int blk_per_trk,
1949 					       unsigned int blksize)
1950 {
1951 	struct dasd_eckd_private *private;
1952 	unsigned long *idaws;
1953 	struct dasd_ccw_req *cqr;
1954 	struct ccw1 *ccw;
1955 	struct req_iterator iter;
1956 	struct bio_vec *bv;
1957 	char *dst, *idaw_dst;
1958 	unsigned int cidaw, cplength, datasize;
1959 	unsigned int tlf;
1960 	sector_t recid;
1961 	unsigned char cmd;
1962 	struct dasd_device *basedev;
1963 	unsigned int trkcount, count, count_to_trk_end;
1964 	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
1965 	unsigned char new_track, end_idaw;
1966 	sector_t trkid;
1967 	unsigned int recoffs;
1968 
1969 	basedev = block->base;
1970 	private = (struct dasd_eckd_private *) basedev->private;
1971 	if (rq_data_dir(req) == READ)
1972 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
1973 	else if (rq_data_dir(req) == WRITE)
1974 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
1975 	else
1976 		return ERR_PTR(-EINVAL);
1977 
1978 	/* Track-based I/O needs IDAWs for each page, and not just for
1979 	 * 64 bit addresses. We need additional idals for pages
1980 	 * that get filled from two tracks, so we use the number
1981 	 * of records as upper limit.
1982 	 */
1983 	cidaw = last_rec - first_rec + 1;
1984 	trkcount = last_trk - first_trk + 1;
1985 
1986 	/* 1x prefix + one read/write ccw per track */
1987 	cplength = 1 + trkcount;
1988 
1989 	/* on 31-bit we need space for two 32 bit addresses per page,
1990 	 * on 64-bit one 64 bit address
1991 	 */
1992 	datasize = sizeof(struct PFX_eckd_data) +
1993 		cidaw * sizeof(unsigned long long);
1994 
1995 	/* Allocate the ccw request. */
1996 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1997 				   startdev);
1998 	if (IS_ERR(cqr))
1999 		return cqr;
2000 	ccw = cqr->cpaddr;
2001 	/* transfer length factor: how many bytes to read from the last track */
2002 	if (first_trk == last_trk)
2003 		tlf = last_offs - first_offs + 1;
2004 	else
2005 		tlf = last_offs + 1;
2006 	tlf *= blksize;
2007 
2008 	if (prefix_LRE(ccw++, cqr->data, first_trk,
2009 		       last_trk, cmd, basedev, startdev,
2010 		       1 /* format */, first_offs + 1,
2011 		       trkcount, blksize,
2012 		       tlf) == -EAGAIN) {
2013 		/* Clock not in sync and XRC is enabled.
2014 		 * Try again later.
2015 		 */
2016 		dasd_sfree_request(cqr, startdev);
2017 		return ERR_PTR(-EAGAIN);
2018 	}
2019 
2020 	/*
2021 	 * The translation of request into ccw programs must meet the
2022 	 * following conditions:
2023 	 * - all idaws but the first and the last must address full pages
2024 	 *   (or 2K blocks on 31-bit)
2025 	 * - the scope of a ccw and its idal ends with the track boundaries
2026 	 */
2027 	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
2028 	recid = first_rec;
2029 	new_track = 1;
2030 	end_idaw = 0;
2031 	len_to_track_end = 0;
2032 	idaw_dst = 0;
2033 	idaw_len = 0;
2034 	rq_for_each_segment(bv, req, iter) {
2035 		dst = page_address(bv->bv_page) + bv->bv_offset;
2036 		seg_len = bv->bv_len;
2037 		while (seg_len) {
2038 			if (new_track) {
2039 				trkid = recid;
2040 				recoffs = sector_div(trkid, blk_per_trk);
2041 				count_to_trk_end = blk_per_trk - recoffs;
2042 				count = min((last_rec - recid + 1),
2043 					    (sector_t)count_to_trk_end);
2044 				len_to_track_end = count * blksize;
2045 				ccw[-1].flags |= CCW_FLAG_CC;
2046 				ccw->cmd_code = cmd;
2047 				ccw->count = len_to_track_end;
2048 				ccw->cda = (__u32)(addr_t)idaws;
2049 				ccw->flags = CCW_FLAG_IDA;
2050 				ccw++;
2051 				recid += count;
2052 				new_track = 0;
2053 				/* first idaw for a ccw may start anywhere */
2054 				if (!idaw_dst)
2055 					idaw_dst = dst;
2056 			}
2057 			/* If we start a new idaw, we must make sure that it
2058 			 * starts on an IDA_BLOCK_SIZE boundary.
2059 			 * If we continue an idaw, we must make sure that the
2060 			 * current segment begins where the so far accumulated
2061 			 * idaw ends
2062 			 */
2063 			if (!idaw_dst) {
2064 				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2065 					dasd_sfree_request(cqr, startdev);
2066 					return ERR_PTR(-ERANGE);
2067 				} else
2068 					idaw_dst = dst;
2069 			}
2070 			if ((idaw_dst + idaw_len) != dst) {
2071 				dasd_sfree_request(cqr, startdev);
2072 				return ERR_PTR(-ERANGE);
2073 			}
2074 			part_len = min(seg_len, len_to_track_end);
2075 			seg_len -= part_len;
2076 			dst += part_len;
2077 			idaw_len += part_len;
2078 			len_to_track_end -= part_len;
2079 			/* collected memory area ends on an IDA_BLOCK border,
2080 			 * -> create an idaw
2081 			 * idal_create_words will handle cases where idaw_len
2082 			 * is larger than IDA_BLOCK_SIZE
2083 			 */
2084 			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2085 				end_idaw = 1;
2086 			/* We also need to end the idaw at track end */
2087 			if (!len_to_track_end) {
2088 				new_track = 1;
2089 				end_idaw = 1;
2090 			}
2091 			if (end_idaw) {
2092 				idaws = idal_create_words(idaws, idaw_dst,
2093 							  idaw_len);
2094 				idaw_dst = 0;
2095 				idaw_len = 0;
2096 				end_idaw = 0;
2097 			}
2098 		}
2099 	}
2100 
2101 	if (blk_noretry_request(req) ||
2102 	    block->base->features & DASD_FEATURE_FAILFAST)
2103 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2104 	cqr->startdev = startdev;
2105 	cqr->memdev = startdev;
2106 	cqr->block = block;
2107 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2108 	cqr->lpm = private->path_data.ppm;
2109 	cqr->retries = 256;
2110 	cqr->buildclk = get_clock();
2111 	cqr->status = DASD_CQR_FILLED;
2112 	return cqr;
2113 }
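
/*
 * Worked example, not used by the driver, of the per-track sizing done in
 * the new_track branch above.  Assume blksize = 4096, blk_per_trk = 12 and
 * a request covering records 10 to 30 (tracks 0 to 2, first_offs = 10,
 * last_offs = 6):
 *
 *   track 0:  2 blocks (records 10-11, up to the track end)
 *   track 1: 12 blocks (records 12-23, a full track)
 *   track 2:  7 blocks (records 24-30)
 *
 *   tlf = (last_offs + 1) * blksize = 7 * 4096 = 28672 bytes,
 *   i.e. exactly the data carried by the last track.
 */
static inline unsigned int
dasd_eckd_example_blocks_on_trk(unsigned long long recid,
				unsigned long long last_rec,
				unsigned int blk_per_trk)
{
	unsigned int recoffs = recid % blk_per_trk;
	unsigned int count_to_trk_end = blk_per_trk - recoffs;
	unsigned long long left = last_rec - recid + 1;

	/* never read/write beyond the end of the current track */
	return left < count_to_trk_end ? (unsigned int)left : count_to_trk_end;
}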
2114 
2115 static int prepare_itcw(struct itcw *itcw,
2116 			unsigned int trk, unsigned int totrk, int cmd,
2117 			struct dasd_device *basedev,
2118 			struct dasd_device *startdev,
2119 			unsigned int rec_on_trk, int count,
2120 			unsigned int blksize,
2121 			unsigned int total_data_size,
2122 			unsigned int tlf,
2123 			unsigned int blk_per_trk)
2124 {
2125 	struct PFX_eckd_data pfxdata;
2126 	struct dasd_eckd_private *basepriv, *startpriv;
2127 	struct DE_eckd_data *dedata;
2128 	struct LRE_eckd_data *lredata;
2129 	struct dcw *dcw;
2130 
2131 	u32 begcyl, endcyl;
2132 	u16 heads, beghead, endhead;
2133 	u8 pfx_cmd;
2134 
2135 	int rc = 0;
2136 	int sector = 0;
2137 	int dn, d;
2138 
2139 
2140 	/* setup prefix data */
2141 	basepriv = (struct dasd_eckd_private *) basedev->private;
2142 	startpriv = (struct dasd_eckd_private *) startdev->private;
2143 	dedata = &pfxdata.define_extent;
2144 	lredata = &pfxdata.locate_record;
2145 
2146 	memset(&pfxdata, 0, sizeof(pfxdata));
2147 	pfxdata.format = 1; /* PFX with LRE */
2148 	pfxdata.base_address = basepriv->ned->unit_addr;
2149 	pfxdata.base_lss = basepriv->ned->ID;
2150 	pfxdata.validity.define_extent = 1;
2151 
2152 	/* private uid is kept up to date, conf_data may be outdated */
2153 	if (startpriv->uid.type != UA_BASE_DEVICE) {
2154 		pfxdata.validity.verify_base = 1;
2155 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2156 			pfxdata.validity.hyper_pav = 1;
2157 	}
2158 
2159 	switch (cmd) {
2160 	case DASD_ECKD_CCW_READ_TRACK_DATA:
2161 		dedata->mask.perm = 0x1;
2162 		dedata->attributes.operation = basepriv->attrib.operation;
2163 		dedata->blk_size = blksize;
2164 		dedata->ga_extended |= 0x42;
2165 		lredata->operation.orientation = 0x0;
2166 		lredata->operation.operation = 0x0C;
2167 		lredata->auxiliary.check_bytes = 0x01;
2168 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2169 		break;
2170 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2171 		dedata->mask.perm = 0x02;
2172 		dedata->attributes.operation = basepriv->attrib.operation;
2173 		dedata->blk_size = blksize;
2174 		rc = check_XRC_on_prefix(&pfxdata, basedev);
2175 		dedata->ga_extended |= 0x42;
2176 		lredata->operation.orientation = 0x0;
2177 		lredata->operation.operation = 0x3F;
2178 		lredata->extended_operation = 0x23;
2179 		lredata->auxiliary.check_bytes = 0x2;
2180 		pfx_cmd = DASD_ECKD_CCW_PFX;
2181 		break;
2182 	default:
2183 		DBF_DEV_EVENT(DBF_ERR, basedev,
2184 			      "prepare itcw, unknown opcode 0x%x", cmd);
2185 		BUG();
2186 		break;
2187 	}
2188 	if (rc)
2189 		return rc;
2190 
2191 	dedata->attributes.mode = 0x3;	/* ECKD */
2192 
2193 	heads = basepriv->rdc_data.trk_per_cyl;
2194 	begcyl = trk / heads;
2195 	beghead = trk % heads;
2196 	endcyl = totrk / heads;
2197 	endhead = totrk % heads;
2198 
2199 	/* check for sequential prestage - enhance cylinder range */
2200 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2201 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
2202 
2203 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2204 			endcyl += basepriv->attrib.nr_cyl;
2205 		else
2206 			endcyl = (basepriv->real_cyl - 1);
2207 	}
2208 
2209 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
2210 	set_ch_t(&dedata->end_ext, endcyl, endhead);
2211 
2212 	dedata->ep_format = 0x20; /* records per track is valid */
2213 	dedata->ep_rec_per_track = blk_per_trk;
2214 
2215 	if (rec_on_trk) {
2216 		switch (basepriv->rdc_data.dev_type) {
2217 		case 0x3390:
2218 			dn = ceil_quot(blksize + 6, 232);
2219 			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2220 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2221 			break;
2222 		case 0x3380:
2223 			d = 7 + ceil_quot(blksize + 12, 32);
2224 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2225 			break;
2226 		}
2227 	}
2228 
2229 	lredata->auxiliary.length_valid = 1;
2230 	lredata->auxiliary.length_scope = 1;
2231 	lredata->auxiliary.imbedded_ccw_valid = 1;
2232 	lredata->length = tlf;
2233 	lredata->imbedded_ccw = cmd;
2234 	lredata->count = count;
2235 	lredata->sector = sector;
2236 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
2237 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
2238 	lredata->search_arg.head = lredata->seek_addr.head;
2239 	lredata->search_arg.record = rec_on_trk;
2240 
2241 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2242 		     &pfxdata, sizeof(pfxdata), total_data_size);
2243 
2244 	return rc;
2245 }
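
/*
 * Worked example, not used by the driver, of the 3390 sector calculation
 * in prepare_itcw() above, assuming ceil_quot() (defined elsewhere in the
 * driver) is plain ceiling division.  For blksize = 4096:
 *
 *   dn     = ceil_quot(4096 + 6, 232)          = 18
 *   d      = 9 + ceil_quot(4096 + 6 * 19, 34)  = 9 + 124 = 133
 *   sector = (49 + (rec_on_trk - 1) * 143) / 8 = 6 for the first record
 *
 * The cylinder/head split above is the usual track geometry:
 * begcyl = trk / trk_per_cyl, beghead = trk % trk_per_cyl.
 */
static inline unsigned int
dasd_eckd_example_3390_sector(unsigned int blksize, unsigned int rec_on_trk)
{
	unsigned int dn = (blksize + 6 + 231) / 232;	/* ceiling division */
	unsigned int d = 9 + (blksize + 6 * (dn + 1) + 33) / 34;

	return (49 + (rec_on_trk - 1) * (10 + d)) / 8;
}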
2246 
2247 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2248 					       struct dasd_device *startdev,
2249 					       struct dasd_block *block,
2250 					       struct request *req,
2251 					       sector_t first_rec,
2252 					       sector_t last_rec,
2253 					       sector_t first_trk,
2254 					       sector_t last_trk,
2255 					       unsigned int first_offs,
2256 					       unsigned int last_offs,
2257 					       unsigned int blk_per_trk,
2258 					       unsigned int blksize)
2259 {
2260 	struct dasd_eckd_private *private;
2261 	struct dasd_ccw_req *cqr;
2262 	struct req_iterator iter;
2263 	struct bio_vec *bv;
2264 	char *dst;
2265 	unsigned int trkcount, ctidaw;
2266 	unsigned char cmd;
2267 	struct dasd_device *basedev;
2268 	unsigned int tlf;
2269 	struct itcw *itcw;
2270 	struct tidaw *last_tidaw = NULL;
2271 	int itcw_op;
2272 	size_t itcw_size;
2273 
2274 	basedev = block->base;
2275 	private = (struct dasd_eckd_private *) basedev->private;
2276 	if (rq_data_dir(req) == READ) {
2277 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2278 		itcw_op = ITCW_OP_READ;
2279 	} else if (rq_data_dir(req) == WRITE) {
2280 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2281 		itcw_op = ITCW_OP_WRITE;
2282 	} else
2283 		return ERR_PTR(-EINVAL);
2284 
2285 	/* Track-based I/O needs to address all memory via TIDAWs,
2286 	 * not just 64 bit addresses. This allows us to map
2287 	 * each segment directly to one tidaw.
2288 	 */
2289 	trkcount = last_trk - first_trk + 1;
2290 	ctidaw = 0;
2291 	rq_for_each_segment(bv, req, iter) {
2292 		++ctidaw;
2293 	}
2294 
2295 	/* Allocate the ccw request. */
2296 	itcw_size = itcw_calc_size(0, ctidaw, 0);
2297 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2298 	if (IS_ERR(cqr))
2299 		return cqr;
2300 
2301 	cqr->cpmode = 1;
2302 	cqr->startdev = startdev;
2303 	cqr->memdev = startdev;
2304 	cqr->block = block;
2305 	cqr->expires = 100*HZ;
2306 	cqr->buildclk = get_clock();
2307 	cqr->status = DASD_CQR_FILLED;
2308 	cqr->retries = 10;
2309 
2310 	/* transfer length factor: how many bytes to read from the last track */
2311 	if (first_trk == last_trk)
2312 		tlf = last_offs - first_offs + 1;
2313 	else
2314 		tlf = last_offs + 1;
2315 	tlf *= blksize;
2316 
2317 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2318 	cqr->cpaddr = itcw_get_tcw(itcw);
2319 
2320 	if (prepare_itcw(itcw, first_trk, last_trk,
2321 			 cmd, basedev, startdev,
2322 			 first_offs + 1,
2323 			 trkcount, blksize,
2324 			 (last_rec - first_rec + 1) * blksize,
2325 			 tlf, blk_per_trk) == -EAGAIN) {
2326 		/* Clock not in sync and XRC is enabled.
2327 		 * Try again later.
2328 		 */
2329 		dasd_sfree_request(cqr, startdev);
2330 		return ERR_PTR(-EAGAIN);
2331 	}
2332 
2333 	/*
2334 	 * A tidaw can address 4k of memory, but must not cross page boundaries.
2335 	 * We can let the block layer handle this by setting
2336 	 * blk_queue_segment_boundary to page boundaries and
2337 	 * blk_max_segment_size to page size when setting up the request queue
	 * (a sketch of that queue setup follows this function).
2338 	 */
2339 	rq_for_each_segment(bv, req, iter) {
2340 		dst = page_address(bv->bv_page) + bv->bv_offset;
2341 		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2342 		if (IS_ERR(last_tidaw))
2343 			return (struct dasd_ccw_req *)last_tidaw;
2344 	}
2345 
2346 	last_tidaw->flags |= 0x80;
2347 	itcw_finalize(itcw);
2348 
2349 	if (blk_noretry_request(req) ||
2350 	    block->base->features & DASD_FEATURE_FAILFAST)
2351 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2352 	cqr->startdev = startdev;
2353 	cqr->memdev = startdev;
2354 	cqr->block = block;
2355 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2356 	cqr->lpm = private->path_data.ppm;
2357 	cqr->retries = 256;
2358 	cqr->buildclk = get_clock();
2359 	cqr->status = DASD_CQR_FILLED;
2360 	return cqr;
2361 }
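
/*
 * Sketch of the queue setup referred to in the comment inside the function
 * above: limiting each segment to one page and forbidding page-boundary
 * crossings lets every bio segment map to exactly one tidaw.  In the
 * driver this happens where the request queue is configured (outside this
 * file); the block layer helpers below are what that comment presumably
 * refers to.
 */
static inline void dasd_eckd_example_setup_queue_limits(struct request_queue *q)
{
	/* a segment must not cross a page boundary ... */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	/* ... and must not be larger than one page */
	blk_queue_max_segment_size(q, PAGE_SIZE);
}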
2362 
2363 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2364 					       struct dasd_block *block,
2365 					       struct request *req)
2366 {
2367 	int tpm, cmdrtd, cmdwtd;
2368 	int use_prefix;
2369 #if defined(CONFIG_64BIT)
2370 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
2371 #endif
2372 	struct dasd_eckd_private *private;
2373 	struct dasd_device *basedev;
2374 	sector_t first_rec, last_rec;
2375 	sector_t first_trk, last_trk;
2376 	unsigned int first_offs, last_offs;
2377 	unsigned int blk_per_trk, blksize;
2378 	int cdlspecial;
2379 	struct dasd_ccw_req *cqr;
2380 
2381 	basedev = block->base;
2382 	private = (struct dasd_eckd_private *) basedev->private;
2383 
2384 	/* Calculate number of blocks/records per track. */
2385 	blksize = block->bp_block;
2386 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2387 	if (blk_per_trk == 0)
2388 		return ERR_PTR(-EINVAL);
2389 	/* Calculate record id of first and last block. */
2390 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2391 	first_offs = sector_div(first_trk, blk_per_trk);
2392 	last_rec = last_trk =
2393 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2394 	last_offs = sector_div(last_trk, blk_per_trk);
2395 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2396 
2397 	/* is transport mode supported? */
2398 #if defined(CONFIG_64BIT)
2399 	fcx_in_css = css_general_characteristics.fcx;
2400 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2401 	fcx_in_features = private->features.feature[40] & 0x80;
2402 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2403 #else
2404 	tpm = 0;
2405 #endif
2406 
2407 	/* is read track data and write track data in command mode supported? */
2408 	cmdrtd = private->features.feature[9] & 0x20;
2409 	cmdwtd = private->features.feature[12] & 0x40;
2410 	use_prefix = private->features.feature[8] & 0x01;
2411 
2412 	cqr = NULL;
2413 	if (cdlspecial || dasd_page_cache) {
2414 		/* do nothing, just fall through to the cmd mode single case */
2415 	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
2416 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2417 						    first_rec, last_rec,
2418 						    first_trk, last_trk,
2419 						    first_offs, last_offs,
2420 						    blk_per_trk, blksize);
2421 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2422 			cqr = NULL;
2423 	} else if (use_prefix &&
2424 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
2425 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2426 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2427 						   first_rec, last_rec,
2428 						   first_trk, last_trk,
2429 						   first_offs, last_offs,
2430 						   blk_per_trk, blksize);
2431 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2432 			cqr = NULL;
2433 	}
2434 	if (!cqr)
2435 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2436 						    first_rec, last_rec,
2437 						    first_trk, last_trk,
2438 						    first_offs, last_offs,
2439 						    blk_per_trk, blksize);
2440 	return cqr;
2441 }
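
/*
 * Illustrative sketch, not used by the driver, of the geometry conversion
 * at the top of dasd_eckd_build_cp() above.  With 4KB blocks, s2b_shift is
 * 3 (eight 512 byte sectors per block); blk_per_trk = 12 is an assumed
 * example value.  A request at sector 2048 with 64 sectors then maps to
 * records 256-263, all on track 21 (offsets 4-11), so the transport mode
 * path may be used for it.
 */
static inline int dasd_eckd_example_single_track(unsigned long long pos,
						 unsigned long long sectors,
						 unsigned int s2b_shift,
						 unsigned int blk_per_trk)
{
	unsigned long long first_trk = (pos >> s2b_shift) / blk_per_trk;
	unsigned long long last_trk =
		((pos + sectors - 1) >> s2b_shift) / blk_per_trk;

	/* the transport mode path above is only taken for single track I/O */
	return first_trk == last_trk;
}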
2442 
2443 static int
2444 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2445 {
2446 	struct dasd_eckd_private *private;
2447 	struct ccw1 *ccw;
2448 	struct req_iterator iter;
2449 	struct bio_vec *bv;
2450 	char *dst, *cda;
2451 	unsigned int blksize, blk_per_trk, off;
2452 	sector_t recid;
2453 	int status;
2454 
2455 	if (!dasd_page_cache)
2456 		goto out;
2457 	private = (struct dasd_eckd_private *) cqr->block->base->private;
2458 	blksize = cqr->block->bp_block;
2459 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2460 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2461 	ccw = cqr->cpaddr;
2462 	/* Skip over define extent & locate record. */
2463 	ccw++;
2464 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
2465 		ccw++;
2466 	rq_for_each_segment(bv, req, iter) {
2467 		dst = page_address(bv->bv_page) + bv->bv_offset;
2468 		for (off = 0; off < bv->bv_len; off += blksize) {
2469 			/* Skip locate record. */
2470 			if (private->uses_cdl && recid <= 2*blk_per_trk)
2471 				ccw++;
2472 			if (dst) {
2473 				if (ccw->flags & CCW_FLAG_IDA)
2474 					cda = *((char **)((addr_t) ccw->cda));
2475 				else
2476 					cda = (char *)((addr_t) ccw->cda);
2477 				if (dst != cda) {
2478 					if (rq_data_dir(req) == READ)
2479 						memcpy(dst, cda, bv->bv_len);
2480 					kmem_cache_free(dasd_page_cache,
2481 					    (void *)((addr_t)cda & PAGE_MASK));
2482 				}
2483 				dst = NULL;
2484 			}
2485 			ccw++;
2486 			recid++;
2487 		}
2488 	}
2489 out:
2490 	status = cqr->status == DASD_CQR_DONE;
2491 	dasd_sfree_request(cqr, cqr->memdev);
2492 	return status;
2493 }
2494 
2495 /*
2496  * Modify ccw/tcw in cqr so it can be started on a base device.
2497  *
2498  * Note that this is not enough to restart the cqr!
2499  * Either reset cqr->startdev as well (summary unit check handling)
2500  * or restart via separate cqr (as in ERP handling).
2501  */
2502 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
2503 {
2504 	struct ccw1 *ccw;
2505 	struct PFX_eckd_data *pfxdata;
2506 	struct tcw *tcw;
2507 	struct tccb *tccb;
2508 	struct dcw *dcw;
2509 
2510 	if (cqr->cpmode == 1) {
2511 		tcw = cqr->cpaddr;
2512 		tccb = tcw_get_tccb(tcw);
2513 		dcw = (struct dcw *)&tccb->tca[0];
2514 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
2515 		pfxdata->validity.verify_base = 0;
2516 		pfxdata->validity.hyper_pav = 0;
2517 	} else {
2518 		ccw = cqr->cpaddr;
2519 		pfxdata = cqr->data;
2520 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2521 			pfxdata->validity.verify_base = 0;
2522 			pfxdata->validity.hyper_pav = 0;
2523 		}
2524 	}
2525 }
2526 
2527 #define DASD_ECKD_CHANQ_MAX_SIZE 4
2528 
2529 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2530 						     struct dasd_block *block,
2531 						     struct request *req)
2532 {
2533 	struct dasd_eckd_private *private;
2534 	struct dasd_device *startdev;
2535 	unsigned long flags;
2536 	struct dasd_ccw_req *cqr;
2537 
2538 	startdev = dasd_alias_get_start_dev(base);
2539 	if (!startdev)
2540 		startdev = base;
2541 	private = (struct dasd_eckd_private *) startdev->private;
2542 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
2543 		return ERR_PTR(-EBUSY);
2544 
2545 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2546 	private->count++;
2547 	cqr = dasd_eckd_build_cp(startdev, block, req);
2548 	if (IS_ERR(cqr))
2549 		private->count--;
2550 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
2551 	return cqr;
2552 }
2553 
2554 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
2555 				   struct request *req)
2556 {
2557 	struct dasd_eckd_private *private;
2558 	unsigned long flags;
2559 
2560 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
2561 	private = (struct dasd_eckd_private *) cqr->memdev->private;
2562 	private->count--;
2563 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
2564 	return dasd_eckd_free_cp(cqr, req);
2565 }
2566 
2567 static int
2568 dasd_eckd_fill_info(struct dasd_device * device,
2569 		    struct dasd_information2_t * info)
2570 {
2571 	struct dasd_eckd_private *private;
2572 
2573 	private = (struct dasd_eckd_private *) device->private;
2574 	info->label_block = 2;
2575 	info->FBA_layout = private->uses_cdl ? 0 : 1;
2576 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
2577 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
2578 	memcpy(info->characteristics, &private->rdc_data,
2579 	       sizeof(struct dasd_eckd_characteristics));
2580 	info->confdata_size = min((unsigned long)private->conf_len,
2581 				  sizeof(info->configuration_data));
2582 	memcpy(info->configuration_data, private->conf_data,
2583 	       info->confdata_size);
2584 	return 0;
2585 }
2586 
2587 /*
2588  * SECTION: ioctl functions for eckd devices.
2589  */
2590 
2591 /*
2592  * Release device ioctl.
2593  * Builds a channel program to release a previously reserved
2594  * device (see dasd_eckd_reserve).
2595  */
2596 static int
2597 dasd_eckd_release(struct dasd_device *device)
2598 {
2599 	struct dasd_ccw_req *cqr;
2600 	int rc;
2601 	struct ccw1 *ccw;
2602 
2603 	if (!capable(CAP_SYS_ADMIN))
2604 		return -EACCES;
2605 
2606 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2607 	if (IS_ERR(cqr)) {
2608 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2609 			    "Could not allocate initialization request");
2610 		return PTR_ERR(cqr);
2611 	}
2612 	ccw = cqr->cpaddr;
2613 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
2614 	ccw->flags |= CCW_FLAG_SLI;
2615 	ccw->count = 32;
2616 	ccw->cda = (__u32)(addr_t) cqr->data;
2617 	cqr->startdev = device;
2618 	cqr->memdev = device;
2619 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2620 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2621 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2622 	cqr->expires = 2 * HZ;
2623 	cqr->buildclk = get_clock();
2624 	cqr->status = DASD_CQR_FILLED;
2625 
2626 	rc = dasd_sleep_on_immediatly(cqr);
2627 
2628 	dasd_sfree_request(cqr, cqr->memdev);
2629 	return rc;
2630 }
2631 
2632 /*
2633  * Reserve device ioctl.
2634  * Options are set to 'synchronous wait for interrupt' and
2635  * 'timeout the request'. This leads to a terminate IO if
2636  * the interrupt is outstanding for a certain time.
2637  */
2638 static int
2639 dasd_eckd_reserve(struct dasd_device *device)
2640 {
2641 	struct dasd_ccw_req *cqr;
2642 	int rc;
2643 	struct ccw1 *ccw;
2644 
2645 	if (!capable(CAP_SYS_ADMIN))
2646 		return -EACCES;
2647 
2648 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2649 	if (IS_ERR(cqr)) {
2650 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2651 			    "Could not allocate initialization request");
2652 		return PTR_ERR(cqr);
2653 	}
2654 	ccw = cqr->cpaddr;
2655 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
2656 	ccw->flags |= CCW_FLAG_SLI;
2657 	ccw->count = 32;
2658 	ccw->cda = (__u32)(addr_t) cqr->data;
2659 	cqr->startdev = device;
2660 	cqr->memdev = device;
2661 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2662 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2663 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2664 	cqr->expires = 2 * HZ;
2665 	cqr->buildclk = get_clock();
2666 	cqr->status = DASD_CQR_FILLED;
2667 
2668 	rc = dasd_sleep_on_immediatly(cqr);
2669 
2670 	dasd_sfree_request(cqr, cqr->memdev);
2671 	return rc;
2672 }
2673 
2674 /*
2675  * Steal lock ioctl - unconditional reserve device.
2676  * Builds a channel program to break a device's reservation.
2677  * (unconditional reserve)
2678  */
2679 static int
2680 dasd_eckd_steal_lock(struct dasd_device *device)
2681 {
2682 	struct dasd_ccw_req *cqr;
2683 	int rc;
2684 	struct ccw1 *ccw;
2685 
2686 	if (!capable(CAP_SYS_ADMIN))
2687 		return -EACCES;
2688 
2689 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2690 	if (IS_ERR(cqr)) {
2691 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2692 			    "Could not allocate initialization request");
2693 		return PTR_ERR(cqr);
2694 	}
2695 	ccw = cqr->cpaddr;
2696 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
2697 	ccw->flags |= CCW_FLAG_SLI;
2698 	ccw->count = 32;
2699 	ccw->cda = (__u32)(addr_t) cqr->data;
2700 	cqr->startdev = device;
2701 	cqr->memdev = device;
2702 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2703 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2704 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2705 	cqr->expires = 2 * HZ;
2706 	cqr->buildclk = get_clock();
2707 	cqr->status = DASD_CQR_FILLED;
2708 
2709 	rc = dasd_sleep_on_immediatly(cqr);
2710 
2711 	dasd_sfree_request(cqr, cqr->memdev);
2712 	return rc;
2713 }
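
/*
 * dasd_eckd_release(), dasd_eckd_reserve() and dasd_eckd_steal_lock()
 * above differ only in the CCW command code.  The function below is a
 * hypothetical common helper, not part of the driver; it merely restates
 * the shared skeleton using the same calls the three functions use.
 */
static int dasd_eckd_example_simple_ctrl_io(struct dasd_device *device,
					    __u8 cmd_code)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);
	ccw = cqr->cpaddr;
	ccw->cmd_code = cmd_code;	/* RELEASE, RESERVE or SLCK */
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 32;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}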
2714 
2715 /*
2716  * Read performance statistics
2717  */
2718 static int
2719 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2720 {
2721 	struct dasd_psf_prssd_data *prssdp;
2722 	struct dasd_rssd_perf_stats_t *stats;
2723 	struct dasd_ccw_req *cqr;
2724 	struct ccw1 *ccw;
2725 	int rc;
2726 
2727 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
2728 				   (sizeof(struct dasd_psf_prssd_data) +
2729 				    sizeof(struct dasd_rssd_perf_stats_t)),
2730 				   device);
2731 	if (IS_ERR(cqr)) {
2732 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2733 			    "Could not allocate initialization request");
2734 		return PTR_ERR(cqr);
2735 	}
2736 	cqr->startdev = device;
2737 	cqr->memdev = device;
2738 	cqr->retries = 0;
2739 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2740 	cqr->expires = 10 * HZ;
2741 
2742 	/* Prepare for Read Subsystem Data */
2743 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2744 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
2745 	prssdp->order = PSF_ORDER_PRSSD;
2746 	prssdp->suborder = 0x01;	/* Performance Statistics */
2747 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
2748 
2749 	ccw = cqr->cpaddr;
2750 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2751 	ccw->count = sizeof(struct dasd_psf_prssd_data);
2752 	ccw->flags |= CCW_FLAG_CC;
2753 	ccw->cda = (__u32)(addr_t) prssdp;
2754 
2755 	/* Read Subsystem Data - Performance Statistics */
2756 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2757 	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
2758 
2759 	ccw++;
2760 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2761 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
2762 	ccw->cda = (__u32)(addr_t) stats;
2763 
2764 	cqr->buildclk = get_clock();
2765 	cqr->status = DASD_CQR_FILLED;
2766 	rc = dasd_sleep_on(cqr);
2767 	if (rc == 0) {
2768 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2769 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2770 		if (copy_to_user(argp, stats,
2771 				 sizeof(struct dasd_rssd_perf_stats_t)))
2772 			rc = -EFAULT;
2773 	}
2774 	dasd_sfree_request(cqr, cqr->memdev);
2775 	return rc;
2776 }
2777 
2778 /*
2779  * Get attributes (cache operations)
2780  * Returns the cache attributes used in Define Extent (DE).
2781  */
2782 static int
2783 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
2784 {
2785 	struct dasd_eckd_private *private =
2786 		(struct dasd_eckd_private *)device->private;
2787 	struct attrib_data_t attrib = private->attrib;
2788 	int rc;
2789 
2790 	if (!capable(CAP_SYS_ADMIN))
2791 		return -EACCES;
2792 	if (!argp)
2793 		return -EINVAL;
2794 
2795 	rc = 0;
2796 	if (copy_to_user(argp, (long *) &attrib,
2797 			 sizeof(struct attrib_data_t)))
2798 		rc = -EFAULT;
2799 
2800 	return rc;
2801 }
2802 
2803 /*
2804  * Set attributes (cache operations)
2805  * Stores the attributes for cache operation to be used in Define Extent (DE).
2806  */
2807 static int
2808 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2809 {
2810 	struct dasd_eckd_private *private =
2811 		(struct dasd_eckd_private *)device->private;
2812 	struct attrib_data_t attrib;
2813 
2814 	if (!capable(CAP_SYS_ADMIN))
2815 		return -EACCES;
2816 	if (!argp)
2817 		return -EINVAL;
2818 
2819 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
2820 		return -EFAULT;
2821 	private->attrib = attrib;
2822 
2823 	dev_info(&device->cdev->dev,
2824 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
2825 		 private->attrib.operation, private->attrib.nr_cyl);
2826 	return 0;
2827 }
2828 
2829 /*
2830  * Issue syscall I/O to EMC Symmetrix array.
2831  * CCWs are PSF and RSSD
2832  */
2833 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2834 {
2835 	struct dasd_symmio_parms usrparm;
2836 	char *psf_data, *rssd_result;
2837 	struct dasd_ccw_req *cqr;
2838 	struct ccw1 *ccw;
2839 	int rc;
2840 
2841 	/* Copy parms from caller */
2842 	rc = -EFAULT;
2843 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2844 		goto out;
2845 #ifndef CONFIG_64BIT
2846 	/* Make sure pointers are sane even on 31 bit. */
2847 	if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
2848 		rc = -EINVAL;
2849 		goto out;
2850 	}
2851 #endif
2852 	/* alloc I/O data area */
2853 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2854 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
2855 	if (!psf_data || !rssd_result) {
2856 		rc = -ENOMEM;
2857 		goto out_free;
2858 	}
2859 
2860 	/* get syscall header from user space */
2861 	rc = -EFAULT;
2862 	if (copy_from_user(psf_data,
2863 			   (void __user *)(unsigned long) usrparm.psf_data,
2864 			   usrparm.psf_data_len))
2865 		goto out_free;
2866 
2867 	/* sanity check on syscall header */
2868 	if (psf_data[0] != 0x17 && psf_data[1] != 0xce) {
2869 		rc = -EINVAL;
2870 		goto out_free;
2871 	}
2872 
2873 	/* setup CCWs for PSF + RSSD */
2874 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device);
2875 	if (IS_ERR(cqr)) {
2876 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2877 			"Could not allocate initialization request");
2878 		rc = PTR_ERR(cqr);
2879 		goto out_free;
2880 	}
2881 
2882 	cqr->startdev = device;
2883 	cqr->memdev = device;
2884 	cqr->retries = 3;
2885 	cqr->expires = 10 * HZ;
2886 	cqr->buildclk = get_clock();
2887 	cqr->status = DASD_CQR_FILLED;
2888 
2889 	/* Build the ccws */
2890 	ccw = cqr->cpaddr;
2891 
2892 	/* PSF ccw */
2893 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2894 	ccw->count = usrparm.psf_data_len;
2895 	ccw->flags |= CCW_FLAG_CC;
2896 	ccw->cda = (__u32)(addr_t) psf_data;
2897 
2898 	ccw++;
2899 
2900 	/* RSSD ccw  */
2901 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2902 	ccw->count = usrparm.rssd_result_len;
2903 	ccw->flags = CCW_FLAG_SLI;
2904 	ccw->cda = (__u32)(addr_t) rssd_result;
2905 
2906 	rc = dasd_sleep_on(cqr);
2907 	if (rc)
2908 		goto out_sfree;
2909 
2910 	rc = -EFAULT;
2911 	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
2912 			   rssd_result, usrparm.rssd_result_len))
2913 		goto out_sfree;
2914 	rc = 0;
2915 
2916 out_sfree:
2917 	dasd_sfree_request(cqr, cqr->memdev);
2918 out_free:
2919 	kfree(rssd_result);
2920 	kfree(psf_data);
2921 out:
2922 	DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
2923 	return rc;
2924 }
2925 
2926 static int
2927 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
2928 {
2929 	struct dasd_device *device = block->base;
2930 
2931 	switch (cmd) {
2932 	case BIODASDGATTR:
2933 		return dasd_eckd_get_attrib(device, argp);
2934 	case BIODASDSATTR:
2935 		return dasd_eckd_set_attrib(device, argp);
2936 	case BIODASDPSRD:
2937 		return dasd_eckd_performance(device, argp);
2938 	case BIODASDRLSE:
2939 		return dasd_eckd_release(device);
2940 	case BIODASDRSRV:
2941 		return dasd_eckd_reserve(device);
2942 	case BIODASDSLCK:
2943 		return dasd_eckd_steal_lock(device);
2944 	case BIODASDSYMMIO:
2945 		return dasd_symm_io(device, argp);
2946 	default:
2947 		return -ENOIOCTLCMD;
2948 	}
2949 }
2950 
2951 /*
2952  * Dump the range of CCWs into 'page' buffer
2953  * and return number of printed chars.
2954  */
2955 static int
2956 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
2957 {
2958 	int len, count;
2959 	char *datap;
2960 
2961 	len = 0;
2962 	while (from <= to) {
2963 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2964 			       " CCW %p: %08X %08X DAT:",
2965 			       from, ((int *) from)[0], ((int *) from)[1]);
2966 
2967 		/* get pointer to data (consider IDALs) */
2968 		if (from->flags & CCW_FLAG_IDA)
2969 			datap = (char *) *((addr_t *) (addr_t) from->cda);
2970 		else
2971 			datap = (char *) ((addr_t) from->cda);
2972 
2973 		/* dump data (max 32 bytes) */
2974 		for (count = 0; count < from->count && count < 32; count++) {
2975 			if (count % 8 == 0) len += sprintf(page + len, " ");
2976 			if (count % 4 == 0) len += sprintf(page + len, " ");
2977 			len += sprintf(page + len, "%02x", datap[count]);
2978 		}
2979 		len += sprintf(page + len, "\n");
2980 		from++;
2981 	}
2982 	return len;
2983 }
2984 
2985 static void
2986 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
2987 			 char *reason)
2988 {
2989 	u64 *sense;
2990 
2991 	sense = (u64 *) dasd_get_sense(irb);
2992 	if (sense) {
2993 		DBF_DEV_EVENT(DBF_EMERG, device,
2994 			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
2995 			      "%016llx", reason,
2996 			      scsw_is_tm(&irb->scsw) ? "t" : "c",
2997 			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
2998 			      scsw_dstat(&irb->scsw), sense[0], sense[1],
2999 			      sense[2], sense[3]);
3000 	} else {
3001 		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
3002 			      "SORRY - NO VALID SENSE AVAILABLE\n");
3003 	}
3004 }
3005 
3006 /*
3007  * Print sense data and related channel program.
3008  * Printed in parts because the printk buffer is only 1024 bytes.
3009  */
3010 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3011 				 struct dasd_ccw_req *req, struct irb *irb)
3012 {
3013 	char *page;
3014 	struct ccw1 *first, *last, *fail, *from, *to;
3015 	int len, sl, sct;
3016 
3017 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3018 	if (page == NULL) {
3019 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3020 			      "No memory to dump sense data\n");
3021 		return;
3022 	}
3023 	/* dump the sense data */
3024 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3025 		      " I/O status report for device %s:\n",
3026 		      dev_name(&device->cdev->dev));
3027 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3028 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3029 		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3030 		       scsw_cc(&irb->scsw), req->intrc);
3031 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3032 		       " device %s: Failing CCW: %p\n",
3033 		       dev_name(&device->cdev->dev),
3034 		       (void *) (addr_t) irb->scsw.cmd.cpa);
3035 	if (irb->esw.esw0.erw.cons) {
3036 		for (sl = 0; sl < 4; sl++) {
3037 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3038 				       " Sense(hex) %2d-%2d:",
3039 				       (8 * sl), ((8 * sl) + 7));
3040 
3041 			for (sct = 0; sct < 8; sct++) {
3042 				len += sprintf(page + len, " %02x",
3043 					       irb->ecw[8 * sl + sct]);
3044 			}
3045 			len += sprintf(page + len, "\n");
3046 		}
3047 
3048 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3049 			/* 24 Byte Sense Data */
3050 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3051 				" 24 Byte: %x MSG %x, "
3052 				"%s MSGb to SYSOP\n",
3053 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3054 				irb->ecw[1] & 0x10 ? "" : "no");
3055 		} else {
3056 			/* 32 Byte Sense Data */
3057 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3058 				" 32 Byte: Format: %x "
3059 				"Exception class %x\n",
3060 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3061 		}
3062 	} else {
3063 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3064 			" SORRY - NO VALID SENSE AVAILABLE\n");
3065 	}
3066 	printk("%s", page);
3067 
3068 	if (req) {
3069 		/* req == NULL for unsolicited interrupts */
3070 		/* dump the Channel Program (max 140 Bytes per line) */
3071 		/* Count CCWs and print the first CCWs (maximum 1024 / 140 = 7) */
3072 		first = req->cpaddr;
3073 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3074 		to = min(first + 6, last);
3075 		len = sprintf(page,  KERN_ERR PRINTK_HEADER
3076 			      " Related CP in req: %p\n", req);
3077 		dasd_eckd_dump_ccw_range(first, to, page + len);
3078 		printk("%s", page);
3079 
3080 		/* print failing CCW area (maximum 4) */
3081 		/* scsw->cda is either valid or zero  */
3082 		len = 0;
3083 		from = ++to;
3084 		fail = (struct ccw1 *)(addr_t)
3085 				irb->scsw.cmd.cpa; /* failing CCW */
3086 		if (from <  fail - 2) {
3087 			from = fail - 2;     /* there is a gap - print header */
3088 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3089 		}
3090 		to = min(fail + 1, last);
3091 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
3092 
3093 		/* print last CCWs (maximum 2) */
3094 		from = max(from, ++to);
3095 		if (from < last - 1) {
3096 			from = last - 1;     /* there is a gap - print header */
3097 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3098 		}
3099 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
3100 		if (len > 0)
3101 			printk("%s", page);
3102 	}
3103 	free_page((unsigned long) page);
3104 }
3105 
3106 
3107 /*
3108  * Print sense data from a tcw.
3109  */
3110 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3111 				 struct dasd_ccw_req *req, struct irb *irb)
3112 {
3113 	char *page;
3114 	int len, sl, sct, residual;
3115 
3116 	struct tsb *tsb;
3117 	u8 *sense;
3118 
3119 
3120 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3121 	if (page == NULL) {
3122 		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
3123 			    "No memory to dump sense data");
3124 		return;
3125 	}
3126 	/* dump the sense data */
3127 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3128 		      " I/O status report for device %s:\n",
3129 		      dev_name(&device->cdev->dev));
3130 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3131 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
3132 		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
3133 		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3134 		       scsw_cc(&irb->scsw), req->intrc,
3135 		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3136 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3137 		       " device %s: Failing TCW: %p\n",
3138 		       dev_name(&device->cdev->dev),
3139 		       (void *) (addr_t) irb->scsw.tm.tcw);
3140 
3141 	tsb = NULL;
3142 	sense = NULL;
3143 	if (irb->scsw.tm.tcw)
3144 		tsb = tcw_get_tsb(
3145 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3146 
3147 	if (tsb && (irb->scsw.tm.fcxs == 0x01)) {
3148 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3149 			       " tsb->length %d\n", tsb->length);
3150 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3151 			       " tsb->flags %x\n", tsb->flags);
3152 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3153 			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
3154 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3155 			       " tsb->count %d\n", tsb->count);
3156 		residual = tsb->count - 28;
3157 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3158 			       " residual %d\n", residual);
3159 
3160 		switch (tsb->flags & 0x07) {
3161 		case 1:	/* tsa_iostat */
3162 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3163 			       " tsb->tsa.iostat.dev_time %d\n",
3164 				       tsb->tsa.iostat.dev_time);
3165 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3166 			       " tsb->tsa.iostat.def_time %d\n",
3167 				       tsb->tsa.iostat.def_time);
3168 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3169 			       " tsb->tsa.iostat.queue_time %d\n",
3170 				       tsb->tsa.iostat.queue_time);
3171 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3172 			       " tsb->tsa.iostat.dev_busy_time %d\n",
3173 				       tsb->tsa.iostat.dev_busy_time);
3174 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3175 			       " tsb->tsa.iostat.dev_act_time %d\n",
3176 				       tsb->tsa.iostat.dev_act_time);
3177 			sense = tsb->tsa.iostat.sense;
3178 			break;
3179 		case 2: /* ts_ddpc */
3180 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3181 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3182 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3183 			       " tsb->tsa.ddpc.rcq:  ");
3184 			for (sl = 0; sl < 16; sl++) {
3185 				for (sct = 0; sct < 8; sct++) {
3186 					len += sprintf(page + len, " %02x",
3187 						       tsb->tsa.ddpc.rcq[sl]);
3188 				}
3189 				len += sprintf(page + len, "\n");
3190 			}
3191 			sense = tsb->tsa.ddpc.sense;
3192 			break;
3193 		case 3: /* tsa_intrg */
3194 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3195 				      " tsb->tsa.intrg.: not supported yet\n");
3196 			break;
3197 		}
3198 
3199 		if (sense) {
3200 			for (sl = 0; sl < 4; sl++) {
3201 				len += sprintf(page + len,
3202 					       KERN_ERR PRINTK_HEADER
3203 					       " Sense(hex) %2d-%2d:",
3204 					       (8 * sl), ((8 * sl) + 7));
3205 				for (sct = 0; sct < 8; sct++) {
3206 					len += sprintf(page + len, " %02x",
3207 						       sense[8 * sl + sct]);
3208 				}
3209 				len += sprintf(page + len, "\n");
3210 			}
3211 
3212 			if (sense[27] & DASD_SENSE_BIT_0) {
3213 				/* 24 Byte Sense Data */
3214 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3215 					" 24 Byte: %x MSG %x, "
3216 					"%s MSGb to SYSOP\n",
3217 					sense[7] >> 4, sense[7] & 0x0f,
3218 					sense[1] & 0x10 ? "" : "no");
3219 			} else {
3220 				/* 32 Byte Sense Data */
3221 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3222 					" 32 Byte: Format: %x "
3223 					"Exception class %x\n",
3224 					sense[6] & 0x0f, sense[22] >> 4);
3225 			}
3226 		} else {
3227 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3228 				" SORRY - NO VALID SENSE AVAILABLE\n");
3229 		}
3230 	} else {
3231 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3232 			" SORRY - NO TSB DATA AVAILABLE\n");
3233 	}
3234 	printk("%s", page);
3235 	free_page((unsigned long) page);
3236 }
3237 
3238 static void dasd_eckd_dump_sense(struct dasd_device *device,
3239 				 struct dasd_ccw_req *req, struct irb *irb)
3240 {
3241 	if (req && scsw_is_tm(&req->irb.scsw))
3242 		dasd_eckd_dump_sense_tcw(device, req, irb);
3243 	else
3244 		dasd_eckd_dump_sense_ccw(device, req, irb);
3245 }
3246 
3247 int dasd_eckd_pm_freeze(struct dasd_device *device)
3248 {
3249 	/*
3250 	 * The device should be disconnected from our LCU structure;
3251 	 * on restore we will reconnect it and reread LCU specific
3252 	 * information, like PAV support, that might have changed.
3253 	 */
3254 	dasd_alias_remove_device(device);
3255 	dasd_alias_disconnect_device_from_lcu(device);
3256 
3257 	return 0;
3258 }
3259 
3260 int dasd_eckd_restore_device(struct dasd_device *device)
3261 {
3262 	struct dasd_eckd_private *private;
3263 	struct dasd_eckd_characteristics temp_rdc_data;
3264 	int is_known, rc;
3265 	struct dasd_uid temp_uid;
3266 	unsigned long flags;
3267 
3268 	private = (struct dasd_eckd_private *) device->private;
3269 
3270 	/* Read Configuration Data */
3271 	rc = dasd_eckd_read_conf(device);
3272 	if (rc)
3273 		goto out_err;
3274 
3275 	/* Generate device unique id and register in devmap */
3276 	rc = dasd_eckd_generate_uid(device, &private->uid);
3277 	dasd_get_uid(device->cdev, &temp_uid);
3278 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3279 		dev_err(&device->cdev->dev, "The UID of the DASD has "
3280 			"changed\n");
3281 	if (rc)
3282 		goto out_err;
3283 	dasd_set_uid(device->cdev, &private->uid);
3284 
3285 	/* register lcu with alias handling, enable PAV if this is a new lcu */
3286 	is_known = dasd_alias_make_device_known_to_lcu(device);
3287 	if (is_known < 0)
3288 		return is_known;
3289 	if (!is_known) {
3290 		/* new lcu found */
3291 		rc = dasd_eckd_validate_server(device); /* will switch pav on */
3292 		if (rc)
3293 			goto out_err;
3294 	}
3295 
3296 	/* Read Feature Codes */
3297 	dasd_eckd_read_features(device);
3298 
3299 	/* Read Device Characteristics */
3300 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3301 					 &temp_rdc_data, 64);
3302 	if (rc) {
3303 		DBF_EVENT(DBF_WARNING,
3304 			  "Read device characteristics failed, rc=%d for "
3305 			  "device: %s", rc, dev_name(&device->cdev->dev));
3306 		goto out_err;
3307 	}
3308 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3309 	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3310 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3311 
3312 	/* add device to alias management */
3313 	dasd_alias_add_device(device);
3314 
3315 	return 0;
3316 
3317 out_err:
3318 	return -1;
3319 }
3320 
3321 static struct ccw_driver dasd_eckd_driver = {
3322 	.name	     = "dasd-eckd",
3323 	.owner	     = THIS_MODULE,
3324 	.ids	     = dasd_eckd_ids,
3325 	.probe	     = dasd_eckd_probe,
3326 	.remove      = dasd_generic_remove,
3327 	.set_offline = dasd_generic_set_offline,
3328 	.set_online  = dasd_eckd_set_online,
3329 	.notify      = dasd_generic_notify,
3330 	.freeze      = dasd_generic_pm_freeze,
3331 	.thaw	     = dasd_generic_restore_device,
3332 	.restore     = dasd_generic_restore_device,
3333 };
3334 
3335 /*
3336  * max_blocks is dependent on the amount of storage that is available
3337  * in the static io buffer for each device. Currently each device has
3338  * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
3339  * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
3340  * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
3341  * addition we have one define extent ccw + 16 bytes of data and one
3342  * locate record ccw + 16 bytes of data. That makes:
3343  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
3344  * We want to fit two into the available memory so that we can immediately
3345  * start the next request if one finishes off. That makes 249.5 blocks
3346  * for one request. Give a little safety and the result is 240.
3347  */
3348 static struct dasd_discipline dasd_eckd_discipline = {
3349 	.owner = THIS_MODULE,
3350 	.name = "ECKD",
3351 	.ebcname = "ECKD",
3352 	.max_blocks = 240,
3353 	.check_device = dasd_eckd_check_characteristics,
3354 	.uncheck_device = dasd_eckd_uncheck_device,
3355 	.do_analysis = dasd_eckd_do_analysis,
3356 	.ready_to_online = dasd_eckd_ready_to_online,
3357 	.online_to_ready = dasd_eckd_online_to_ready,
3358 	.fill_geometry = dasd_eckd_fill_geometry,
3359 	.start_IO = dasd_start_IO,
3360 	.term_IO = dasd_term_IO,
3361 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
3362 	.format_device = dasd_eckd_format_device,
3363 	.erp_action = dasd_eckd_erp_action,
3364 	.erp_postaction = dasd_eckd_erp_postaction,
3365 	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
3366 	.build_cp = dasd_eckd_build_alias_cp,
3367 	.free_cp = dasd_eckd_free_alias_cp,
3368 	.dump_sense = dasd_eckd_dump_sense,
3369 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
3370 	.fill_info = dasd_eckd_fill_info,
3371 	.ioctl = dasd_eckd_ioctl,
3372 	.freeze = dasd_eckd_pm_freeze,
3373 	.restore = dasd_eckd_restore_device,
3374 };
3375 
3376 static int __init
3377 dasd_eckd_init(void)
3378 {
3379 	int ret;
3380 
3381 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
3382 	ret = ccw_driver_register(&dasd_eckd_driver);
3383 	if (!ret)
3384 		wait_for_device_probe();
3385 
3386 	return ret;
3387 }
3388 
3389 static void __exit
3390 dasd_eckd_cleanup(void)
3391 {
3392 	ccw_driver_unregister(&dasd_eckd_driver);
3393 }
3394 
3395 module_init(dasd_eckd_init);
3396 module_exit(dasd_eckd_cleanup);
3397