xref: /openbmc/linux/drivers/s390/block/dasd_eckd.c (revision d54853ef)
1 /*
2  * File...........: linux/drivers/s390/block/dasd_eckd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9  *
10  */
11 
12 #include <linux/stddef.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
16 #include <linux/bio.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 
20 #include <asm/debug.h>
21 #include <asm/idals.h>
22 #include <asm/ebcdic.h>
23 #include <asm/io.h>
24 #include <asm/todclk.h>
25 #include <asm/uaccess.h>
26 #include <asm/cio.h>
27 #include <asm/ccwdev.h>
28 
29 #include "dasd_int.h"
30 #include "dasd_eckd.h"
31 
32 #ifdef PRINTK_HEADER
33 #undef PRINTK_HEADER
34 #endif				/* PRINTK_HEADER */
35 #define PRINTK_HEADER "dasd(eckd):"
36 
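/*
 * Shorthand accessors for the capacity formula factors in the
 * read-device-characteristics data: formula 0x01 devices supply f1-f3,
 * formula 0x02 devices supply f1-f5 (see bytes_per_record below).
 */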
37 #define ECKD_C0(i) (i->home_bytes)
38 #define ECKD_F(i) (i->formula)
39 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
40 		    (i->factors.f_0x02.f1))
41 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
42 		    (i->factors.f_0x02.f2))
43 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
44 		    (i->factors.f_0x02.f3))
45 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
46 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
47 #define ECKD_F6(i) (i->factor6)
48 #define ECKD_F7(i) (i->factor7)
49 #define ECKD_F8(i) (i->factor8)
50 
51 MODULE_LICENSE("GPL");
52 
53 static struct dasd_discipline dasd_eckd_discipline;
54 
55 struct dasd_eckd_private {
56 	struct dasd_eckd_characteristics rdc_data;
57 	struct dasd_eckd_confdata conf_data;
58 	struct dasd_eckd_path path_data;
59 	struct eckd_count count_area[5];
60 	int init_cqr_status;
61 	int uses_cdl;
62 	struct attrib_data_t attrib;	/* e.g. cache operations */
63 };
64 
65 /* The ccw bus type uses this table to find devices that it sends to
66  * dasd_eckd_probe */
67 static struct ccw_device_id dasd_eckd_ids[] = {
68 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
69 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
70 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
71 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
72 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
73 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
74 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
75 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
76 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
77 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
78 	{ /* end of list */ },
79 };
80 
81 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
82 
83 static struct ccw_driver dasd_eckd_driver; /* see below */
84 
85 /* initial attempt at a probe function. this can be simplified once
86  * the other detection code is gone */
87 static int
88 dasd_eckd_probe (struct ccw_device *cdev)
89 {
90 	int ret;
91 
92 	/* set ECKD specific ccw-device options */
93 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
94 	if (ret) {
95 		printk(KERN_WARNING
96 		       "dasd_eckd_probe: could not set ccw-device options "
97 		       "for %s\n", cdev->dev.bus_id);
98 		return ret;
99 	}
100 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
101 	return ret;
102 }
103 
104 static int
105 dasd_eckd_set_online(struct ccw_device *cdev)
106 {
107 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
108 }
109 
110 static struct ccw_driver dasd_eckd_driver = {
111 	.name        = "dasd-eckd",
112 	.owner       = THIS_MODULE,
113 	.ids         = dasd_eckd_ids,
114 	.probe       = dasd_eckd_probe,
115 	.remove      = dasd_generic_remove,
116 	.set_offline = dasd_generic_set_offline,
117 	.set_online  = dasd_eckd_set_online,
118 	.notify      = dasd_generic_notify,
119 };
120 
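/*
 * Record lengths (key + data) of the three special records on track 0
 * and of the label records on track 1 in the compatible disk layout
 * (cdl); see dasd_eckd_cdl_reclen() and dasd_eckd_format_device().
 */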
121 static const int sizes_trk0[] = { 28, 148, 84 };
122 #define LABEL_SIZE 140
123 
124 static inline unsigned int
125 round_up_multiple(unsigned int no, unsigned int mult)
126 {
127 	int rem = no % mult;
128 	return (rem ? no - rem + mult : no);
129 }
130 
131 static inline unsigned int
132 ceil_quot(unsigned int d1, unsigned int d2)
133 {
134 	return (d1 + (d2 - 1)) / d2;
135 }
136 
137 static inline int
138 bytes_per_record(struct dasd_eckd_characteristics *rdc, int kl, int dl)
139 {
140 	unsigned int fl1, fl2, int1, int2;
141 	int bpr;
142 
143 	switch (rdc->formula) {
144 	case 0x01:
145 		fl1 = round_up_multiple(ECKD_F2(rdc) + dl, ECKD_F1(rdc));
146 		fl2 = round_up_multiple(kl ? ECKD_F2(rdc) + kl : 0,
147 					ECKD_F1(rdc));
148 		bpr = fl1 + fl2;
149 		break;
150 	case 0x02:
151 		int1 = ceil_quot(dl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
152 		int2 = ceil_quot(kl + ECKD_F6(rdc), ECKD_F5(rdc) << 1);
153 		fl1 = round_up_multiple(ECKD_F1(rdc) * ECKD_F2(rdc) + dl +
154 					ECKD_F6(rdc) + ECKD_F4(rdc) * int1,
155 					ECKD_F1(rdc));
156 		fl2 = round_up_multiple(ECKD_F1(rdc) * ECKD_F3(rdc) + kl +
157 					ECKD_F6(rdc) + ECKD_F4(rdc) * int2,
158 					ECKD_F1(rdc));
159 		bpr = fl1 + fl2;
160 		break;
161 	default:
162 		bpr = 0;
163 		break;
164 	}
165 	return bpr;
166 }
167 
168 static inline unsigned int
169 bytes_per_track(struct dasd_eckd_characteristics *rdc)
170 {
171 	return *(unsigned int *) (rdc->byte_per_track) >> 8;
172 }
173 
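/*
 * Records per track for the supported device types, computed from the
 * track capacity formulas. Illustrative check: a 3390 track formatted
 * with 4096-byte data blocks and no key field gives dn = 19 and
 * 1729 / (10 + 9 + 124) = 12 records per track.
 */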
174 static inline unsigned int
175 recs_per_track(struct dasd_eckd_characteristics * rdc,
176 	       unsigned int kl, unsigned int dl)
177 {
178 	int dn, kn;
179 
180 	switch (rdc->dev_type) {
181 	case 0x3380:
182 		if (kl)
183 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
184 				       ceil_quot(dl + 12, 32));
185 		else
186 			return 1499 / (15 + ceil_quot(dl + 12, 32));
187 	case 0x3390:
188 		dn = ceil_quot(dl + 6, 232) + 1;
189 		if (kl) {
190 			kn = ceil_quot(kl + 6, 232) + 1;
191 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
192 				       9 + ceil_quot(dl + 6 * dn, 34));
193 		} else
194 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
195 	case 0x9345:
196 		dn = ceil_quot(dl + 6, 232) + 1;
197 		if (kl) {
198 			kn = ceil_quot(kl + 6, 232) + 1;
199 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
200 				       ceil_quot(dl + 6 * dn, 34));
201 		} else
202 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
203 	}
204 	return 0;
205 }
206 
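/*
 * XRC (Extended Remote Copy) requires each write to carry a System Time
 * Stamp taken from the synchronized TOD clock. If the clock is not in
 * sync, get_sync_clock() fails and the error is passed back so that the
 * caller can retry the request later (see dasd_eckd_build_cp).
 */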
207 static inline int
208 check_XRC (struct ccw1         *de_ccw,
209            struct DE_eckd_data *data,
210            struct dasd_device  *device)
211 {
212         struct dasd_eckd_private *private;
213 	int rc;
214 
215         private = (struct dasd_eckd_private *) device->private;
216 	if (!private->rdc_data.facilities.XRC_supported)
217 		return 0;
218 
219         /* switch on System Time Stamp - needed for XRC Support */
220 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
221 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
222 
223 	rc = get_sync_clock(&data->ep_sys_time);
224 	/* Ignore return code if sync clock is switched off. */
225 	if (rc == -ENOSYS || rc == -EACCES)
226 		rc = 0;
227 
228 	de_ccw->count = sizeof (struct DE_eckd_data);
229 	de_ccw->flags |= CCW_FLAG_SLI;
230 	return rc;
231 }
232 
233 static inline int
234 define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
235 	      int totrk, int cmd, struct dasd_device * device)
236 {
237 	struct dasd_eckd_private *private;
238 	struct ch_t geo, beg, end;
239 	int rc = 0;
240 
241 	private = (struct dasd_eckd_private *) device->private;
242 
243 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
244 	ccw->flags = 0;
245 	ccw->count = 16;
246 	ccw->cda = (__u32) __pa(data);
247 
248 	memset(data, 0, sizeof (struct DE_eckd_data));
249 	switch (cmd) {
250 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
251 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
252 	case DASD_ECKD_CCW_READ:
253 	case DASD_ECKD_CCW_READ_MT:
254 	case DASD_ECKD_CCW_READ_CKD:
255 	case DASD_ECKD_CCW_READ_CKD_MT:
256 	case DASD_ECKD_CCW_READ_KD:
257 	case DASD_ECKD_CCW_READ_KD_MT:
258 	case DASD_ECKD_CCW_READ_COUNT:
259 		data->mask.perm = 0x1;
260 		data->attributes.operation = private->attrib.operation;
261 		break;
262 	case DASD_ECKD_CCW_WRITE:
263 	case DASD_ECKD_CCW_WRITE_MT:
264 	case DASD_ECKD_CCW_WRITE_KD:
265 	case DASD_ECKD_CCW_WRITE_KD_MT:
266 		data->mask.perm = 0x02;
267 		data->attributes.operation = private->attrib.operation;
268 		rc = check_XRC (ccw, data, device);
269 		break;
270 	case DASD_ECKD_CCW_WRITE_CKD:
271 	case DASD_ECKD_CCW_WRITE_CKD_MT:
272 		data->attributes.operation = DASD_BYPASS_CACHE;
273 		rc = check_XRC (ccw, data, device);
274 		break;
275 	case DASD_ECKD_CCW_ERASE:
276 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
277 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
278 		data->mask.perm = 0x3;
279 		data->mask.auth = 0x1;
280 		data->attributes.operation = DASD_BYPASS_CACHE;
281 		rc = check_XRC (ccw, data, device);
282 		break;
283 	default:
284 		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
285 		break;
286 	}
287 
288 	data->attributes.mode = 0x3;	/* ECKD */
289 
290 	if ((private->rdc_data.cu_type == 0x2105 ||
291 	     private->rdc_data.cu_type == 0x2107 ||
292 	     private->rdc_data.cu_type == 0x1750)
293 	    && !(private->uses_cdl && trk < 2))
294 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
295 
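	/*
	 * Convert the linear track numbers into cylinder/head addresses,
	 * e.g. with 15 tracks per cylinder track 47 becomes cylinder 3, head 2.
	 */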
296 	geo.cyl = private->rdc_data.no_cyl;
297 	geo.head = private->rdc_data.trk_per_cyl;
298 	beg.cyl = trk / geo.head;
299 	beg.head = trk % geo.head;
300 	end.cyl = totrk / geo.head;
301 	end.head = totrk % geo.head;
302 
303 	/* check for sequential prestage - enhance cylinder range */
304 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
305 	    data->attributes.operation == DASD_SEQ_ACCESS) {
306 
307 		if (end.cyl + private->attrib.nr_cyl < geo.cyl)
308 			end.cyl += private->attrib.nr_cyl;
309 		else
310 			end.cyl = (geo.cyl - 1);
311 	}
312 
313 	data->beg_ext.cyl = beg.cyl;
314 	data->beg_ext.head = beg.head;
315 	data->end_ext.cyl = end.cyl;
316 	data->end_ext.head = end.head;
317 	return rc;
318 }
319 
320 static inline void
321 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
322 	      int rec_on_trk, int no_rec, int cmd,
323 	      struct dasd_device * device, int reclen)
324 {
325 	struct dasd_eckd_private *private;
326 	int sector;
327 	int dn, d;
328 
329 	private = (struct dasd_eckd_private *) device->private;
330 
331 	DBF_DEV_EVENT(DBF_INFO, device,
332 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
333 		  trk, rec_on_trk, no_rec, cmd, reclen);
334 
335 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
336 	ccw->flags = 0;
337 	ccw->count = 16;
338 	ccw->cda = (__u32) __pa(data);
339 
340 	memset(data, 0, sizeof (struct LO_eckd_data));
341 	sector = 0;
342 	if (rec_on_trk) {
343 		switch (private->rdc_data.dev_type) {
344 		case 0x3390:
345 			dn = ceil_quot(reclen + 6, 232);
346 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
347 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
348 			break;
349 		case 0x3380:
350 			d = 7 + ceil_quot(reclen + 12, 32);
351 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
352 			break;
353 		}
354 	}
355 	data->sector = sector;
356 	data->count = no_rec;
357 	switch (cmd) {
358 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
359 		data->operation.orientation = 0x3;
360 		data->operation.operation = 0x03;
361 		break;
362 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
363 		data->operation.orientation = 0x3;
364 		data->operation.operation = 0x16;
365 		break;
366 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
367 		data->operation.orientation = 0x1;
368 		data->operation.operation = 0x03;
369 		data->count++;
370 		break;
371 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
372 		data->operation.orientation = 0x3;
373 		data->operation.operation = 0x16;
374 		data->count++;
375 		break;
376 	case DASD_ECKD_CCW_WRITE:
377 	case DASD_ECKD_CCW_WRITE_MT:
378 	case DASD_ECKD_CCW_WRITE_KD:
379 	case DASD_ECKD_CCW_WRITE_KD_MT:
380 		data->auxiliary.last_bytes_used = 0x1;
381 		data->length = reclen;
382 		data->operation.operation = 0x01;
383 		break;
384 	case DASD_ECKD_CCW_WRITE_CKD:
385 	case DASD_ECKD_CCW_WRITE_CKD_MT:
386 		data->auxiliary.last_bytes_used = 0x1;
387 		data->length = reclen;
388 		data->operation.operation = 0x03;
389 		break;
390 	case DASD_ECKD_CCW_READ:
391 	case DASD_ECKD_CCW_READ_MT:
392 	case DASD_ECKD_CCW_READ_KD:
393 	case DASD_ECKD_CCW_READ_KD_MT:
394 		data->auxiliary.last_bytes_used = 0x1;
395 		data->length = reclen;
396 		data->operation.operation = 0x06;
397 		break;
398 	case DASD_ECKD_CCW_READ_CKD:
399 	case DASD_ECKD_CCW_READ_CKD_MT:
400 		data->auxiliary.last_bytes_used = 0x1;
401 		data->length = reclen;
402 		data->operation.operation = 0x16;
403 		break;
404 	case DASD_ECKD_CCW_READ_COUNT:
405 		data->operation.operation = 0x06;
406 		break;
407 	case DASD_ECKD_CCW_ERASE:
408 		data->length = reclen;
409 		data->auxiliary.last_bytes_used = 0x1;
410 		data->operation.operation = 0x0b;
411 		break;
412 	default:
413 		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
414 	}
415 	data->seek_addr.cyl = data->search_arg.cyl =
416 		trk / private->rdc_data.trk_per_cyl;
417 	data->seek_addr.head = data->search_arg.head =
418 		trk % private->rdc_data.trk_per_cyl;
419 	data->search_arg.record = rec_on_trk;
420 }
421 
422 /*
423  * Returns 1 if the block is one of the special blocks that needs
424  * to get read/written with the KD variant of the command.
425  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
426  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
427  * Luckily the KD variants differ only by one bit (0x08) from the
428  * normal variant. So don't wonder about code like:
429  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
430  *         ccw->cmd_code |= 0x8;
431  */
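/*
 * In cdl terms the special blocks are the first three records on track 0
 * and every record on track 1; they are formatted with key lengths 4 and
 * 44 respectively (see dasd_eckd_format_device), hence the KD commands.
 */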
432 static inline int
433 dasd_eckd_cdl_special(int blk_per_trk, int recid)
434 {
435 	if (recid < 3)
436 		return 1;
437 	if (recid < blk_per_trk)
438 		return 0;
439 	if (recid < 2 * blk_per_trk)
440 		return 1;
441 	return 0;
442 }
443 
444 /*
445  * Returns the record size for the special blocks of the cdl format.
446  * Only returns something useful if dasd_eckd_cdl_special is true
447  * for the recid.
448  */
449 static inline int
450 dasd_eckd_cdl_reclen(int recid)
451 {
452 	if (recid < 3)
453 		return sizes_trk0[recid];
454 	return LABEL_SIZE;
455 }
456 
457 /*
458  * Generate device unique id that specifies the physical device.
459  */
460 static int
461 dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
462 {
463 	struct dasd_eckd_private *private;
464 	struct dasd_eckd_confdata *confdata;
465 
466 	private = (struct dasd_eckd_private *) device->private;
467 	if (!private)
468 		return -ENODEV;
469 	confdata = &private->conf_data;
470 	if (!confdata)
471 		return -ENODEV;
472 
473 	memset(uid, 0, sizeof(struct dasd_uid));
474 	memcpy(uid->vendor, confdata->ned1.HDA_manufacturer,
475 	       sizeof(uid->vendor) - 1);
476 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
477 	memcpy(uid->serial, confdata->ned1.HDA_location,
478 	       sizeof(uid->serial) - 1);
479 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
480 	uid->ssid = confdata->neq.subsystemID;
481 	if (confdata->ned2.sneq.flags == 0x40) {
482 		uid->alias = 1;
483 		uid->unit_addr = confdata->ned2.sneq.base_unit_addr;
484 	} else
485 		uid->unit_addr = confdata->ned1.unit_addr;
486 
487 	return 0;
488 }
489 
490 static int
491 dasd_eckd_read_conf(struct dasd_device *device)
492 {
493 	void *conf_data;
494 	int conf_len, conf_data_saved;
495 	int rc;
496 	__u8 lpm;
497 	struct dasd_eckd_private *private;
498 	struct dasd_eckd_path *path_data;
499 
500 	private = (struct dasd_eckd_private *) device->private;
501 	path_data = (struct dasd_eckd_path *) &private->path_data;
502 	path_data->opm = ccw_device_get_path_mask(device->cdev);
503 	lpm = 0x80;
504 	conf_data_saved = 0;
505 
506 	/* get configuration data per operational path */
507 	for (lpm = 0x80; lpm; lpm>>= 1) {
508 		if (lpm & path_data->opm){
509 			rc = read_conf_data_lpm(device->cdev, &conf_data,
510 						&conf_len, lpm);
511 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
512 				MESSAGE(KERN_WARNING,
513 					"Read configuration data returned "
514 					"error %d", rc);
515 				return rc;
516 			}
517 			if (conf_data == NULL) {
518 				MESSAGE(KERN_WARNING, "%s", "No configuration "
519 					"data retrieved");
520 				continue;	/* no error */
521 			}
522 			if (conf_len != sizeof (struct dasd_eckd_confdata)) {
523 				MESSAGE(KERN_WARNING,
524 					"sizes of configuration data mismatch "
525 					"%d (read) vs %ld (expected)",
526 					conf_len,
527 					sizeof (struct dasd_eckd_confdata));
528 				kfree(conf_data);
529 				continue;	/* no error */
530 			}
531 			/* save first valid configuration data */
532 			if (!conf_data_saved){
533 				memcpy(&private->conf_data, conf_data,
534 				       sizeof (struct dasd_eckd_confdata));
535 				conf_data_saved++;
536 			}
537 			switch (((char *)conf_data)[242] & 0x07){
538 			case 0x02:
539 				path_data->npm |= lpm;
540 				break;
541 			case 0x03:
542 				path_data->ppm |= lpm;
543 				break;
544 			}
545 			kfree(conf_data);
546 		}
547 	}
548 	return 0;
549 }
550 
551 /*
552  * Build CP for Perform Subsystem Function - SSC.
553  */
554 static struct dasd_ccw_req *
555 dasd_eckd_build_psf_ssc(struct dasd_device *device)
556 {
557        struct dasd_ccw_req *cqr;
558        struct dasd_psf_ssc_data *psf_ssc_data;
559        struct ccw1 *ccw;
560 
561        cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
562 				  sizeof(struct dasd_psf_ssc_data),
563 				  device);
564 
565        if (IS_ERR(cqr)) {
566 	       DEV_MESSAGE(KERN_WARNING, device, "%s",
567 			   "Could not allocate PSF-SSC request");
568 	       return cqr;
569        }
570        psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
571        psf_ssc_data->order = PSF_ORDER_SSC;
572        psf_ssc_data->suborder = 0x08;
573 
574        ccw = cqr->cpaddr;
575        ccw->cmd_code = DASD_ECKD_CCW_PSF;
576        ccw->cda = (__u32)(addr_t)psf_ssc_data;
577        ccw->count = 66;
578 
579        cqr->device = device;
580        cqr->expires = 10*HZ;
581        cqr->buildclk = get_clock();
582        cqr->status = DASD_CQR_FILLED;
583        return cqr;
584 }
585 
586 /*
587  * Perform Subsystem Function.
588  * It is necessary to trigger CIO for channel revalidation since this
589  * call might change behaviour of DASD devices.
590  */
591 static int
592 dasd_eckd_psf_ssc(struct dasd_device *device)
593 {
594        struct dasd_ccw_req *cqr;
595        int rc;
596 
597        cqr = dasd_eckd_build_psf_ssc(device);
598        if (IS_ERR(cqr))
599 	       return PTR_ERR(cqr);
600 
601        rc = dasd_sleep_on(cqr);
602        if (!rc)
603 	       /* trigger CIO to reprobe devices */
604 	       css_schedule_reprobe();
605        dasd_sfree_request(cqr, cqr->device);
606        return rc;
607 }
608 
609 /*
610  * Validate storage server of current device.
611  */
612 static int
613 dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
614 {
615 	int rc;
616 
617 	/* Currently PAV is the only reason to 'validate' server on LPAR */
618 	if (dasd_nopav || MACHINE_IS_VM)
619 		return 0;
620 
621 	rc = dasd_eckd_psf_ssc(device);
622 	/* the requested feature may not be available on the server,
623 	 * therefore just report the error and continue */
624 	DEV_MESSAGE(KERN_INFO, device,
625 		    "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
626 		    uid->vendor, uid->serial, uid->ssid, rc);
627 	/* RE-Read Configuration Data */
628 	return dasd_eckd_read_conf(device);
629 }
630 
631 /*
632  * Check device characteristics.
633  * If the device is accessible using ECKD discipline, the device is enabled.
634  */
635 static int
636 dasd_eckd_check_characteristics(struct dasd_device *device)
637 {
638 	struct dasd_eckd_private *private;
639 	struct dasd_uid uid;
640 	void *rdc_data;
641 	int rc;
642 
643 	private = (struct dasd_eckd_private *) device->private;
644 	if (private == NULL) {
645 		private = kzalloc(sizeof(struct dasd_eckd_private),
646 				  GFP_KERNEL | GFP_DMA);
647 		if (private == NULL) {
648 			DEV_MESSAGE(KERN_WARNING, device, "%s",
649 				    "memory allocation failed for private "
650 				    "data");
651 			return -ENOMEM;
652 		}
653 		device->private = (void *) private;
654 	}
655 	/* Invalidate status of initial analysis. */
656 	private->init_cqr_status = -1;
657 	/* Set default cache operations. */
658 	private->attrib.operation = DASD_NORMAL_CACHE;
659 	private->attrib.nr_cyl = 0;
660 
661 	/* Read Configuration Data */
662 	rc = dasd_eckd_read_conf(device);
663 	if (rc)
664 		return rc;
665 
666 	/* Generate device unique id and register in devmap */
667 	rc = dasd_eckd_generate_uid(device, &uid);
668 	if (rc)
669 		return rc;
670 	rc = dasd_set_uid(device->cdev, &uid);
671 	if (rc == 1)	/* new server found */
672 		rc = dasd_eckd_validate_server(device, &uid);
673 	if (rc)
674 		return rc;
675 
676 	/* Read Device Characteristics */
677 	rdc_data = (void *) &(private->rdc_data);
678 	memset(rdc_data, 0, sizeof(private->rdc_data));
679 	rc = read_dev_chars(device->cdev, &rdc_data, 64);
680 	if (rc)
681 		DEV_MESSAGE(KERN_WARNING, device,
682 			    "Read device characteristics returned "
683 			    "rc=%d", rc);
684 
685 	DEV_MESSAGE(KERN_INFO, device,
686 		    "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
687 		    private->rdc_data.dev_type,
688 		    private->rdc_data.dev_model,
689 		    private->rdc_data.cu_type,
690 		    private->rdc_data.cu_model.model,
691 		    private->rdc_data.no_cyl,
692 		    private->rdc_data.trk_per_cyl,
693 		    private->rdc_data.sec_per_trk);
694 	return rc;
695 }
696 
697 static struct dasd_ccw_req *
698 dasd_eckd_analysis_ccw(struct dasd_device *device)
699 {
700 	struct dasd_eckd_private *private;
701 	struct eckd_count *count_data;
702 	struct LO_eckd_data *LO_data;
703 	struct dasd_ccw_req *cqr;
704 	struct ccw1 *ccw;
705 	int cplength, datasize;
706 	int i;
707 
708 	private = (struct dasd_eckd_private *) device->private;
709 
710 	cplength = 8;
711 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
712 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
713 				   cplength, datasize, device);
714 	if (IS_ERR(cqr))
715 		return cqr;
716 	ccw = cqr->cpaddr;
717 	/* Define extent for the first 3 tracks. */
718 	define_extent(ccw++, cqr->data, 0, 2,
719 		      DASD_ECKD_CCW_READ_COUNT, device);
720 	LO_data = cqr->data + sizeof (struct DE_eckd_data);
721 	/* Locate record for the first 4 records on track 0. */
722 	ccw[-1].flags |= CCW_FLAG_CC;
723 	locate_record(ccw++, LO_data++, 0, 0, 4,
724 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
725 
726 	count_data = private->count_area;
727 	for (i = 0; i < 4; i++) {
728 		ccw[-1].flags |= CCW_FLAG_CC;
729 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
730 		ccw->flags = 0;
731 		ccw->count = 8;
732 		ccw->cda = (__u32)(addr_t) count_data;
733 		ccw++;
734 		count_data++;
735 	}
736 
737 	/* Locate record for the first record on track 2. */
738 	ccw[-1].flags |= CCW_FLAG_CC;
739 	locate_record(ccw++, LO_data++, 2, 0, 1,
740 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
741 	/* Read count ccw. */
742 	ccw[-1].flags |= CCW_FLAG_CC;
743 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
744 	ccw->flags = 0;
745 	ccw->count = 8;
746 	ccw->cda = (__u32)(addr_t) count_data;
747 
748 	cqr->device = device;
749 	cqr->retries = 0;
750 	cqr->buildclk = get_clock();
751 	cqr->status = DASD_CQR_FILLED;
752 	return cqr;
753 }
754 
755 /*
756  * This is the callback function for the init_analysis cqr. It saves
757  * the status of the initial analysis ccw before it frees it and kicks
758  * the device to continue the startup sequence. This will call
759  * dasd_eckd_do_analysis again (if the device has not been marked
760  * for deletion in the meantime).
761  */
762 static void
763 dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
764 {
765 	struct dasd_eckd_private *private;
766 	struct dasd_device *device;
767 
768 	device = init_cqr->device;
769 	private = (struct dasd_eckd_private *) device->private;
770 	private->init_cqr_status = init_cqr->status;
771 	dasd_sfree_request(init_cqr, device);
772 	dasd_kick_device(device);
773 }
774 
775 static int
776 dasd_eckd_start_analysis(struct dasd_device *device)
777 {
778 	struct dasd_eckd_private *private;
779 	struct dasd_ccw_req *init_cqr;
780 
781 	private = (struct dasd_eckd_private *) device->private;
782 	init_cqr = dasd_eckd_analysis_ccw(device);
783 	if (IS_ERR(init_cqr))
784 		return PTR_ERR(init_cqr);
785 	init_cqr->callback = dasd_eckd_analysis_callback;
786 	init_cqr->callback_data = NULL;
787 	init_cqr->expires = 5*HZ;
788 	dasd_add_request_head(init_cqr);
789 	return -EAGAIN;
790 }
791 
792 static int
793 dasd_eckd_end_analysis(struct dasd_device *device)
794 {
795 	struct dasd_eckd_private *private;
796 	struct eckd_count *count_area;
797 	unsigned int sb, blk_per_trk;
798 	int status, i;
799 
800 	private = (struct dasd_eckd_private *) device->private;
801 	status = private->init_cqr_status;
802 	private->init_cqr_status = -1;
803 	if (status != DASD_CQR_DONE) {
804 		DEV_MESSAGE(KERN_WARNING, device, "%s",
805 			    "volume analysis returned unformatted disk");
806 		return -EMEDIUMTYPE;
807 	}
808 
809 	private->uses_cdl = 1;
810 	/* Calculate number of blocks/records per track. */
811 	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
812 	/* Check Track 0 for Compatible Disk Layout */
813 	count_area = NULL;
814 	for (i = 0; i < 3; i++) {
815 		if (private->count_area[i].kl != 4 ||
816 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
817 			private->uses_cdl = 0;
818 			break;
819 		}
820 	}
821 	if (i == 3)
822 		count_area = &private->count_area[4];
823 
824 	if (private->uses_cdl == 0) {
825 		for (i = 0; i < 5; i++) {
826 			if ((private->count_area[i].kl != 0) ||
827 			    (private->count_area[i].dl !=
828 			     private->count_area[0].dl))
829 				break;
830 		}
831 		if (i == 5)
832 			count_area = &private->count_area[0];
833 	} else {
834 		if (private->count_area[3].record == 1)
835 			DEV_MESSAGE(KERN_WARNING, device, "%s",
836 				    "Trk 0: no records after VTOC!");
837 	}
838 	if (count_area != NULL && count_area->kl == 0) {
839 		/* we found nothing violating our disk layout */
840 		if (dasd_check_blocksize(count_area->dl) == 0)
841 			device->bp_block = count_area->dl;
842 	}
843 	if (device->bp_block == 0) {
844 		DEV_MESSAGE(KERN_WARNING, device, "%s",
845 			    "Volume has incompatible disk layout");
846 		return -EMEDIUMTYPE;
847 	}
848 	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
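	/* e.g. a 4096 byte block size gives s2b_shift = 3 (512 << 3 == 4096) */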
849 	for (sb = 512; sb < device->bp_block; sb = sb << 1)
850 		device->s2b_shift++;
851 
852 	blk_per_trk = recs_per_track(&private->rdc_data, 0, device->bp_block);
853 	device->blocks = (private->rdc_data.no_cyl *
854 			  private->rdc_data.trk_per_cyl *
855 			  blk_per_trk);
856 
857 	DEV_MESSAGE(KERN_INFO, device,
858 		    "(%dkB blks): %dkB at %dkB/trk %s",
859 		    (device->bp_block >> 10),
860 		    ((private->rdc_data.no_cyl *
861 		      private->rdc_data.trk_per_cyl *
862 		      blk_per_trk * (device->bp_block >> 9)) >> 1),
863 		    ((blk_per_trk * device->bp_block) >> 10),
864 		    private->uses_cdl ?
865 		    "compatible disk layout" : "linux disk layout");
866 
867 	return 0;
868 }
869 
870 static int
871 dasd_eckd_do_analysis(struct dasd_device *device)
872 {
873 	struct dasd_eckd_private *private;
874 
875 	private = (struct dasd_eckd_private *) device->private;
876 	if (private->init_cqr_status < 0)
877 		return dasd_eckd_start_analysis(device);
878 	else
879 		return dasd_eckd_end_analysis(device);
880 }
881 
882 static int
883 dasd_eckd_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
884 {
885 	struct dasd_eckd_private *private;
886 
887 	private = (struct dasd_eckd_private *) device->private;
888 	if (dasd_check_blocksize(device->bp_block) == 0) {
889 		geo->sectors = recs_per_track(&private->rdc_data,
890 					      0, device->bp_block);
891 	}
892 	geo->cylinders = private->rdc_data.no_cyl;
893 	geo->heads = private->rdc_data.trk_per_cyl;
894 	return 0;
895 }
896 
897 static struct dasd_ccw_req *
898 dasd_eckd_format_device(struct dasd_device * device,
899 			struct format_data_t * fdata)
900 {
901 	struct dasd_eckd_private *private;
902 	struct dasd_ccw_req *fcp;
903 	struct eckd_count *ect;
904 	struct ccw1 *ccw;
905 	void *data;
906 	int rpt, cyl, head;
907 	int cplength, datasize;
908 	int i;
909 
910 	private = (struct dasd_eckd_private *) device->private;
911 	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
912 	cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
913 	head = fdata->start_unit % private->rdc_data.trk_per_cyl;
914 
915 	/* Sanity checks. */
916 	if (fdata->start_unit >=
917 	    (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
918 		DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
919 			    fdata->start_unit);
920 		return ERR_PTR(-EINVAL);
921 	}
922 	if (fdata->start_unit > fdata->stop_unit) {
923 		DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
924 			    fdata->start_unit);
925 		return ERR_PTR(-EINVAL);
926 	}
927 	if (dasd_check_blocksize(fdata->blksize) != 0) {
928 		DEV_MESSAGE(KERN_WARNING, device,
929 			    "Invalid blocksize %d...terminating!",
930 			    fdata->blksize);
931 		return ERR_PTR(-EINVAL);
932 	}
933 
934 	/*
935 	 * fdata->intensity is a bit string that tells us what to do:
936 	 *   Bit 0: write record zero
937 	 *   Bit 1: write home address, currently not supported
938 	 *   Bit 2: invalidate tracks
939 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
940 	 * Only some bit combinations make sense.
941 	 */
942 	switch (fdata->intensity) {
943 	case 0x00:	/* Normal format */
944 	case 0x08:	/* Normal format, use cdl. */
945 		cplength = 2 + rpt;
946 		datasize = sizeof(struct DE_eckd_data) +
947 			sizeof(struct LO_eckd_data) +
948 			rpt * sizeof(struct eckd_count);
949 		break;
950 	case 0x01:	/* Write record zero and format track. */
951 	case 0x09:	/* Write record zero and format track, use cdl. */
952 		cplength = 3 + rpt;
953 		datasize = sizeof(struct DE_eckd_data) +
954 			sizeof(struct LO_eckd_data) +
955 			sizeof(struct eckd_count) +
956 			rpt * sizeof(struct eckd_count);
957 		break;
958 	case 0x04:	/* Invalidate track. */
959 	case 0x0c:	/* Invalidate track, use cdl. */
960 		cplength = 3;
961 		datasize = sizeof(struct DE_eckd_data) +
962 			sizeof(struct LO_eckd_data) +
963 			sizeof(struct eckd_count);
964 		break;
965 	default:
966 		DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
967 			    fdata->intensity);
968 		return ERR_PTR(-EINVAL);
969 	}
970 	/* Allocate the format ccw request. */
971 	fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
972 				   cplength, datasize, device);
973 	if (IS_ERR(fcp))
974 		return fcp;
975 
976 	data = fcp->data;
977 	ccw = fcp->cpaddr;
978 
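	/* Bit 3 (0x08, cdl) only affects the record layout further down, so
	 * it is masked off while building the define extent/locate record
	 * part of the channel program. */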
979 	switch (fdata->intensity & ~0x08) {
980 	case 0x00: /* Normal format. */
981 		define_extent(ccw++, (struct DE_eckd_data *) data,
982 			      fdata->start_unit, fdata->start_unit,
983 			      DASD_ECKD_CCW_WRITE_CKD, device);
984 		data += sizeof(struct DE_eckd_data);
985 		ccw[-1].flags |= CCW_FLAG_CC;
986 		locate_record(ccw++, (struct LO_eckd_data *) data,
987 			      fdata->start_unit, 0, rpt,
988 			      DASD_ECKD_CCW_WRITE_CKD, device,
989 			      fdata->blksize);
990 		data += sizeof(struct LO_eckd_data);
991 		break;
992 	case 0x01: /* Write record zero + format track. */
993 		define_extent(ccw++, (struct DE_eckd_data *) data,
994 			      fdata->start_unit, fdata->start_unit,
995 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
996 			      device);
997 		data += sizeof(struct DE_eckd_data);
998 		ccw[-1].flags |= CCW_FLAG_CC;
999 		locate_record(ccw++, (struct LO_eckd_data *) data,
1000 			      fdata->start_unit, 0, rpt + 1,
1001 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1002 			      device->bp_block);
1003 		data += sizeof(struct LO_eckd_data);
1004 		break;
1005 	case 0x04: /* Invalidate track. */
1006 		define_extent(ccw++, (struct DE_eckd_data *) data,
1007 			      fdata->start_unit, fdata->start_unit,
1008 			      DASD_ECKD_CCW_WRITE_CKD, device);
1009 		data += sizeof(struct DE_eckd_data);
1010 		ccw[-1].flags |= CCW_FLAG_CC;
1011 		locate_record(ccw++, (struct LO_eckd_data *) data,
1012 			      fdata->start_unit, 0, 1,
1013 			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
1014 		data += sizeof(struct LO_eckd_data);
1015 		break;
1016 	}
1017 	if (fdata->intensity & 0x01) {	/* write record zero */
1018 		ect = (struct eckd_count *) data;
1019 		data += sizeof(struct eckd_count);
1020 		ect->cyl = cyl;
1021 		ect->head = head;
1022 		ect->record = 0;
1023 		ect->kl = 0;
1024 		ect->dl = 8;
1025 		ccw[-1].flags |= CCW_FLAG_CC;
1026 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1027 		ccw->flags = CCW_FLAG_SLI;
1028 		ccw->count = 8;
1029 		ccw->cda = (__u32)(addr_t) ect;
1030 		ccw++;
1031 	}
1032 	if ((fdata->intensity & ~0x08) & 0x04) {	/* erase track */
1033 		ect = (struct eckd_count *) data;
1034 		data += sizeof(struct eckd_count);
1035 		ect->cyl = cyl;
1036 		ect->head = head;
1037 		ect->record = 1;
1038 		ect->kl = 0;
1039 		ect->dl = 0;
1040 		ccw[-1].flags |= CCW_FLAG_CC;
1041 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1042 		ccw->flags = CCW_FLAG_SLI;
1043 		ccw->count = 8;
1044 		ccw->cda = (__u32)(addr_t) ect;
1045 	} else {		/* write remaining records */
1046 		for (i = 0; i < rpt; i++) {
1047 			ect = (struct eckd_count *) data;
1048 			data += sizeof(struct eckd_count);
1049 			ect->cyl = cyl;
1050 			ect->head = head;
1051 			ect->record = i + 1;
1052 			ect->kl = 0;
1053 			ect->dl = fdata->blksize;
1054 			/* Check for special tracks 0-1 when formatting CDL */
1055 			if ((fdata->intensity & 0x08) &&
1056 			    fdata->start_unit == 0) {
1057 				if (i < 3) {
1058 					ect->kl = 4;
1059 					ect->dl = sizes_trk0[i] - 4;
1060 				}
1061 			}
1062 			if ((fdata->intensity & 0x08) &&
1063 			    fdata->start_unit == 1) {
1064 				ect->kl = 44;
1065 				ect->dl = LABEL_SIZE - 44;
1066 			}
1067 			ccw[-1].flags |= CCW_FLAG_CC;
1068 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1069 			ccw->flags = CCW_FLAG_SLI;
1070 			ccw->count = 8;
1071 			ccw->cda = (__u32)(addr_t) ect;
1072 			ccw++;
1073 		}
1074 	}
1075 	fcp->device = device;
1076 	fcp->retries = 2;	/* set retry counter to enable ERP */
1077 	fcp->buildclk = get_clock();
1078 	fcp->status = DASD_CQR_FILLED;
1079 	return fcp;
1080 }
1081 
1082 static dasd_era_t
1083 dasd_eckd_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
1084 {
1085 	struct dasd_device *device = (struct dasd_device *) cqr->device;
1086 	struct ccw_device *cdev = device->cdev;
1087 
1088 	if (irb->scsw.cstat == 0x00 &&
1089 	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
1090 		return dasd_era_none;
1091 
1092 	switch (cdev->id.cu_type) {
1093 	case 0x3990:
1094 	case 0x2105:
1095 	case 0x2107:
1096 	case 0x1750:
1097 		return dasd_3990_erp_examine(cqr, irb);
1098 	case 0x9343:
1099 		return dasd_9343_erp_examine(cqr, irb);
1100 	case 0x3880:
1101 	default:
1102 		DEV_MESSAGE(KERN_WARNING, device, "%s",
1103 			    "default (unknown CU type) - RECOVERABLE return");
1104 		return dasd_era_recover;
1105 	}
1106 }
1107 
1108 static dasd_erp_fn_t
1109 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1110 {
1111 	struct dasd_device *device = (struct dasd_device *) cqr->device;
1112 	struct ccw_device *cdev = device->cdev;
1113 
1114 	switch (cdev->id.cu_type) {
1115 	case 0x3990:
1116 	case 0x2105:
1117 	case 0x2107:
1118 	case 0x1750:
1119 		return dasd_3990_erp_action;
1120 	case 0x9343:
1121 	case 0x3880:
1122 	default:
1123 		return dasd_default_erp_action;
1124 	}
1125 }
1126 
1127 static dasd_erp_fn_t
1128 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1129 {
1130 	return dasd_default_erp_postaction;
1131 }
1132 
1133 static struct dasd_ccw_req *
1134 dasd_eckd_build_cp(struct dasd_device * device, struct request *req)
1135 {
1136 	struct dasd_eckd_private *private;
1137 	unsigned long *idaws;
1138 	struct LO_eckd_data *LO_data;
1139 	struct dasd_ccw_req *cqr;
1140 	struct ccw1 *ccw;
1141 	struct bio *bio;
1142 	struct bio_vec *bv;
1143 	char *dst;
1144 	unsigned int blksize, blk_per_trk, off;
1145 	int count, cidaw, cplength, datasize;
1146 	sector_t recid, first_rec, last_rec;
1147 	sector_t first_trk, last_trk;
1148 	unsigned int first_offs, last_offs;
1149 	unsigned char cmd, rcmd;
1150 	int i;
1151 
1152 	private = (struct dasd_eckd_private *) device->private;
1153 	if (rq_data_dir(req) == READ)
1154 		cmd = DASD_ECKD_CCW_READ_MT;
1155 	else if (rq_data_dir(req) == WRITE)
1156 		cmd = DASD_ECKD_CCW_WRITE_MT;
1157 	else
1158 		return ERR_PTR(-EINVAL);
1159 	/* Calculate number of blocks/records per track. */
1160 	blksize = device->bp_block;
1161 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1162 	/* Calculate record id of first and last block. */
1163 	first_rec = first_trk = req->sector >> device->s2b_shift;
1164 	first_offs = sector_div(first_trk, blk_per_trk);
1165 	last_rec = last_trk =
1166 		(req->sector + req->nr_sectors - 1) >> device->s2b_shift;
1167 	last_offs = sector_div(last_trk, blk_per_trk);
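	/*
	 * Illustrative example: with 4KB blocks (s2b_shift = 3) and 12 blocks
	 * per track, request sector 96 maps to recid 12, i.e. track 1, offset 0.
	 */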
1168 	/* Check struct bio and count the number of blocks for the request. */
1169 	count = 0;
1170 	cidaw = 0;
1171 	rq_for_each_bio(bio, req) {
1172 		bio_for_each_segment(bv, bio, i) {
1173 			if (bv->bv_len & (blksize - 1))
1174 				/* Eckd can only do full blocks. */
1175 				return ERR_PTR(-EINVAL);
1176 			count += bv->bv_len >> (device->s2b_shift + 9);
1177 #if defined(CONFIG_64BIT)
1178 			if (idal_is_needed (page_address(bv->bv_page),
1179 					    bv->bv_len))
1180 				cidaw += bv->bv_len >> (device->s2b_shift + 9);
1181 #endif
1182 		}
1183 	}
1184 	/* Paranoia. */
1185 	if (count != last_rec - first_rec + 1)
1186 		return ERR_PTR(-EINVAL);
1187 	/* 1x define extent + 1x locate record + number of blocks */
1188 	cplength = 2 + count;
1189 	/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1190 	datasize = sizeof(struct DE_eckd_data) + sizeof(struct LO_eckd_data) +
1191 		cidaw * sizeof(unsigned long);
1192 	/* Find out the number of additional locate record ccws for cdl. */
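	/* Each special cdl block on tracks 0 and 1 has its own record length
	 * and therefore gets a private locate record ccw in the loop below. */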
1193 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1194 		if (last_rec >= 2*blk_per_trk)
1195 			count = 2*blk_per_trk - first_rec;
1196 		cplength += count;
1197 		datasize += count*sizeof(struct LO_eckd_data);
1198 	}
1199 	/* Allocate the ccw request. */
1200 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1201 				   cplength, datasize, device);
1202 	if (IS_ERR(cqr))
1203 		return cqr;
1204 	ccw = cqr->cpaddr;
1205 	/* First ccw is define extent. */
1206 	if (define_extent(ccw++, cqr->data, first_trk,
1207 			  last_trk, cmd, device) == -EAGAIN) {
1208 		/* Clock not in sync and XRC is enabled. Try again later. */
1209 		dasd_sfree_request(cqr, device);
1210 		return ERR_PTR(-EAGAIN);
1211 	}
1212 	/* Build locate_record + read/write ccws. */
1213 	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_eckd_data));
1214 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1215 	recid = first_rec;
1216 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1217 		/* Only standard blocks so there is just one locate record. */
1218 		ccw[-1].flags |= CCW_FLAG_CC;
1219 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1220 			      last_rec - recid + 1, cmd, device, blksize);
1221 	}
1222 	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1223 		dst = page_address(bv->bv_page) + bv->bv_offset;
1224 		if (dasd_page_cache) {
1225 			char *copy = kmem_cache_alloc(dasd_page_cache,
1226 						      GFP_DMA | __GFP_NOWARN);
1227 			if (copy && rq_data_dir(req) == WRITE)
1228 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1229 			if (copy)
1230 				dst = copy + bv->bv_offset;
1231 		}
1232 		for (off = 0; off < bv->bv_len; off += blksize) {
1233 			sector_t trkid = recid;
1234 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
1235 			rcmd = cmd;
1236 			count = blksize;
1237 			/* Locate record for cdl special block ? */
1238 			if (private->uses_cdl && recid < 2*blk_per_trk) {
1239 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1240 					rcmd |= 0x8;
1241 					count = dasd_eckd_cdl_reclen(recid);
1242 					if (count < blksize &&
1243 					    rq_data_dir(req) == READ)
1244 						memset(dst + count, 0xe5,
1245 						       blksize - count);
1246 				}
1247 				ccw[-1].flags |= CCW_FLAG_CC;
1248 				locate_record(ccw++, LO_data++,
1249 					      trkid, recoffs + 1,
1250 					      1, rcmd, device, count);
1251 			}
1252 			/* Locate record for standard blocks ? */
1253 			if (private->uses_cdl && recid == 2*blk_per_trk) {
1254 				ccw[-1].flags |= CCW_FLAG_CC;
1255 				locate_record(ccw++, LO_data++,
1256 					      trkid, recoffs + 1,
1257 					      last_rec - recid + 1,
1258 					      cmd, device, count);
1259 			}
1260 			/* Read/write ccw. */
1261 			ccw[-1].flags |= CCW_FLAG_CC;
1262 			ccw->cmd_code = rcmd;
1263 			ccw->count = count;
1264 			if (idal_is_needed(dst, blksize)) {
1265 				ccw->cda = (__u32)(addr_t) idaws;
1266 				ccw->flags = CCW_FLAG_IDA;
1267 				idaws = idal_create_words(idaws, dst, blksize);
1268 			} else {
1269 				ccw->cda = (__u32)(addr_t) dst;
1270 				ccw->flags = 0;
1271 			}
1272 			ccw++;
1273 			dst += blksize;
1274 			recid++;
1275 		}
1276 	}
1277 	if (req->cmd_flags & REQ_FAILFAST)
1278 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1279 	cqr->device = device;
1280 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
1281 	cqr->lpm = private->path_data.ppm;
1282 	cqr->retries = 256;
1283 	cqr->buildclk = get_clock();
1284 	cqr->status = DASD_CQR_FILLED;
1285 	return cqr;
1286 }
1287 
1288 static int
1289 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1290 {
1291 	struct dasd_eckd_private *private;
1292 	struct ccw1 *ccw;
1293 	struct bio *bio;
1294 	struct bio_vec *bv;
1295 	char *dst, *cda;
1296 	unsigned int blksize, blk_per_trk, off;
1297 	sector_t recid;
1298 	int i, status;
1299 
1300 	if (!dasd_page_cache)
1301 		goto out;
1302 	private = (struct dasd_eckd_private *) cqr->device->private;
1303 	blksize = cqr->device->bp_block;
1304 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1305 	recid = req->sector >> cqr->device->s2b_shift;
1306 	ccw = cqr->cpaddr;
1307 	/* Skip over define extent & locate record. */
1308 	ccw++;
1309 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
1310 		ccw++;
1311 	rq_for_each_bio(bio, req) bio_for_each_segment(bv, bio, i) {
1312 		dst = page_address(bv->bv_page) + bv->bv_offset;
1313 		for (off = 0; off < bv->bv_len; off += blksize) {
1314 			/* Skip locate record. */
1315 			if (private->uses_cdl && recid <= 2*blk_per_trk)
1316 				ccw++;
1317 			if (dst) {
1318 				if (ccw->flags & CCW_FLAG_IDA)
1319 					cda = *((char **)((addr_t) ccw->cda));
1320 				else
1321 					cda = (char *)((addr_t) ccw->cda);
1322 				if (dst != cda) {
1323 					if (rq_data_dir(req) == READ)
1324 						memcpy(dst, cda, bv->bv_len);
1325 					kmem_cache_free(dasd_page_cache,
1326 					    (void *)((addr_t)cda & PAGE_MASK));
1327 				}
1328 				dst = NULL;
1329 			}
1330 			ccw++;
1331 			recid++;
1332 		}
1333 	}
1334 out:
1335 	status = cqr->status == DASD_CQR_DONE;
1336 	dasd_sfree_request(cqr, cqr->device);
1337 	return status;
1338 }
1339 
1340 static int
1341 dasd_eckd_fill_info(struct dasd_device * device,
1342 		    struct dasd_information2_t * info)
1343 {
1344 	struct dasd_eckd_private *private;
1345 
1346 	private = (struct dasd_eckd_private *) device->private;
1347 	info->label_block = 2;
1348 	info->FBA_layout = private->uses_cdl ? 0 : 1;
1349 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
1350 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1351 	memcpy(info->characteristics, &private->rdc_data,
1352 	       sizeof(struct dasd_eckd_characteristics));
1353 	info->confdata_size = sizeof (struct dasd_eckd_confdata);
1354 	memcpy(info->configuration_data, &private->conf_data,
1355 	       sizeof (struct dasd_eckd_confdata));
1356 	return 0;
1357 }
1358 
1359 /*
1360  * SECTION: ioctl functions for eckd devices.
1361  */
1362 
1363 /*
1364  * Release device ioctl.
1365  * Builds a channel program to release a device that was previously
1366  * reserved with dasd_eckd_reserve.
1367  */
1368 static int
1369 dasd_eckd_release(struct dasd_device *device)
1370 {
1371 	struct dasd_ccw_req *cqr;
1372 	int rc;
1373 
1374 	if (!capable(CAP_SYS_ADMIN))
1375 		return -EACCES;
1376 
1377 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1378 				   1, 32, device);
1379 	if (IS_ERR(cqr)) {
1380 		DEV_MESSAGE(KERN_WARNING, device, "%s",
1381 			    "Could not allocate initialization request");
1382 		return PTR_ERR(cqr);
1383 	}
1384 	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
1385         cqr->cpaddr->flags |= CCW_FLAG_SLI;
1386         cqr->cpaddr->count = 32;
1387 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1388 	cqr->device = device;
1389 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1390 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1391 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
1392 	cqr->expires = 2 * HZ;
1393 	cqr->buildclk = get_clock();
1394 	cqr->status = DASD_CQR_FILLED;
1395 
1396 	rc = dasd_sleep_on_immediatly(cqr);
1397 
1398 	dasd_sfree_request(cqr, cqr->device);
1399 	return rc;
1400 }
1401 
1402 /*
1403  * Reserve device ioctl.
1404  * Options are set to 'synchronous wait for interrupt' and
1405  * 'timeout the request'. This leads to a terminate IO if
1406  * the interrupt is outstanding for a certain time.
1407  */
1408 static int
1409 dasd_eckd_reserve(struct dasd_device *device)
1410 {
1411 	struct dasd_ccw_req *cqr;
1412 	int rc;
1413 
1414 	if (!capable(CAP_SYS_ADMIN))
1415 		return -EACCES;
1416 
1417 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1418 				   1, 32, device);
1419 	if (IS_ERR(cqr)) {
1420 		DEV_MESSAGE(KERN_WARNING, device, "%s",
1421 			    "Could not allocate initialization request");
1422 		return PTR_ERR(cqr);
1423 	}
1424 	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
1425         cqr->cpaddr->flags |= CCW_FLAG_SLI;
1426         cqr->cpaddr->count = 32;
1427 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1428 	cqr->device = device;
1429 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1430 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1431 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
1432 	cqr->expires = 2 * HZ;
1433 	cqr->buildclk = get_clock();
1434 	cqr->status = DASD_CQR_FILLED;
1435 
1436 	rc = dasd_sleep_on_immediatly(cqr);
1437 
1438 	dasd_sfree_request(cqr, cqr->device);
1439 	return rc;
1440 }
1441 
1442 /*
1443  * Steal lock ioctl - unconditional reserve device.
1444  * Builds a channel program to break another system's reservation of
1445  * the device (unconditional reserve).
1446  */
1447 static int
1448 dasd_eckd_steal_lock(struct dasd_device *device)
1449 {
1450 	struct dasd_ccw_req *cqr;
1451 	int rc;
1452 
1453 	if (!capable(CAP_SYS_ADMIN))
1454 		return -EACCES;
1455 
1456 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1457 				   1, 32, device);
1458 	if (IS_ERR(cqr)) {
1459 		DEV_MESSAGE(KERN_WARNING, device, "%s",
1460 			    "Could not allocate initialization request");
1461 		return PTR_ERR(cqr);
1462 	}
1463 	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
1464         cqr->cpaddr->flags |= CCW_FLAG_SLI;
1465         cqr->cpaddr->count = 32;
1466 	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1467 	cqr->device = device;
1468 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1469 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1470 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
1471 	cqr->expires = 2 * HZ;
1472 	cqr->buildclk = get_clock();
1473 	cqr->status = DASD_CQR_FILLED;
1474 
1475 	rc = dasd_sleep_on_immediatly(cqr);
1476 
1477 	dasd_sfree_request(cqr, cqr->device);
1478 	return rc;
1479 }
1480 
1481 /*
1482  * Read performance statistics
1483  */
1484 static int
1485 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
1486 {
1487 	struct dasd_psf_prssd_data *prssdp;
1488 	struct dasd_rssd_perf_stats_t *stats;
1489 	struct dasd_ccw_req *cqr;
1490 	struct ccw1 *ccw;
1491 	int rc;
1492 
1493 	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1494 				   1 /* PSF */  + 1 /* RSSD */ ,
1495 				   (sizeof (struct dasd_psf_prssd_data) +
1496 				    sizeof (struct dasd_rssd_perf_stats_t)),
1497 				   device);
1498 	if (IS_ERR(cqr)) {
1499 		DEV_MESSAGE(KERN_WARNING, device, "%s",
1500 			    "Could not allocate initialization request");
1501 		return PTR_ERR(cqr);
1502 	}
1503 	cqr->device = device;
1504 	cqr->retries = 0;
1505 	cqr->expires = 10 * HZ;
1506 
1507 	/* Prepare for Read Subsystem Data */
1508 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1509 	memset(prssdp, 0, sizeof (struct dasd_psf_prssd_data));
1510 	prssdp->order = PSF_ORDER_PRSSD;
1511 	prssdp->suborder = 0x01;	/* Performance Statistics */
1512 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
1513 
1514 	ccw = cqr->cpaddr;
1515 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1516 	ccw->count = sizeof (struct dasd_psf_prssd_data);
1517 	ccw->flags |= CCW_FLAG_CC;
1518 	ccw->cda = (__u32)(addr_t) prssdp;
1519 
1520 	/* Read Subsystem Data - Performance Statistics */
1521 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1522 	memset(stats, 0, sizeof (struct dasd_rssd_perf_stats_t));
1523 
1524 	ccw++;
1525 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1526 	ccw->count = sizeof (struct dasd_rssd_perf_stats_t);
1527 	ccw->cda = (__u32)(addr_t) stats;
1528 
1529 	cqr->buildclk = get_clock();
1530 	cqr->status = DASD_CQR_FILLED;
1531 	rc = dasd_sleep_on(cqr);
1532 	if (rc == 0) {
1533 		/* Prepare for Read Subsystem Data */
1534 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1535 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
1536 		if (copy_to_user(argp, stats,
1537 				 sizeof(struct dasd_rssd_perf_stats_t)))
1538 			rc = -EFAULT;
1539 	}
1540 	dasd_sfree_request(cqr, cqr->device);
1541 	return rc;
1542 }
1543 
1544 /*
1545  * Get attributes (cache operations)
1546  * Returns the cache attributes used in Define Extent (DE).
1547  */
1548 static int
1549 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
1550 {
1551 	struct dasd_eckd_private *private =
1552 		(struct dasd_eckd_private *)device->private;
1553 	struct attrib_data_t attrib = private->attrib;
1554 	int rc;
1555 
1556         if (!capable(CAP_SYS_ADMIN))
1557                 return -EACCES;
1558 	if (!argp)
1559                 return -EINVAL;
1560 
1561 	rc = 0;
1562 	if (copy_to_user(argp, (long *) &attrib,
1563 			 sizeof (struct attrib_data_t)))
1564 		rc = -EFAULT;
1565 
1566 	return rc;
1567 }
1568 
1569 /*
1570  * Set attributes (cache operations)
1571  * Stores the attributes for cache operations to be used in Define Extent (DE).
1572  */
1573 static int
1574 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
1575 {
1576 	struct dasd_eckd_private *private =
1577 		(struct dasd_eckd_private *)device->private;
1578 	struct attrib_data_t attrib;
1579 
1580 	if (!capable(CAP_SYS_ADMIN))
1581 		return -EACCES;
1582 	if (!argp)
1583 		return -EINVAL;
1584 
1585 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
1586 		return -EFAULT;
1587 	private->attrib = attrib;
1588 
1589 	DEV_MESSAGE(KERN_INFO, device,
1590 		    "cache operation mode set to %x (%i cylinder prestage)",
1591 		    private->attrib.operation, private->attrib.nr_cyl);
1592 	return 0;
1593 }
1594 
1595 static int
1596 dasd_eckd_ioctl(struct dasd_device *device, unsigned int cmd, void __user *argp)
1597 {
1598 	switch (cmd) {
1599 	case BIODASDGATTR:
1600 		return dasd_eckd_get_attrib(device, argp);
1601 	case BIODASDSATTR:
1602 		return dasd_eckd_set_attrib(device, argp);
1603 	case BIODASDPSRD:
1604 		return dasd_eckd_performance(device, argp);
1605 	case BIODASDRLSE:
1606 		return dasd_eckd_release(device);
1607 	case BIODASDRSRV:
1608 		return dasd_eckd_reserve(device);
1609 	case BIODASDSLCK:
1610 		return dasd_eckd_steal_lock(device);
1611 	default:
1612 		return -ENOIOCTLCMD;
1613 	}
1614 }
1615 
1616 /*
1617  * Dump the range of CCWs into 'page' buffer
1618  * and return the number of printed characters.
1619  */
1620 static inline int
1621 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
1622 {
1623 	int len, count;
1624 	char *datap;
1625 
1626 	len = 0;
1627 	while (from <= to) {
1628 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1629 			       " CCW %p: %08X %08X DAT:",
1630 			       from, ((int *) from)[0], ((int *) from)[1]);
1631 
1632 		/* get pointer to data (consider IDALs) */
1633 		if (from->flags & CCW_FLAG_IDA)
1634 			datap = (char *) *((addr_t *) (addr_t) from->cda);
1635 		else
1636 			datap = (char *) ((addr_t) from->cda);
1637 
1638 		/* dump data (max 32 bytes) */
1639 		for (count = 0; count < from->count && count < 32; count++) {
1640 			if (count % 8 == 0) len += sprintf(page + len, " ");
1641 			if (count % 4 == 0) len += sprintf(page + len, " ");
1642 			len += sprintf(page + len, "%02x", datap[count]);
1643 		}
1644 		len += sprintf(page + len, "\n");
1645 		from++;
1646 	}
1647 	return len;
1648 }
1649 
1650 /*
1651  * Print sense data and related channel program.
1652  * Parts are printed because printk buffer is only 1024 bytes.
1653  */
1654 static void
1655 dasd_eckd_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
1656 		     struct irb *irb)
1657 {
1658 	char *page;
1659 	struct ccw1 *first, *last, *fail, *from, *to;
1660 	int len, sl, sct;
1661 
1662 	page = (char *) get_zeroed_page(GFP_ATOMIC);
1663 	if (page == NULL) {
1664 		DEV_MESSAGE(KERN_ERR, device, " %s",
1665 			    "No memory to dump sense data");
1666 		return;
1667 	}
1668 	/* dump the sense data */
1669 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
1670 		      " I/O status report for device %s:\n",
1671 		      device->cdev->dev.bus_id);
1672 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1673 		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
1674 		       irb->scsw.cstat, irb->scsw.dstat);
1675 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1676 		       " device %s: Failing CCW: %p\n",
1677 		       device->cdev->dev.bus_id,
1678 		       (void *) (addr_t) irb->scsw.cpa);
1679 	if (irb->esw.esw0.erw.cons) {
1680 		for (sl = 0; sl < 4; sl++) {
1681 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
1682 				       " Sense(hex) %2d-%2d:",
1683 				       (8 * sl), ((8 * sl) + 7));
1684 
1685 			for (sct = 0; sct < 8; sct++) {
1686 				len += sprintf(page + len, " %02x",
1687 					       irb->ecw[8 * sl + sct]);
1688 			}
1689 			len += sprintf(page + len, "\n");
1690 		}
1691 
1692 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
1693 			/* 24 Byte Sense Data */
1694 			sprintf(page + len, KERN_ERR PRINTK_HEADER
1695 				" 24 Byte: %x MSG %x, "
1696 				"%s MSGb to SYSOP\n",
1697 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
1698 				irb->ecw[1] & 0x10 ? "" : "no");
1699 		} else {
1700 			/* 32 Byte Sense Data */
1701 			sprintf(page + len, KERN_ERR PRINTK_HEADER
1702 				" 32 Byte: Format: %x "
1703 				"Exception class %x\n",
1704 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
1705 		}
1706 	} else {
1707 		sprintf(page + len, KERN_ERR PRINTK_HEADER
1708 			" SORRY - NO VALID SENSE AVAILABLE\n");
1709 	}
1710 	printk("%s", page);
1711 
1712 	/* dump the Channel Program (max 140 Bytes per line) */
1713 	/* Count CCWs and print the first ones (at most 1024 / 140 = 7) */
1714 	first = req->cpaddr;
1715 	for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
1716 	to = min(first + 6, last);
1717 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
1718 		      " Related CP in req: %p\n", req);
1719 	dasd_eckd_dump_ccw_range(first, to, page + len);
1720 	printk("%s", page);
1721 
1722 	/* print failing CCW area (maximum 4) */
1723 	/* scsw->cda is either valid or zero  */
1724 	len = 0;
1725 	from = ++to;
1726 	fail = (struct ccw1 *)(addr_t) irb->scsw.cpa; /* failing CCW */
1727 	if (from <  fail - 2) {
1728 		from = fail - 2;     /* there is a gap - print header */
1729 		len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
1730 	}
1731 	to = min(fail + 1, last);
1732 	len += dasd_eckd_dump_ccw_range(from, to, page + len);
1733 
1734 	/* print last CCWs (maximum 2) */
1735 	from = max(from, ++to);
1736 	if (from < last - 1) {
1737 		from = last - 1;     /* there is a gap - print header */
1738 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
1739 	}
1740 	len += dasd_eckd_dump_ccw_range(from, last, page + len);
1741 	if (len > 0)
1742 		printk("%s", page);
1743 	free_page((unsigned long) page);
1744 }
1745 
1746 /*
1747  * max_blocks is dependent on the amount of storage that is available
1748  * in the static io buffer for each device. Currently each device has
1749  * 8192 bytes (= 2 pages). For 64 bit, one struct dasd_mchunk has
1750  * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
1751  * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
1752  * addition we have one define extent ccw + 16 bytes of data and one
1753  * locate record ccw + 16 bytes of data. That makes:
1754  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
1755  * We want to fit two into the available memory so that we can immediately
1756  * start the next request if one finishes off. That makes 249.5 blocks
1757  * for one request. Give a little safety and the result is 240.
1758  */
1759 static struct dasd_discipline dasd_eckd_discipline = {
1760 	.owner = THIS_MODULE,
1761 	.name = "ECKD",
1762 	.ebcname = "ECKD",
1763 	.max_blocks = 240,
1764 	.check_device = dasd_eckd_check_characteristics,
1765 	.do_analysis = dasd_eckd_do_analysis,
1766 	.fill_geometry = dasd_eckd_fill_geometry,
1767 	.start_IO = dasd_start_IO,
1768 	.term_IO = dasd_term_IO,
1769 	.format_device = dasd_eckd_format_device,
1770 	.examine_error = dasd_eckd_examine_error,
1771 	.erp_action = dasd_eckd_erp_action,
1772 	.erp_postaction = dasd_eckd_erp_postaction,
1773 	.build_cp = dasd_eckd_build_cp,
1774 	.free_cp = dasd_eckd_free_cp,
1775 	.dump_sense = dasd_eckd_dump_sense,
1776 	.fill_info = dasd_eckd_fill_info,
1777 	.ioctl = dasd_eckd_ioctl,
1778 };
1779 
1780 static int __init
1781 dasd_eckd_init(void)
1782 {
1783 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
1784 	return ccw_driver_register(&dasd_eckd_driver);
1785 }
1786 
1787 static void __exit
1788 dasd_eckd_cleanup(void)
1789 {
1790 	ccw_driver_unregister(&dasd_eckd_driver);
1791 }
1792 
1793 module_init(dasd_eckd_init);
1794 module_exit(dasd_eckd_cleanup);
1795