xref: /openbmc/linux/drivers/s390/block/dasd_eckd.c (revision 817f2c84)
1 /*
2  * File...........: linux/drivers/s390/block/dasd_eckd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 
23 #include <asm/debug.h>
24 #include <asm/idals.h>
25 #include <asm/ebcdic.h>
26 #include <asm/compat.h>
27 #include <asm/io.h>
28 #include <asm/uaccess.h>
29 #include <asm/cio.h>
30 #include <asm/ccwdev.h>
31 #include <asm/itcw.h>
32 
33 #include "dasd_int.h"
34 #include "dasd_eckd.h"
35 #include "../cio/chsc.h"
36 
37 
38 #ifdef PRINTK_HEADER
39 #undef PRINTK_HEADER
40 #endif				/* PRINTK_HEADER */
41 #define PRINTK_HEADER "dasd(eckd):"
42 
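/*
 * Helper macros for picking the formula and factor fields out of the
 * read device characteristics data: the formula byte selects whether
 * the 0x01 or the 0x02 factor set applies; f4 and f5 exist only for
 * formula 0x02 and evaluate to 0 otherwise.
 */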
43 #define ECKD_C0(i) (i->home_bytes)
44 #define ECKD_F(i) (i->formula)
45 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
46 		    (i->factors.f_0x02.f1))
47 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
48 		    (i->factors.f_0x02.f2))
49 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
50 		    (i->factors.f_0x02.f3))
51 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
52 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
53 #define ECKD_F6(i) (i->factor6)
54 #define ECKD_F7(i) (i->factor7)
55 #define ECKD_F8(i) (i->factor8)
56 
57 MODULE_LICENSE("GPL");
58 
59 static struct dasd_discipline dasd_eckd_discipline;
60 
61 /* The ccw bus type uses this table to find devices that it sends to
62  * dasd_eckd_probe */
63 static struct ccw_device_id dasd_eckd_ids[] = {
64 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
65 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
66 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
67 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
68 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
69 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
70 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
71 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
72 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
73 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
74 	{ /* end of list */ },
75 };
76 
77 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
78 
79 static struct ccw_driver dasd_eckd_driver; /* see below */
80 
81 #define INIT_CQR_OK 0
82 #define INIT_CQR_UNFORMATTED 1
83 #define INIT_CQR_ERROR 2
84 
85 /* emergency request for reserve/release */
86 static struct {
87 	struct dasd_ccw_req cqr;
88 	struct ccw1 ccw;
89 	char data[32];
90 } *dasd_reserve_req;
91 static DEFINE_MUTEX(dasd_reserve_mutex);
92 
93 
94 /* initial attempt at a probe function. This can be simplified once
95  * the other detection code is gone */
96 static int
97 dasd_eckd_probe (struct ccw_device *cdev)
98 {
99 	int ret;
100 
101 	/* set ECKD specific ccw-device options */
102 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
103 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
104 	if (ret) {
105 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
106 				"dasd_eckd_probe: could not set "
107 				"ccw-device options");
108 		return ret;
109 	}
110 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
111 	return ret;
112 }
113 
114 static int
115 dasd_eckd_set_online(struct ccw_device *cdev)
116 {
117 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
118 }
119 
120 static const int sizes_trk0[] = { 28, 148, 84 };
121 #define LABEL_SIZE 140
122 
123 static inline unsigned int
124 round_up_multiple(unsigned int no, unsigned int mult)
125 {
126 	int rem = no % mult;
127 	return (rem ? no - rem + mult : no);
128 }
129 
130 static inline unsigned int
131 ceil_quot(unsigned int d1, unsigned int d2)
132 {
133 	return (d1 + (d2 - 1)) / d2;
134 }
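/* e.g. ceil_quot(10, 4) == 3 and round_up_multiple(10, 4) == 12 */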
135 
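/*
 * Number of records with key length kl and data length dl that fit on
 * one track, using the device-type specific capacity formulas for
 * 3380, 3390 and 9345 devices; unknown device types yield 0.
 */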
136 static unsigned int
137 recs_per_track(struct dasd_eckd_characteristics * rdc,
138 	       unsigned int kl, unsigned int dl)
139 {
140 	int dn, kn;
141 
142 	switch (rdc->dev_type) {
143 	case 0x3380:
144 		if (kl)
145 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
146 				       ceil_quot(dl + 12, 32));
147 		else
148 			return 1499 / (15 + ceil_quot(dl + 12, 32));
149 	case 0x3390:
150 		dn = ceil_quot(dl + 6, 232) + 1;
151 		if (kl) {
152 			kn = ceil_quot(kl + 6, 232) + 1;
153 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
154 				       9 + ceil_quot(dl + 6 * dn, 34));
155 		} else
156 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
157 	case 0x9345:
158 		dn = ceil_quot(dl + 6, 232) + 1;
159 		if (kl) {
160 			kn = ceil_quot(kl + 6, 232) + 1;
161 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
162 				       ceil_quot(dl + 6 * dn, 34));
163 		} else
164 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
165 	}
166 	return 0;
167 }
168 
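/*
 * Pack a track address into the cylinder/head format used by the
 * channel programs: the low 16 bits of the cylinder number go into
 * cyl, any higher cylinder bits go into the upper 12 bits of head,
 * and the real head number occupies the low 4 bits.
 */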
169 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
170 {
171 	geo->cyl = (__u16) cyl;
172 	geo->head = cyl >> 16;
173 	geo->head <<= 4;
174 	geo->head |= head;
175 }
176 
177 static int
178 check_XRC (struct ccw1         *de_ccw,
179            struct DE_eckd_data *data,
180            struct dasd_device  *device)
181 {
182         struct dasd_eckd_private *private;
183 	int rc;
184 
185         private = (struct dasd_eckd_private *) device->private;
186 	if (!private->rdc_data.facilities.XRC_supported)
187 		return 0;
188 
189         /* switch on System Time Stamp - needed for XRC Support */
190 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
191 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
192 
193 	rc = get_sync_clock(&data->ep_sys_time);
194 	/* Ignore return code if sync clock is switched off. */
195 	if (rc == -ENOSYS || rc == -EACCES)
196 		rc = 0;
197 
198 	de_ccw->count = sizeof(struct DE_eckd_data);
199 	de_ccw->flags |= CCW_FLAG_SLI;
200 	return rc;
201 }
202 
203 static int
204 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
205 	      unsigned int totrk, int cmd, struct dasd_device *device)
206 {
207 	struct dasd_eckd_private *private;
208 	u32 begcyl, endcyl;
209 	u16 heads, beghead, endhead;
210 	int rc = 0;
211 
212 	private = (struct dasd_eckd_private *) device->private;
213 
214 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
215 	ccw->flags = 0;
216 	ccw->count = 16;
217 	ccw->cda = (__u32) __pa(data);
218 
219 	memset(data, 0, sizeof(struct DE_eckd_data));
220 	switch (cmd) {
221 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
222 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
223 	case DASD_ECKD_CCW_READ:
224 	case DASD_ECKD_CCW_READ_MT:
225 	case DASD_ECKD_CCW_READ_CKD:
226 	case DASD_ECKD_CCW_READ_CKD_MT:
227 	case DASD_ECKD_CCW_READ_KD:
228 	case DASD_ECKD_CCW_READ_KD_MT:
229 	case DASD_ECKD_CCW_READ_COUNT:
230 		data->mask.perm = 0x1;
231 		data->attributes.operation = private->attrib.operation;
232 		break;
233 	case DASD_ECKD_CCW_WRITE:
234 	case DASD_ECKD_CCW_WRITE_MT:
235 	case DASD_ECKD_CCW_WRITE_KD:
236 	case DASD_ECKD_CCW_WRITE_KD_MT:
237 		data->mask.perm = 0x02;
238 		data->attributes.operation = private->attrib.operation;
239 		rc = check_XRC (ccw, data, device);
240 		break;
241 	case DASD_ECKD_CCW_WRITE_CKD:
242 	case DASD_ECKD_CCW_WRITE_CKD_MT:
243 		data->attributes.operation = DASD_BYPASS_CACHE;
244 		rc = check_XRC (ccw, data, device);
245 		break;
246 	case DASD_ECKD_CCW_ERASE:
247 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
248 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
249 		data->mask.perm = 0x3;
250 		data->mask.auth = 0x1;
251 		data->attributes.operation = DASD_BYPASS_CACHE;
252 		rc = check_XRC (ccw, data, device);
253 		break;
254 	default:
255 		dev_err(&device->cdev->dev,
256 			"0x%x is not a known command\n", cmd);
257 		break;
258 	}
259 
260 	data->attributes.mode = 0x3;	/* ECKD */
261 
262 	if ((private->rdc_data.cu_type == 0x2105 ||
263 	     private->rdc_data.cu_type == 0x2107 ||
264 	     private->rdc_data.cu_type == 0x1750)
265 	    && !(private->uses_cdl && trk < 2))
266 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
267 
268 	heads = private->rdc_data.trk_per_cyl;
269 	begcyl = trk / heads;
270 	beghead = trk % heads;
271 	endcyl = totrk / heads;
272 	endhead = totrk % heads;
273 
274 	/* check for sequential prestage - enhance cylinder range */
275 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
276 	    data->attributes.operation == DASD_SEQ_ACCESS) {
277 
278 		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
279 			endcyl += private->attrib.nr_cyl;
280 		else
281 			endcyl = (private->real_cyl - 1);
282 	}
283 
284 	set_ch_t(&data->beg_ext, begcyl, beghead);
285 	set_ch_t(&data->end_ext, endcyl, endhead);
286 	return rc;
287 }
288 
289 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
290 			       struct dasd_device  *device)
291 {
292 	struct dasd_eckd_private *private;
293 	int rc;
294 
295 	private = (struct dasd_eckd_private *) device->private;
296 	if (!private->rdc_data.facilities.XRC_supported)
297 		return 0;
298 
299 	/* switch on System Time Stamp - needed for XRC Support */
300 	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
301 	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
302 	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
303 
304 	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
305 	/* Ignore return code if sync clock is switched off. */
306 	if (rc == -ENOSYS || rc == -EACCES)
307 		rc = 0;
308 	return rc;
309 }
310 
311 static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
312 			  unsigned int rec_on_trk, int count, int cmd,
313 			  struct dasd_device *device, unsigned int reclen,
314 			  unsigned int tlf)
315 {
316 	struct dasd_eckd_private *private;
317 	int sector;
318 	int dn, d;
319 
320 	private = (struct dasd_eckd_private *) device->private;
321 
322 	memset(data, 0, sizeof(*data));
323 	sector = 0;
324 	if (rec_on_trk) {
325 		switch (private->rdc_data.dev_type) {
326 		case 0x3390:
327 			dn = ceil_quot(reclen + 6, 232);
328 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
329 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
330 			break;
331 		case 0x3380:
332 			d = 7 + ceil_quot(reclen + 12, 32);
333 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
334 			break;
335 		}
336 	}
337 	data->sector = sector;
338 	/* note: the meaning of count depends on the operation:
339 	 *	 for record-based I/O it's the number of records, but for
340 	 *	 track-based I/O it's the number of tracks
341 	 */
342 	data->count = count;
343 	switch (cmd) {
344 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
345 		data->operation.orientation = 0x3;
346 		data->operation.operation = 0x03;
347 		break;
348 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
349 		data->operation.orientation = 0x3;
350 		data->operation.operation = 0x16;
351 		break;
352 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
353 		data->operation.orientation = 0x1;
354 		data->operation.operation = 0x03;
355 		data->count++;
356 		break;
357 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
358 		data->operation.orientation = 0x3;
359 		data->operation.operation = 0x16;
360 		data->count++;
361 		break;
362 	case DASD_ECKD_CCW_WRITE:
363 	case DASD_ECKD_CCW_WRITE_MT:
364 	case DASD_ECKD_CCW_WRITE_KD:
365 	case DASD_ECKD_CCW_WRITE_KD_MT:
366 		data->auxiliary.length_valid = 0x1;
367 		data->length = reclen;
368 		data->operation.operation = 0x01;
369 		break;
370 	case DASD_ECKD_CCW_WRITE_CKD:
371 	case DASD_ECKD_CCW_WRITE_CKD_MT:
372 		data->auxiliary.length_valid = 0x1;
373 		data->length = reclen;
374 		data->operation.operation = 0x03;
375 		break;
376 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
377 		data->auxiliary.length_valid = 0x1;
378 		data->length = reclen;	/* not tlf, as one might think */
379 		data->operation.operation = 0x3F;
380 		data->extended_operation = 0x23;
381 		break;
382 	case DASD_ECKD_CCW_READ:
383 	case DASD_ECKD_CCW_READ_MT:
384 	case DASD_ECKD_CCW_READ_KD:
385 	case DASD_ECKD_CCW_READ_KD_MT:
386 		data->auxiliary.length_valid = 0x1;
387 		data->length = reclen;
388 		data->operation.operation = 0x06;
389 		break;
390 	case DASD_ECKD_CCW_READ_CKD:
391 	case DASD_ECKD_CCW_READ_CKD_MT:
392 		data->auxiliary.length_valid = 0x1;
393 		data->length = reclen;
394 		data->operation.operation = 0x16;
395 		break;
396 	case DASD_ECKD_CCW_READ_COUNT:
397 		data->operation.operation = 0x06;
398 		break;
399 	case DASD_ECKD_CCW_READ_TRACK_DATA:
400 		data->auxiliary.length_valid = 0x1;
401 		data->length = tlf;
402 		data->operation.operation = 0x0C;
403 		break;
404 	case DASD_ECKD_CCW_ERASE:
405 		data->length = reclen;
406 		data->auxiliary.length_valid = 0x1;
407 		data->operation.operation = 0x0b;
408 		break;
409 	default:
410 		DBF_DEV_EVENT(DBF_ERR, device,
411 			    "fill LRE unknown opcode 0x%x", cmd);
412 		BUG();
413 	}
414 	set_ch_t(&data->seek_addr,
415 		 trk / private->rdc_data.trk_per_cyl,
416 		 trk % private->rdc_data.trk_per_cyl);
417 	data->search_arg.cyl = data->seek_addr.cyl;
418 	data->search_arg.head = data->seek_addr.head;
419 	data->search_arg.record = rec_on_trk;
420 }
421 
422 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
423 		      unsigned int trk, unsigned int totrk, int cmd,
424 		      struct dasd_device *basedev, struct dasd_device *startdev,
425 		      unsigned char format, unsigned int rec_on_trk, int count,
426 		      unsigned int blksize, unsigned int tlf)
427 {
428 	struct dasd_eckd_private *basepriv, *startpriv;
429 	struct DE_eckd_data *dedata;
430 	struct LRE_eckd_data *lredata;
431 	u32 begcyl, endcyl;
432 	u16 heads, beghead, endhead;
433 	int rc = 0;
434 
435 	basepriv = (struct dasd_eckd_private *) basedev->private;
436 	startpriv = (struct dasd_eckd_private *) startdev->private;
437 	dedata = &pfxdata->define_extent;
438 	lredata = &pfxdata->locate_record;
439 
440 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
441 	ccw->flags = 0;
442 	ccw->count = sizeof(*pfxdata);
443 	ccw->cda = (__u32) __pa(pfxdata);
444 
445 	memset(pfxdata, 0, sizeof(*pfxdata));
446 	/* prefix data */
447 	if (format > 1) {
448 		DBF_DEV_EVENT(DBF_ERR, basedev,
449 			      "PFX LRE unknown format 0x%x", format);
450 		BUG();
451 		return -EINVAL;
452 	}
453 	pfxdata->format = format;
454 	pfxdata->base_address = basepriv->ned->unit_addr;
455 	pfxdata->base_lss = basepriv->ned->ID;
456 	pfxdata->validity.define_extent = 1;
457 
458 	/* private uid is kept up to date, conf_data may be outdated */
459 	if (startpriv->uid.type != UA_BASE_DEVICE) {
460 		pfxdata->validity.verify_base = 1;
461 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
462 			pfxdata->validity.hyper_pav = 1;
463 	}
464 
465 	/* define extent data (mostly) */
466 	switch (cmd) {
467 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
468 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
469 	case DASD_ECKD_CCW_READ:
470 	case DASD_ECKD_CCW_READ_MT:
471 	case DASD_ECKD_CCW_READ_CKD:
472 	case DASD_ECKD_CCW_READ_CKD_MT:
473 	case DASD_ECKD_CCW_READ_KD:
474 	case DASD_ECKD_CCW_READ_KD_MT:
475 	case DASD_ECKD_CCW_READ_COUNT:
476 		dedata->mask.perm = 0x1;
477 		dedata->attributes.operation = basepriv->attrib.operation;
478 		break;
479 	case DASD_ECKD_CCW_READ_TRACK_DATA:
480 		dedata->mask.perm = 0x1;
481 		dedata->attributes.operation = basepriv->attrib.operation;
482 		dedata->blk_size = 0;
483 		break;
484 	case DASD_ECKD_CCW_WRITE:
485 	case DASD_ECKD_CCW_WRITE_MT:
486 	case DASD_ECKD_CCW_WRITE_KD:
487 	case DASD_ECKD_CCW_WRITE_KD_MT:
488 		dedata->mask.perm = 0x02;
489 		dedata->attributes.operation = basepriv->attrib.operation;
490 		rc = check_XRC_on_prefix(pfxdata, basedev);
491 		break;
492 	case DASD_ECKD_CCW_WRITE_CKD:
493 	case DASD_ECKD_CCW_WRITE_CKD_MT:
494 		dedata->attributes.operation = DASD_BYPASS_CACHE;
495 		rc = check_XRC_on_prefix(pfxdata, basedev);
496 		break;
497 	case DASD_ECKD_CCW_ERASE:
498 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
499 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
500 		dedata->mask.perm = 0x3;
501 		dedata->mask.auth = 0x1;
502 		dedata->attributes.operation = DASD_BYPASS_CACHE;
503 		rc = check_XRC_on_prefix(pfxdata, basedev);
504 		break;
505 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
506 		dedata->mask.perm = 0x02;
507 		dedata->attributes.operation = basepriv->attrib.operation;
508 		dedata->blk_size = blksize;
509 		rc = check_XRC_on_prefix(pfxdata, basedev);
510 		break;
511 	default:
512 		DBF_DEV_EVENT(DBF_ERR, basedev,
513 			    "PFX LRE unknown opcode 0x%x", cmd);
514 		BUG();
515 		return -EINVAL;
516 	}
517 
518 	dedata->attributes.mode = 0x3;	/* ECKD */
519 
520 	if ((basepriv->rdc_data.cu_type == 0x2105 ||
521 	     basepriv->rdc_data.cu_type == 0x2107 ||
522 	     basepriv->rdc_data.cu_type == 0x1750)
523 	    && !(basepriv->uses_cdl && trk < 2))
524 		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
525 
526 	heads = basepriv->rdc_data.trk_per_cyl;
527 	begcyl = trk / heads;
528 	beghead = trk % heads;
529 	endcyl = totrk / heads;
530 	endhead = totrk % heads;
531 
532 	/* check for sequential prestage - enhance cylinder range */
533 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
534 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
535 
536 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
537 			endcyl += basepriv->attrib.nr_cyl;
538 		else
539 			endcyl = (basepriv->real_cyl - 1);
540 	}
541 
542 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
543 	set_ch_t(&dedata->end_ext, endcyl, endhead);
544 
545 	if (format == 1) {
546 		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
547 			      basedev, blksize, tlf);
548 	}
549 
550 	return rc;
551 }
552 
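/*
 * Convenience wrapper around prefix_LRE: with format 0 only the
 * define extent part of the prefix data is filled in, so no locate
 * record (LRE) parameters are needed.
 */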
553 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
554 		  unsigned int trk, unsigned int totrk, int cmd,
555 		  struct dasd_device *basedev, struct dasd_device *startdev)
556 {
557 	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
558 			  0, 0, 0, 0, 0);
559 }
560 
561 static void
562 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
563 	      unsigned int rec_on_trk, int no_rec, int cmd,
564 	      struct dasd_device * device, int reclen)
565 {
566 	struct dasd_eckd_private *private;
567 	int sector;
568 	int dn, d;
569 
570 	private = (struct dasd_eckd_private *) device->private;
571 
572 	DBF_DEV_EVENT(DBF_INFO, device,
573 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
574 		  trk, rec_on_trk, no_rec, cmd, reclen);
575 
576 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
577 	ccw->flags = 0;
578 	ccw->count = 16;
579 	ccw->cda = (__u32) __pa(data);
580 
581 	memset(data, 0, sizeof(struct LO_eckd_data));
582 	sector = 0;
583 	if (rec_on_trk) {
584 		switch (private->rdc_data.dev_type) {
585 		case 0x3390:
586 			dn = ceil_quot(reclen + 6, 232);
587 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
588 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
589 			break;
590 		case 0x3380:
591 			d = 7 + ceil_quot(reclen + 12, 32);
592 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
593 			break;
594 		}
595 	}
596 	data->sector = sector;
597 	data->count = no_rec;
598 	switch (cmd) {
599 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
600 		data->operation.orientation = 0x3;
601 		data->operation.operation = 0x03;
602 		break;
603 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
604 		data->operation.orientation = 0x3;
605 		data->operation.operation = 0x16;
606 		break;
607 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
608 		data->operation.orientation = 0x1;
609 		data->operation.operation = 0x03;
610 		data->count++;
611 		break;
612 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
613 		data->operation.orientation = 0x3;
614 		data->operation.operation = 0x16;
615 		data->count++;
616 		break;
617 	case DASD_ECKD_CCW_WRITE:
618 	case DASD_ECKD_CCW_WRITE_MT:
619 	case DASD_ECKD_CCW_WRITE_KD:
620 	case DASD_ECKD_CCW_WRITE_KD_MT:
621 		data->auxiliary.last_bytes_used = 0x1;
622 		data->length = reclen;
623 		data->operation.operation = 0x01;
624 		break;
625 	case DASD_ECKD_CCW_WRITE_CKD:
626 	case DASD_ECKD_CCW_WRITE_CKD_MT:
627 		data->auxiliary.last_bytes_used = 0x1;
628 		data->length = reclen;
629 		data->operation.operation = 0x03;
630 		break;
631 	case DASD_ECKD_CCW_READ:
632 	case DASD_ECKD_CCW_READ_MT:
633 	case DASD_ECKD_CCW_READ_KD:
634 	case DASD_ECKD_CCW_READ_KD_MT:
635 		data->auxiliary.last_bytes_used = 0x1;
636 		data->length = reclen;
637 		data->operation.operation = 0x06;
638 		break;
639 	case DASD_ECKD_CCW_READ_CKD:
640 	case DASD_ECKD_CCW_READ_CKD_MT:
641 		data->auxiliary.last_bytes_used = 0x1;
642 		data->length = reclen;
643 		data->operation.operation = 0x16;
644 		break;
645 	case DASD_ECKD_CCW_READ_COUNT:
646 		data->operation.operation = 0x06;
647 		break;
648 	case DASD_ECKD_CCW_ERASE:
649 		data->length = reclen;
650 		data->auxiliary.last_bytes_used = 0x1;
651 		data->operation.operation = 0x0b;
652 		break;
653 	default:
654 		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
655 			      "opcode 0x%x", cmd);
656 	}
657 	set_ch_t(&data->seek_addr,
658 		 trk / private->rdc_data.trk_per_cyl,
659 		 trk % private->rdc_data.trk_per_cyl);
660 	data->search_arg.cyl = data->seek_addr.cyl;
661 	data->search_arg.head = data->seek_addr.head;
662 	data->search_arg.record = rec_on_trk;
663 }
664 
665 /*
666  * Returns 1 if the block is one of the special blocks that need
667  * to get read/written with the KD variant of the command.
668  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
669  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
670  * Luckily the KD variants differ only by one bit (0x08) from the
671  * normal variant. So don't wonder about code like:
672  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
673  *         ccw->cmd_code |= 0x8;
674  */
675 static inline int
676 dasd_eckd_cdl_special(int blk_per_trk, int recid)
677 {
678 	if (recid < 3)
679 		return 1;
680 	if (recid < blk_per_trk)
681 		return 0;
682 	if (recid < 2 * blk_per_trk)
683 		return 1;
684 	return 0;
685 }
686 
687 /*
688  * Returns the record size for the special blocks of the cdl format.
689  * Only returns something useful if dasd_eckd_cdl_special is true
690  * for the recid.
691  */
692 static inline int
693 dasd_eckd_cdl_reclen(int recid)
694 {
695 	if (recid < 3)
696 		return sizes_trk0[recid];
697 	return LABEL_SIZE;
698 }
699 
700 /*
701  * Generate device unique id that specifies the physical device.
702  */
703 static int dasd_eckd_generate_uid(struct dasd_device *device)
704 {
705 	struct dasd_eckd_private *private;
706 	struct dasd_uid *uid;
707 	int count;
708 	unsigned long flags;
709 
710 	private = (struct dasd_eckd_private *) device->private;
711 	if (!private)
712 		return -ENODEV;
713 	if (!private->ned || !private->gneq)
714 		return -ENODEV;
715 	uid = &private->uid;
716 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
717 	memset(uid, 0, sizeof(struct dasd_uid));
718 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
719 	       sizeof(uid->vendor) - 1);
720 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
721 	memcpy(uid->serial, private->ned->HDA_location,
722 	       sizeof(uid->serial) - 1);
723 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
724 	uid->ssid = private->gneq->subsystemID;
725 	uid->real_unit_addr = private->ned->unit_addr;
726 	if (private->sneq) {
727 		uid->type = private->sneq->sua_flags;
728 		if (uid->type == UA_BASE_PAV_ALIAS)
729 			uid->base_unit_addr = private->sneq->base_unit_addr;
730 	} else {
731 		uid->type = UA_BASE_DEVICE;
732 	}
733 	if (private->vdsneq) {
734 		for (count = 0; count < 16; count++) {
735 			sprintf(uid->vduit+2*count, "%02x",
736 				private->vdsneq->uit[count]);
737 		}
738 	}
739 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
740 	return 0;
741 }
742 
743 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
744 {
745 	struct dasd_eckd_private *private;
746 	unsigned long flags;
747 
748 	if (device->private) {
749 		private = (struct dasd_eckd_private *)device->private;
750 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
751 		*uid = private->uid;
752 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
753 		return 0;
754 	}
755 	return -EINVAL;
756 }
757 
758 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
759 						    void *rcd_buffer,
760 						    struct ciw *ciw, __u8 lpm)
761 {
762 	struct dasd_ccw_req *cqr;
763 	struct ccw1 *ccw;
764 
765 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
766 				   device);
767 
768 	if (IS_ERR(cqr)) {
769 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
770 			      "Could not allocate RCD request");
771 		return cqr;
772 	}
773 
774 	ccw = cqr->cpaddr;
775 	ccw->cmd_code = ciw->cmd;
776 	ccw->cda = (__u32)(addr_t)rcd_buffer;
777 	ccw->count = ciw->count;
778 
779 	cqr->startdev = device;
780 	cqr->memdev = device;
781 	cqr->block = NULL;
782 	cqr->expires = 10*HZ;
783 	cqr->lpm = lpm;
784 	cqr->retries = 256;
785 	cqr->buildclk = get_clock();
786 	cqr->status = DASD_CQR_FILLED;
787 	return cqr;
788 }
789 
790 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
791 				   void **rcd_buffer,
792 				   int *rcd_buffer_size, __u8 lpm)
793 {
794 	struct ciw *ciw;
795 	char *rcd_buf = NULL;
796 	int ret;
797 	struct dasd_ccw_req *cqr;
798 
799 	/*
800 	 * scan for RCD command in extended SenseID data
801 	 */
802 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
803 	if (!ciw || ciw->cmd == 0) {
804 		ret = -EOPNOTSUPP;
805 		goto out_error;
806 	}
807 	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
808 	if (!rcd_buf) {
809 		ret = -ENOMEM;
810 		goto out_error;
811 	}
812 
813 	/*
814 	 * buffer has to start with EBCDIC "V1.0" to show
815 	 * support for virtual device SNEQ
816 	 */
817 	rcd_buf[0] = 0xE5;
818 	rcd_buf[1] = 0xF1;
819 	rcd_buf[2] = 0x4B;
820 	rcd_buf[3] = 0xF0;
821 	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
822 	if (IS_ERR(cqr)) {
823 		ret =  PTR_ERR(cqr);
824 		goto out_error;
825 	}
826 	ret = dasd_sleep_on(cqr);
827 	/*
828 	 * on success we update the user input parms
829 	 */
830 	dasd_sfree_request(cqr, cqr->memdev);
831 	if (ret)
832 		goto out_error;
833 
834 	*rcd_buffer_size = ciw->count;
835 	*rcd_buffer = rcd_buf;
836 	return 0;
837 out_error:
838 	kfree(rcd_buf);
839 	*rcd_buffer = NULL;
840 	*rcd_buffer_size = 0;
841 	return ret;
842 }
843 
844 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
845 {
846 
847 	struct dasd_sneq *sneq;
848 	int i, count;
849 
850 	private->ned = NULL;
851 	private->sneq = NULL;
852 	private->vdsneq = NULL;
853 	private->gneq = NULL;
854 	count = private->conf_len / sizeof(struct dasd_sneq);
855 	sneq = (struct dasd_sneq *)private->conf_data;
856 	for (i = 0; i < count; ++i) {
857 		if (sneq->flags.identifier == 1 && sneq->format == 1)
858 			private->sneq = sneq;
859 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
860 			private->vdsneq = (struct vd_sneq *)sneq;
861 		else if (sneq->flags.identifier == 2)
862 			private->gneq = (struct dasd_gneq *)sneq;
863 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
864 			private->ned = (struct dasd_ned *)sneq;
865 		sneq++;
866 	}
867 	if (!private->ned || !private->gneq) {
868 		private->ned = NULL;
869 		private->sneq = NULL;
870 		private->vdsneq = NULL;
871 		private->gneq = NULL;
872 		return -EINVAL;
873 	}
874 	return 0;
875 
876 };
877 
878 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
879 {
880 	struct dasd_gneq *gneq;
881 	int i, count, found;
882 
883 	count = conf_len / sizeof(*gneq);
884 	gneq = (struct dasd_gneq *)conf_data;
885 	found = 0;
886 	for (i = 0; i < count; ++i) {
887 		if (gneq->flags.identifier == 2) {
888 			found = 1;
889 			break;
890 		}
891 		gneq++;
892 	}
893 	if (found)
894 		return ((char *)gneq)[18] & 0x07;
895 	else
896 		return 0;
897 }
898 
899 static int dasd_eckd_read_conf(struct dasd_device *device)
900 {
901 	void *conf_data;
902 	int conf_len, conf_data_saved;
903 	int rc;
904 	__u8 lpm;
905 	struct dasd_eckd_private *private;
906 	struct dasd_eckd_path *path_data;
907 
908 	private = (struct dasd_eckd_private *) device->private;
909 	path_data = (struct dasd_eckd_path *) &private->path_data;
910 	path_data->opm = ccw_device_get_path_mask(device->cdev);
911 	lpm = 0x80;
912 	conf_data_saved = 0;
913 	/* get configuration data per operational path */
914 	for (lpm = 0x80; lpm; lpm>>= 1) {
915 		if (lpm & path_data->opm){
916 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
917 						     &conf_len, lpm);
918 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
919 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
920 					  "Read configuration data returned "
921 					  "error %d", rc);
922 				return rc;
923 			}
924 			if (conf_data == NULL) {
925 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
926 						"No configuration data "
927 						"retrieved");
928 				continue;	/* no error */
929 			}
930 			/* save first valid configuration data */
931 			if (!conf_data_saved) {
932 				kfree(private->conf_data);
933 				private->conf_data = conf_data;
934 				private->conf_len = conf_len;
935 				if (dasd_eckd_identify_conf_parts(private)) {
936 					private->conf_data = NULL;
937 					private->conf_len = 0;
938 					kfree(conf_data);
939 					continue;
940 				}
941 				conf_data_saved++;
942 			}
943 			switch (dasd_eckd_path_access(conf_data, conf_len)) {
944 			case 0x02:
945 				path_data->npm |= lpm;
946 				break;
947 			case 0x03:
948 				path_data->ppm |= lpm;
949 				break;
950 			}
951 			if (conf_data != private->conf_data)
952 				kfree(conf_data);
953 		}
954 	}
955 	return 0;
956 }
957 
958 static int dasd_eckd_read_features(struct dasd_device *device)
959 {
960 	struct dasd_psf_prssd_data *prssdp;
961 	struct dasd_rssd_features *features;
962 	struct dasd_ccw_req *cqr;
963 	struct ccw1 *ccw;
964 	int rc;
965 	struct dasd_eckd_private *private;
966 
967 	private = (struct dasd_eckd_private *) device->private;
968 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
969 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
970 				   (sizeof(struct dasd_psf_prssd_data) +
971 				    sizeof(struct dasd_rssd_features)),
972 				   device);
973 	if (IS_ERR(cqr)) {
974 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
975 				"allocate initialization request");
976 		return PTR_ERR(cqr);
977 	}
978 	cqr->startdev = device;
979 	cqr->memdev = device;
980 	cqr->block = NULL;
981 	cqr->retries = 256;
982 	cqr->expires = 10 * HZ;
983 
984 	/* Prepare for Read Subsystem Data */
985 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
986 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
987 	prssdp->order = PSF_ORDER_PRSSD;
988 	prssdp->suborder = 0x41;	/* Read Feature Codes */
989 	/* all other bytes of prssdp must be zero */
990 
991 	ccw = cqr->cpaddr;
992 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
993 	ccw->count = sizeof(struct dasd_psf_prssd_data);
994 	ccw->flags |= CCW_FLAG_CC;
995 	ccw->cda = (__u32)(addr_t) prssdp;
996 
997 	/* Read Subsystem Data - feature codes */
998 	features = (struct dasd_rssd_features *) (prssdp + 1);
999 	memset(features, 0, sizeof(struct dasd_rssd_features));
1000 
1001 	ccw++;
1002 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
1003 	ccw->count = sizeof(struct dasd_rssd_features);
1004 	ccw->cda = (__u32)(addr_t) features;
1005 
1006 	cqr->buildclk = get_clock();
1007 	cqr->status = DASD_CQR_FILLED;
1008 	rc = dasd_sleep_on(cqr);
1009 	if (rc == 0) {
1010 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1011 		features = (struct dasd_rssd_features *) (prssdp + 1);
1012 		memcpy(&private->features, features,
1013 		       sizeof(struct dasd_rssd_features));
1014 	} else
1015 		dev_warn(&device->cdev->dev, "Reading device feature codes"
1016 			 " failed with rc=%d\n", rc);
1017 	dasd_sfree_request(cqr, cqr->memdev);
1018 	return rc;
1019 }
1020 
1021 
1022 /*
1023  * Build CP for Perform Subsystem Function - SSC.
1024  */
1025 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1026 						    int enable_pav)
1027 {
1028 	struct dasd_ccw_req *cqr;
1029 	struct dasd_psf_ssc_data *psf_ssc_data;
1030 	struct ccw1 *ccw;
1031 
1032 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1033 				  sizeof(struct dasd_psf_ssc_data),
1034 				  device);
1035 
1036 	if (IS_ERR(cqr)) {
1037 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1038 			   "Could not allocate PSF-SSC request");
1039 		return cqr;
1040 	}
1041 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1042 	psf_ssc_data->order = PSF_ORDER_SSC;
1043 	psf_ssc_data->suborder = 0xc0;
1044 	if (enable_pav) {
1045 		psf_ssc_data->suborder |= 0x08;
1046 		psf_ssc_data->reserved[0] = 0x88;
1047 	}
1048 	ccw = cqr->cpaddr;
1049 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1050 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1051 	ccw->count = 66;
1052 
1053 	cqr->startdev = device;
1054 	cqr->memdev = device;
1055 	cqr->block = NULL;
1056 	cqr->retries = 256;
1057 	cqr->expires = 10*HZ;
1058 	cqr->buildclk = get_clock();
1059 	cqr->status = DASD_CQR_FILLED;
1060 	return cqr;
1061 }
1062 
1063 /*
1064  * Perform Subsystem Function.
1065  * It is necessary to trigger CIO for channel revalidation since this
1066  * call might change behaviour of DASD devices.
1067  */
1068 static int
1069 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1070 {
1071 	struct dasd_ccw_req *cqr;
1072 	int rc;
1073 
1074 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1075 	if (IS_ERR(cqr))
1076 		return PTR_ERR(cqr);
1077 
1078 	rc = dasd_sleep_on(cqr);
1079 	if (!rc)
1080 		/* trigger CIO to reprobe devices */
1081 		css_schedule_reprobe();
1082 	dasd_sfree_request(cqr, cqr->memdev);
1083 	return rc;
1084 }
1085 
1086 /*
1087  * Validate the storage server of the current device.
1088  */
1089 static void dasd_eckd_validate_server(struct dasd_device *device)
1090 {
1091 	int rc;
1092 	struct dasd_eckd_private *private;
1093 	int enable_pav;
1094 
1095 	if (dasd_nopav || MACHINE_IS_VM)
1096 		enable_pav = 0;
1097 	else
1098 		enable_pav = 1;
1099 	rc = dasd_eckd_psf_ssc(device, enable_pav);
1100 
1101 	/* maybe the requested feature is not available on the server,
1102 	 * therefore just report the error and go ahead */
1103 	private = (struct dasd_eckd_private *) device->private;
1104 	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1105 			"returned rc=%d", private->uid.ssid, rc);
1106 }
1107 
1108 /*
1109  * Check device characteristics.
1110  * If the device is accessible using ECKD discipline, the device is enabled.
1111  */
1112 static int
1113 dasd_eckd_check_characteristics(struct dasd_device *device)
1114 {
1115 	struct dasd_eckd_private *private;
1116 	struct dasd_block *block;
1117 	struct dasd_uid temp_uid;
1118 	int is_known, rc, i;
1119 	int readonly;
1120 	unsigned long value;
1121 
1122 	if (!ccw_device_is_pathgroup(device->cdev)) {
1123 		dev_warn(&device->cdev->dev,
1124 			 "A channel path group could not be established\n");
1125 		return -EIO;
1126 	}
1127 	if (!ccw_device_is_multipath(device->cdev)) {
1128 		dev_info(&device->cdev->dev,
1129 			 "The DASD is not operating in multipath mode\n");
1130 	}
1131 	private = (struct dasd_eckd_private *) device->private;
1132 	if (!private) {
1133 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1134 		if (!private) {
1135 			dev_warn(&device->cdev->dev,
1136 				 "Allocating memory for private DASD data "
1137 				 "failed\n");
1138 			return -ENOMEM;
1139 		}
1140 		device->private = (void *) private;
1141 	} else {
1142 		memset(private, 0, sizeof(*private));
1143 	}
1144 	/* Invalidate status of initial analysis. */
1145 	private->init_cqr_status = -1;
1146 	/* Set default cache operations. */
1147 	private->attrib.operation = DASD_NORMAL_CACHE;
1148 	private->attrib.nr_cyl = 0;
1149 
1150 	/* Read Configuration Data */
1151 	rc = dasd_eckd_read_conf(device);
1152 	if (rc)
1153 		goto out_err1;
1154 
1155 	/* set default timeout */
1156 	device->default_expires = DASD_EXPIRES;
1157 	if (private->gneq) {
1158 		value = 1;
1159 		for (i = 0; i < private->gneq->timeout.value; i++)
1160 			value = 10 * value;
1161 		value = value * private->gneq->timeout.number;
1162 		/* do not accept useless values */
1163 		if (value != 0 && value <= DASD_EXPIRES_MAX)
1164 			device->default_expires = value;
1165 	}
1166 
1167 	/* Generate device unique id */
1168 	rc = dasd_eckd_generate_uid(device);
1169 	if (rc)
1170 		goto out_err1;
1171 
1172 	dasd_eckd_get_uid(device, &temp_uid);
1173 	if (temp_uid.type == UA_BASE_DEVICE) {
1174 		block = dasd_alloc_block();
1175 		if (IS_ERR(block)) {
1176 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1177 					"could not allocate dasd "
1178 					"block structure");
1179 			rc = PTR_ERR(block);
1180 			goto out_err1;
1181 		}
1182 		device->block = block;
1183 		block->base = device;
1184 	}
1185 
1186 	/* register lcu with alias handling, enable PAV if this is a new lcu */
1187 	is_known = dasd_alias_make_device_known_to_lcu(device);
1188 	if (is_known < 0) {
1189 		rc = is_known;
1190 		goto out_err2;
1191 	}
1192 	/*
1193 	 * dasd_eckd_validate_server is done on the first device that
1194 	 * is found for an LCU. All other devices have to wait
1195 	 * for it, so that they will read the correct feature codes.
1196 	 */
1197 	if (!is_known) {
1198 		dasd_eckd_validate_server(device);
1199 		dasd_alias_lcu_setup_complete(device);
1200 	} else
1201 		dasd_alias_wait_for_lcu_setup(device);
1202 
1203 	/* device may report different configuration data after LCU setup */
1204 	rc = dasd_eckd_read_conf(device);
1205 	if (rc)
1206 		goto out_err3;
1207 
1208 	/* Read Feature Codes */
1209 	dasd_eckd_read_features(device);
1210 
1211 	/* Read Device Characteristics */
1212 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1213 					 &private->rdc_data, 64);
1214 	if (rc) {
1215 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1216 				"Read device characteristic failed, rc=%d", rc);
1217 		goto out_err3;
1218 	}
1219 	/* find the valid cylinder size */
1220 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1221 	    private->rdc_data.long_no_cyl)
1222 		private->real_cyl = private->rdc_data.long_no_cyl;
1223 	else
1224 		private->real_cyl = private->rdc_data.no_cyl;
1225 
1226 	readonly = dasd_device_is_ro(device);
1227 	if (readonly)
1228 		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
1229 
1230 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1231 		 "with %d cylinders, %d heads, %d sectors%s\n",
1232 		 private->rdc_data.dev_type,
1233 		 private->rdc_data.dev_model,
1234 		 private->rdc_data.cu_type,
1235 		 private->rdc_data.cu_model.model,
1236 		 private->real_cyl,
1237 		 private->rdc_data.trk_per_cyl,
1238 		 private->rdc_data.sec_per_trk,
1239 		 readonly ? ", read-only device" : "");
1240 	return 0;
1241 
1242 out_err3:
1243 	dasd_alias_disconnect_device_from_lcu(device);
1244 out_err2:
1245 	dasd_free_block(device->block);
1246 	device->block = NULL;
1247 out_err1:
1248 	kfree(private->conf_data);
1249 	kfree(device->private);
1250 	device->private = NULL;
1251 	return rc;
1252 }
1253 
1254 static void dasd_eckd_uncheck_device(struct dasd_device *device)
1255 {
1256 	struct dasd_eckd_private *private;
1257 
1258 	private = (struct dasd_eckd_private *) device->private;
1259 	dasd_alias_disconnect_device_from_lcu(device);
1260 	private->ned = NULL;
1261 	private->sneq = NULL;
1262 	private->vdsneq = NULL;
1263 	private->gneq = NULL;
1264 	private->conf_len = 0;
1265 	kfree(private->conf_data);
1266 	private->conf_data = NULL;
1267 }
1268 
1269 static struct dasd_ccw_req *
1270 dasd_eckd_analysis_ccw(struct dasd_device *device)
1271 {
1272 	struct dasd_eckd_private *private;
1273 	struct eckd_count *count_data;
1274 	struct LO_eckd_data *LO_data;
1275 	struct dasd_ccw_req *cqr;
1276 	struct ccw1 *ccw;
1277 	int cplength, datasize;
1278 	int i;
1279 
1280 	private = (struct dasd_eckd_private *) device->private;
1281 
1282 	cplength = 8;
1283 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1284 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1285 	if (IS_ERR(cqr))
1286 		return cqr;
1287 	ccw = cqr->cpaddr;
1288 	/* Define extent for the first 3 tracks. */
1289 	define_extent(ccw++, cqr->data, 0, 2,
1290 		      DASD_ECKD_CCW_READ_COUNT, device);
1291 	LO_data = cqr->data + sizeof(struct DE_eckd_data);
1292 	/* Locate record for the first 4 records on track 0. */
1293 	ccw[-1].flags |= CCW_FLAG_CC;
1294 	locate_record(ccw++, LO_data++, 0, 0, 4,
1295 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1296 
1297 	count_data = private->count_area;
1298 	for (i = 0; i < 4; i++) {
1299 		ccw[-1].flags |= CCW_FLAG_CC;
1300 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1301 		ccw->flags = 0;
1302 		ccw->count = 8;
1303 		ccw->cda = (__u32)(addr_t) count_data;
1304 		ccw++;
1305 		count_data++;
1306 	}
1307 
1308 	/* Locate record for the first record on track 2. */
1309 	ccw[-1].flags |= CCW_FLAG_CC;
1310 	locate_record(ccw++, LO_data++, 2, 0, 1,
1311 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1312 	/* Read count ccw. */
1313 	ccw[-1].flags |= CCW_FLAG_CC;
1314 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1315 	ccw->flags = 0;
1316 	ccw->count = 8;
1317 	ccw->cda = (__u32)(addr_t) count_data;
1318 
1319 	cqr->block = NULL;
1320 	cqr->startdev = device;
1321 	cqr->memdev = device;
1322 	cqr->retries = 255;
1323 	cqr->buildclk = get_clock();
1324 	cqr->status = DASD_CQR_FILLED;
1325 	return cqr;
1326 }
1327 
1328 /* differentiate between 'no record found' and any other error */
1329 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1330 {
1331 	char *sense;
1332 	if (init_cqr->status == DASD_CQR_DONE)
1333 		return INIT_CQR_OK;
1334 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1335 		 init_cqr->status == DASD_CQR_FAILED) {
1336 		sense = dasd_get_sense(&init_cqr->irb);
1337 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1338 			return INIT_CQR_UNFORMATTED;
1339 		else
1340 			return INIT_CQR_ERROR;
1341 	} else
1342 		return INIT_CQR_ERROR;
1343 }
1344 
1345 /*
1346  * This is the callback function for the init_analysis cqr. It saves
1347  * the status of the initial analysis ccw before it frees it and kicks
1348  * the device to continue the startup sequence. This will call
1349  * dasd_eckd_do_analysis again (if the device has not been marked
1350  * for deletion in the meantime).
1351  */
1352 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1353 					void *data)
1354 {
1355 	struct dasd_eckd_private *private;
1356 	struct dasd_device *device;
1357 
1358 	device = init_cqr->startdev;
1359 	private = (struct dasd_eckd_private *) device->private;
1360 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1361 	dasd_sfree_request(init_cqr, device);
1362 	dasd_kick_device(device);
1363 }
1364 
1365 static int dasd_eckd_start_analysis(struct dasd_block *block)
1366 {
1367 	struct dasd_eckd_private *private;
1368 	struct dasd_ccw_req *init_cqr;
1369 
1370 	private = (struct dasd_eckd_private *) block->base->private;
1371 	init_cqr = dasd_eckd_analysis_ccw(block->base);
1372 	if (IS_ERR(init_cqr))
1373 		return PTR_ERR(init_cqr);
1374 	init_cqr->callback = dasd_eckd_analysis_callback;
1375 	init_cqr->callback_data = NULL;
1376 	init_cqr->expires = 5*HZ;
1377 	/* first try without ERP, so we can later handle unformatted
1378 	 * devices as a special case
1379 	 */
1380 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1381 	init_cqr->retries = 0;
1382 	dasd_add_request_head(init_cqr);
1383 	return -EAGAIN;
1384 }
1385 
1386 static int dasd_eckd_end_analysis(struct dasd_block *block)
1387 {
1388 	struct dasd_device *device;
1389 	struct dasd_eckd_private *private;
1390 	struct eckd_count *count_area;
1391 	unsigned int sb, blk_per_trk;
1392 	int status, i;
1393 	struct dasd_ccw_req *init_cqr;
1394 
1395 	device = block->base;
1396 	private = (struct dasd_eckd_private *) device->private;
1397 	status = private->init_cqr_status;
1398 	private->init_cqr_status = -1;
1399 	if (status == INIT_CQR_ERROR) {
1400 		/* try again, this time with full ERP */
1401 		init_cqr = dasd_eckd_analysis_ccw(device);
1402 		dasd_sleep_on(init_cqr);
1403 		status = dasd_eckd_analysis_evaluation(init_cqr);
1404 		dasd_sfree_request(init_cqr, device);
1405 	}
1406 
1407 	if (status == INIT_CQR_UNFORMATTED) {
1408 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1409 		return -EMEDIUMTYPE;
1410 	} else if (status == INIT_CQR_ERROR) {
1411 		dev_err(&device->cdev->dev,
1412 			"Detecting the DASD disk layout failed because "
1413 			"of an I/O error\n");
1414 		return -EIO;
1415 	}
1416 
1417 	private->uses_cdl = 1;
1418 	/* Check Track 0 for Compatible Disk Layout */
1419 	count_area = NULL;
1420 	for (i = 0; i < 3; i++) {
1421 		if (private->count_area[i].kl != 4 ||
1422 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1423 			private->uses_cdl = 0;
1424 			break;
1425 		}
1426 	}
1427 	if (i == 3)
1428 		count_area = &private->count_area[4];
1429 
1430 	if (private->uses_cdl == 0) {
1431 		for (i = 0; i < 5; i++) {
1432 			if ((private->count_area[i].kl != 0) ||
1433 			    (private->count_area[i].dl !=
1434 			     private->count_area[0].dl))
1435 				break;
1436 		}
1437 		if (i == 5)
1438 			count_area = &private->count_area[0];
1439 	} else {
1440 		if (private->count_area[3].record == 1)
1441 			dev_warn(&device->cdev->dev,
1442 				 "Track 0 has no records following the VTOC\n");
1443 	}
1444 	if (count_area != NULL && count_area->kl == 0) {
1445 		/* we found nothing violating our disk layout */
1446 		if (dasd_check_blocksize(count_area->dl) == 0)
1447 			block->bp_block = count_area->dl;
1448 	}
1449 	if (block->bp_block == 0) {
1450 		dev_warn(&device->cdev->dev,
1451 			 "The disk layout of the DASD is not supported\n");
1452 		return -EMEDIUMTYPE;
1453 	}
1454 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
1455 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
1456 		block->s2b_shift++;
1457 
1458 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1459 	block->blocks = (private->real_cyl *
1460 			  private->rdc_data.trk_per_cyl *
1461 			  blk_per_trk);
1462 
1463 	dev_info(&device->cdev->dev,
1464 		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1465 		 "%s\n", (block->bp_block >> 10),
1466 		 ((private->real_cyl *
1467 		   private->rdc_data.trk_per_cyl *
1468 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
1469 		 ((blk_per_trk * block->bp_block) >> 10),
1470 		 private->uses_cdl ?
1471 		 "compatible disk layout" : "linux disk layout");
1472 
1473 	return 0;
1474 }
1475 
1476 static int dasd_eckd_do_analysis(struct dasd_block *block)
1477 {
1478 	struct dasd_eckd_private *private;
1479 
1480 	private = (struct dasd_eckd_private *) block->base->private;
1481 	if (private->init_cqr_status < 0)
1482 		return dasd_eckd_start_analysis(block);
1483 	else
1484 		return dasd_eckd_end_analysis(block);
1485 }
1486 
1487 static int dasd_eckd_ready_to_online(struct dasd_device *device)
1488 {
1489 	return dasd_alias_add_device(device);
1490 };
1491 
1492 static int dasd_eckd_online_to_ready(struct dasd_device *device)
1493 {
1494 	cancel_work_sync(&device->reload_device);
1495 	return dasd_alias_remove_device(device);
1496 };
1497 
1498 static int
1499 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1500 {
1501 	struct dasd_eckd_private *private;
1502 
1503 	private = (struct dasd_eckd_private *) block->base->private;
1504 	if (dasd_check_blocksize(block->bp_block) == 0) {
1505 		geo->sectors = recs_per_track(&private->rdc_data,
1506 					      0, block->bp_block);
1507 	}
1508 	geo->cylinders = private->rdc_data.no_cyl;
1509 	geo->heads = private->rdc_data.trk_per_cyl;
1510 	return 0;
1511 }
1512 
1513 static struct dasd_ccw_req *
1514 dasd_eckd_format_device(struct dasd_device * device,
1515 			struct format_data_t * fdata)
1516 {
1517 	struct dasd_eckd_private *private;
1518 	struct dasd_ccw_req *fcp;
1519 	struct eckd_count *ect;
1520 	struct ccw1 *ccw;
1521 	void *data;
1522 	int rpt;
1523 	struct ch_t address;
1524 	int cplength, datasize;
1525 	int i;
1526 	int intensity = 0;
1527 	int r0_perm;
1528 
1529 	private = (struct dasd_eckd_private *) device->private;
1530 	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1531 	set_ch_t(&address,
1532 		 fdata->start_unit / private->rdc_data.trk_per_cyl,
1533 		 fdata->start_unit % private->rdc_data.trk_per_cyl);
1534 
1535 	/* Sanity checks. */
1536 	if (fdata->start_unit >=
1537 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
1538 		dev_warn(&device->cdev->dev, "Start track number %d used in "
1539 			 "formatting is too big\n", fdata->start_unit);
1540 		return ERR_PTR(-EINVAL);
1541 	}
1542 	if (fdata->start_unit > fdata->stop_unit) {
1543 		dev_warn(&device->cdev->dev, "Start track %d used in "
1544 			 "formatting exceeds end track\n", fdata->start_unit);
1545 		return ERR_PTR(-EINVAL);
1546 	}
1547 	if (dasd_check_blocksize(fdata->blksize) != 0) {
1548 		dev_warn(&device->cdev->dev,
1549 			 "The DASD cannot be formatted with block size %d\n",
1550 			 fdata->blksize);
1551 		return ERR_PTR(-EINVAL);
1552 	}
1553 
1554 	/*
1555 	 * fdata->intensity is a bit string that tells us what to do:
1556 	 *   Bit 0: write record zero
1557 	 *   Bit 1: write home address, currently not supported
1558 	 *   Bit 2: invalidate tracks
1559 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
1560 	 *   Bit 4: do not allow storage subsystem to modify record zero
1561 	 * Only some bit combinations make sense.
1562 	 */
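	/*
	 * For example, intensity 0x09 means write record zero and format
	 * the track using cdl, and 0x0c means invalidate the track using
	 * cdl (see the switch statements below).
	 */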
1563 	if (fdata->intensity & 0x10) {
1564 		r0_perm = 0;
1565 		intensity = fdata->intensity & ~0x10;
1566 	} else {
1567 		r0_perm = 1;
1568 		intensity = fdata->intensity;
1569 	}
1570 	switch (intensity) {
1571 	case 0x00:	/* Normal format */
1572 	case 0x08:	/* Normal format, use cdl. */
1573 		cplength = 2 + rpt;
1574 		datasize = sizeof(struct DE_eckd_data) +
1575 			sizeof(struct LO_eckd_data) +
1576 			rpt * sizeof(struct eckd_count);
1577 		break;
1578 	case 0x01:	/* Write record zero and format track. */
1579 	case 0x09:	/* Write record zero and format track, use cdl. */
1580 		cplength = 3 + rpt;
1581 		datasize = sizeof(struct DE_eckd_data) +
1582 			sizeof(struct LO_eckd_data) +
1583 			sizeof(struct eckd_count) +
1584 			rpt * sizeof(struct eckd_count);
1585 		break;
1586 	case 0x04:	/* Invalidate track. */
1587 	case 0x0c:	/* Invalidate track, use cdl. */
1588 		cplength = 3;
1589 		datasize = sizeof(struct DE_eckd_data) +
1590 			sizeof(struct LO_eckd_data) +
1591 			sizeof(struct eckd_count);
1592 		break;
1593 	default:
1594 		dev_warn(&device->cdev->dev, "An I/O control call used "
1595 			 "incorrect flags 0x%x\n", fdata->intensity);
1596 		return ERR_PTR(-EINVAL);
1597 	}
1598 	/* Allocate the format ccw request. */
1599 	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1600 	if (IS_ERR(fcp))
1601 		return fcp;
1602 
1603 	data = fcp->data;
1604 	ccw = fcp->cpaddr;
1605 
1606 	switch (intensity & ~0x08) {
1607 	case 0x00: /* Normal format. */
1608 		define_extent(ccw++, (struct DE_eckd_data *) data,
1609 			      fdata->start_unit, fdata->start_unit,
1610 			      DASD_ECKD_CCW_WRITE_CKD, device);
1611 		/* grant subsystem permission to format R0 */
1612 		if (r0_perm)
1613 			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
1614 		data += sizeof(struct DE_eckd_data);
1615 		ccw[-1].flags |= CCW_FLAG_CC;
1616 		locate_record(ccw++, (struct LO_eckd_data *) data,
1617 			      fdata->start_unit, 0, rpt,
1618 			      DASD_ECKD_CCW_WRITE_CKD, device,
1619 			      fdata->blksize);
1620 		data += sizeof(struct LO_eckd_data);
1621 		break;
1622 	case 0x01: /* Write record zero + format track. */
1623 		define_extent(ccw++, (struct DE_eckd_data *) data,
1624 			      fdata->start_unit, fdata->start_unit,
1625 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1626 			      device);
1627 		data += sizeof(struct DE_eckd_data);
1628 		ccw[-1].flags |= CCW_FLAG_CC;
1629 		locate_record(ccw++, (struct LO_eckd_data *) data,
1630 			      fdata->start_unit, 0, rpt + 1,
1631 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1632 			      device->block->bp_block);
1633 		data += sizeof(struct LO_eckd_data);
1634 		break;
1635 	case 0x04: /* Invalidate track. */
1636 		define_extent(ccw++, (struct DE_eckd_data *) data,
1637 			      fdata->start_unit, fdata->start_unit,
1638 			      DASD_ECKD_CCW_WRITE_CKD, device);
1639 		data += sizeof(struct DE_eckd_data);
1640 		ccw[-1].flags |= CCW_FLAG_CC;
1641 		locate_record(ccw++, (struct LO_eckd_data *) data,
1642 			      fdata->start_unit, 0, 1,
1643 			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
1644 		data += sizeof(struct LO_eckd_data);
1645 		break;
1646 	}
1647 	if (intensity & 0x01) {	/* write record zero */
1648 		ect = (struct eckd_count *) data;
1649 		data += sizeof(struct eckd_count);
1650 		ect->cyl = address.cyl;
1651 		ect->head = address.head;
1652 		ect->record = 0;
1653 		ect->kl = 0;
1654 		ect->dl = 8;
1655 		ccw[-1].flags |= CCW_FLAG_CC;
1656 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1657 		ccw->flags = CCW_FLAG_SLI;
1658 		ccw->count = 8;
1659 		ccw->cda = (__u32)(addr_t) ect;
1660 		ccw++;
1661 	}
1662 	if ((intensity & ~0x08) & 0x04) {	/* erase track */
1663 		ect = (struct eckd_count *) data;
1664 		data += sizeof(struct eckd_count);
1665 		ect->cyl = address.cyl;
1666 		ect->head = address.head;
1667 		ect->record = 1;
1668 		ect->kl = 0;
1669 		ect->dl = 0;
1670 		ccw[-1].flags |= CCW_FLAG_CC;
1671 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1672 		ccw->flags = CCW_FLAG_SLI;
1673 		ccw->count = 8;
1674 		ccw->cda = (__u32)(addr_t) ect;
1675 	} else {		/* write remaining records */
1676 		for (i = 0; i < rpt; i++) {
1677 			ect = (struct eckd_count *) data;
1678 			data += sizeof(struct eckd_count);
1679 			ect->cyl = address.cyl;
1680 			ect->head = address.head;
1681 			ect->record = i + 1;
1682 			ect->kl = 0;
1683 			ect->dl = fdata->blksize;
1684 			/* Check for special tracks 0-1 when formatting CDL */
1685 			if ((intensity & 0x08) &&
1686 			    fdata->start_unit == 0) {
1687 				if (i < 3) {
1688 					ect->kl = 4;
1689 					ect->dl = sizes_trk0[i] - 4;
1690 				}
1691 			}
1692 			if ((intensity & 0x08) &&
1693 			    fdata->start_unit == 1) {
1694 				ect->kl = 44;
1695 				ect->dl = LABEL_SIZE - 44;
1696 			}
1697 			ccw[-1].flags |= CCW_FLAG_CC;
1698 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1699 			ccw->flags = CCW_FLAG_SLI;
1700 			ccw->count = 8;
1701 			ccw->cda = (__u32)(addr_t) ect;
1702 			ccw++;
1703 		}
1704 	}
1705 	fcp->startdev = device;
1706 	fcp->memdev = device;
1707 	fcp->retries = 256;
1708 	fcp->buildclk = get_clock();
1709 	fcp->status = DASD_CQR_FILLED;
1710 	return fcp;
1711 }
1712 
1713 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1714 {
1715 	cqr->status = DASD_CQR_FILLED;
1716 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
1717 		dasd_eckd_reset_ccw_to_base_io(cqr);
1718 		cqr->startdev = cqr->block->base;
1719 	}
1720 };
1721 
1722 static dasd_erp_fn_t
1723 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1724 {
1725 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1726 	struct ccw_device *cdev = device->cdev;
1727 
1728 	switch (cdev->id.cu_type) {
1729 	case 0x3990:
1730 	case 0x2105:
1731 	case 0x2107:
1732 	case 0x1750:
1733 		return dasd_3990_erp_action;
1734 	case 0x9343:
1735 	case 0x3880:
1736 	default:
1737 		return dasd_default_erp_action;
1738 	}
1739 }
1740 
1741 static dasd_erp_fn_t
1742 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1743 {
1744 	return dasd_default_erp_postaction;
1745 }
1746 
1747 
1748 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1749 						   struct irb *irb)
1750 {
1751 	char mask;
1752 	char *sense = NULL;
1753 	struct dasd_eckd_private *private;
1754 
1755 	private = (struct dasd_eckd_private *) device->private;
1756 	/* first of all check for state change pending interrupt */
1757 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1758 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
1759 		/* for alias only and not in offline processing */
1760 		if (!device->block && private->lcu &&
1761 		    !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1762 			/*
1763 			 * the state change could be caused by an alias
1764 			 * reassignment; remove the device from alias handling
1765 			 * to prevent new requests from being scheduled on
1766 			 * the wrong alias device
1767 			 */
1768 			dasd_alias_remove_device(device);
1769 
1770 			/* schedule worker to reload device */
1771 			dasd_reload_device(device);
1772 		}
1773 
1774 		dasd_generic_handle_state_change(device);
1775 		return;
1776 	}
1777 
1778 	/* summary unit check */
1779 	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1780 	    (irb->ecw[7] == 0x0D)) {
1781 		dasd_alias_handle_summary_unit_check(device, irb);
1782 		return;
1783 	}
1784 
1785 	sense = dasd_get_sense(irb);
1786 	/* service information message SIM */
1787 	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1788 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1789 		dasd_3990_erp_handle_sim(device, sense);
1790 		dasd_schedule_device_bh(device);
1791 		return;
1792 	}
1793 
1794 	if ((scsw_cc(&irb->scsw) == 1) &&
1795 	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1796 	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
1797 	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
1798 		/* fake irb, do nothing; they are handled elsewhere */
1799 		dasd_schedule_device_bh(device);
1800 		return;
1801 	}
1802 
1803 	if (!sense) {
1804 		/* just report other unsolicited interrupts */
1805 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1806 			    "unsolicited interrupt received");
1807 	} else {
1808 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1809 			    "unsolicited interrupt received "
1810 			    "(sense available)");
1811 		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1812 	}
1813 
1814 	dasd_schedule_device_bh(device);
1815 	return;
1816 };
1817 
1818 
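/*
 * Build a command mode channel program that addresses the request block by
 * block: one define extent (or prefix) ccw, one or more locate record ccws
 * and one read/write ccw per block.  CDL special blocks on the first two
 * tracks get their own locate record with an adjusted record length.
 */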
1819 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1820 					       struct dasd_device *startdev,
1821 					       struct dasd_block *block,
1822 					       struct request *req,
1823 					       sector_t first_rec,
1824 					       sector_t last_rec,
1825 					       sector_t first_trk,
1826 					       sector_t last_trk,
1827 					       unsigned int first_offs,
1828 					       unsigned int last_offs,
1829 					       unsigned int blk_per_trk,
1830 					       unsigned int blksize)
1831 {
1832 	struct dasd_eckd_private *private;
1833 	unsigned long *idaws;
1834 	struct LO_eckd_data *LO_data;
1835 	struct dasd_ccw_req *cqr;
1836 	struct ccw1 *ccw;
1837 	struct req_iterator iter;
1838 	struct bio_vec *bv;
1839 	char *dst;
1840 	unsigned int off;
1841 	int count, cidaw, cplength, datasize;
1842 	sector_t recid;
1843 	unsigned char cmd, rcmd;
1844 	int use_prefix;
1845 	struct dasd_device *basedev;
1846 
1847 	basedev = block->base;
1848 	private = (struct dasd_eckd_private *) basedev->private;
1849 	if (rq_data_dir(req) == READ)
1850 		cmd = DASD_ECKD_CCW_READ_MT;
1851 	else if (rq_data_dir(req) == WRITE)
1852 		cmd = DASD_ECKD_CCW_WRITE_MT;
1853 	else
1854 		return ERR_PTR(-EINVAL);
1855 
1856 	/* Check struct bio and count the number of blocks for the request. */
1857 	count = 0;
1858 	cidaw = 0;
1859 	rq_for_each_segment(bv, req, iter) {
1860 		if (bv->bv_len & (blksize - 1))
1861 			/* ECKD can only do full blocks. */
1862 			return ERR_PTR(-EINVAL);
1863 		count += bv->bv_len >> (block->s2b_shift + 9);
1864 #if defined(CONFIG_64BIT)
1865 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1866 			cidaw += bv->bv_len >> (block->s2b_shift + 9);
1867 #endif
1868 	}
1869 	/* Paranoia. */
1870 	if (count != last_rec - first_rec + 1)
1871 		return ERR_PTR(-EINVAL);
1872 
1873 	/* use the prefix command if available */
1874 	use_prefix = private->features.feature[8] & 0x01;
1875 	if (use_prefix) {
1876 		/* 1x prefix + number of blocks */
1877 		cplength = 2 + count;
1878 		/* 1x prefix + cidaws*sizeof(long) */
1879 		datasize = sizeof(struct PFX_eckd_data) +
1880 			sizeof(struct LO_eckd_data) +
1881 			cidaw * sizeof(unsigned long);
1882 	} else {
1883 		/* 1x define extent + 1x locate record + number of blocks */
1884 		cplength = 2 + count;
1885 		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1886 		datasize = sizeof(struct DE_eckd_data) +
1887 			sizeof(struct LO_eckd_data) +
1888 			cidaw * sizeof(unsigned long);
1889 	}
1890 	/* Find out the number of additional locate record ccws for cdl. */
1891 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1892 		if (last_rec >= 2*blk_per_trk)
1893 			count = 2*blk_per_trk - first_rec;
1894 		cplength += count;
1895 		datasize += count*sizeof(struct LO_eckd_data);
1896 	}
1897 	/* Allocate the ccw request. */
1898 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1899 				   startdev);
1900 	if (IS_ERR(cqr))
1901 		return cqr;
1902 	ccw = cqr->cpaddr;
1903 	/* First ccw is define extent or prefix. */
1904 	if (use_prefix) {
1905 		if (prefix(ccw++, cqr->data, first_trk,
1906 			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
1907 			/* Clock not in sync and XRC is enabled.
1908 			 * Try again later.
1909 			 */
1910 			dasd_sfree_request(cqr, startdev);
1911 			return ERR_PTR(-EAGAIN);
1912 		}
1913 		idaws = (unsigned long *) (cqr->data +
1914 					   sizeof(struct PFX_eckd_data));
1915 	} else {
1916 		if (define_extent(ccw++, cqr->data, first_trk,
1917 				  last_trk, cmd, startdev) == -EAGAIN) {
1918 			/* Clock not in sync and XRC is enabled.
1919 			 * Try again later.
1920 			 */
1921 			dasd_sfree_request(cqr, startdev);
1922 			return ERR_PTR(-EAGAIN);
1923 		}
1924 		idaws = (unsigned long *) (cqr->data +
1925 					   sizeof(struct DE_eckd_data));
1926 	}
1927 	/* Build locate_record + read/write ccws. */
1928 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1929 	recid = first_rec;
1930 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1931 		/* Only standard blocks so there is just one locate record. */
1932 		ccw[-1].flags |= CCW_FLAG_CC;
1933 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1934 			      last_rec - recid + 1, cmd, basedev, blksize);
1935 	}
1936 	rq_for_each_segment(bv, req, iter) {
1937 		dst = page_address(bv->bv_page) + bv->bv_offset;
1938 		if (dasd_page_cache) {
1939 			char *copy = kmem_cache_alloc(dasd_page_cache,
1940 						      GFP_DMA | __GFP_NOWARN);
1941 			if (copy && rq_data_dir(req) == WRITE)
1942 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1943 			if (copy)
1944 				dst = copy + bv->bv_offset;
1945 		}
1946 		for (off = 0; off < bv->bv_len; off += blksize) {
1947 			sector_t trkid = recid;
1948 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
1949 			rcmd = cmd;
1950 			count = blksize;
1951 			/* Locate record for cdl special block ? */
1952 			if (private->uses_cdl && recid < 2*blk_per_trk) {
1953 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1954 					rcmd |= 0x8;
1955 					count = dasd_eckd_cdl_reclen(recid);
1956 					if (count < blksize &&
1957 					    rq_data_dir(req) == READ)
1958 						memset(dst + count, 0xe5,
1959 						       blksize - count);
1960 				}
1961 				ccw[-1].flags |= CCW_FLAG_CC;
1962 				locate_record(ccw++, LO_data++,
1963 					      trkid, recoffs + 1,
1964 					      1, rcmd, basedev, count);
1965 			}
1966 			/* Locate record for standard blocks ? */
1967 			if (private->uses_cdl && recid == 2*blk_per_trk) {
1968 				ccw[-1].flags |= CCW_FLAG_CC;
1969 				locate_record(ccw++, LO_data++,
1970 					      trkid, recoffs + 1,
1971 					      last_rec - recid + 1,
1972 					      cmd, basedev, count);
1973 			}
1974 			/* Read/write ccw. */
1975 			ccw[-1].flags |= CCW_FLAG_CC;
1976 			ccw->cmd_code = rcmd;
1977 			ccw->count = count;
1978 			if (idal_is_needed(dst, blksize)) {
1979 				ccw->cda = (__u32)(addr_t) idaws;
1980 				ccw->flags = CCW_FLAG_IDA;
1981 				idaws = idal_create_words(idaws, dst, blksize);
1982 			} else {
1983 				ccw->cda = (__u32)(addr_t) dst;
1984 				ccw->flags = 0;
1985 			}
1986 			ccw++;
1987 			dst += blksize;
1988 			recid++;
1989 		}
1990 	}
1991 	if (blk_noretry_request(req) ||
1992 	    block->base->features & DASD_FEATURE_FAILFAST)
1993 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1994 	cqr->startdev = startdev;
1995 	cqr->memdev = startdev;
1996 	cqr->block = block;
1997 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
1998 	cqr->lpm = private->path_data.ppm;
1999 	cqr->retries = 256;
2000 	cqr->buildclk = get_clock();
2001 	cqr->status = DASD_CQR_FILLED;
2002 	return cqr;
2003 }
2004 
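/*
 * Build a command mode channel program that transfers whole tracks: one
 * prefix ccw followed by a single read/write track data ccw per track.
 * All data is addressed through idaws, which have to start and end on
 * IDA_BLOCK_SIZE boundaries except at the beginning and the end of a ccw.
 */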
2005 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2006 					       struct dasd_device *startdev,
2007 					       struct dasd_block *block,
2008 					       struct request *req,
2009 					       sector_t first_rec,
2010 					       sector_t last_rec,
2011 					       sector_t first_trk,
2012 					       sector_t last_trk,
2013 					       unsigned int first_offs,
2014 					       unsigned int last_offs,
2015 					       unsigned int blk_per_trk,
2016 					       unsigned int blksize)
2017 {
2018 	struct dasd_eckd_private *private;
2019 	unsigned long *idaws;
2020 	struct dasd_ccw_req *cqr;
2021 	struct ccw1 *ccw;
2022 	struct req_iterator iter;
2023 	struct bio_vec *bv;
2024 	char *dst, *idaw_dst;
2025 	unsigned int cidaw, cplength, datasize;
2026 	unsigned int tlf;
2027 	sector_t recid;
2028 	unsigned char cmd;
2029 	struct dasd_device *basedev;
2030 	unsigned int trkcount, count, count_to_trk_end;
2031 	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
2032 	unsigned char new_track, end_idaw;
2033 	sector_t trkid;
2034 	unsigned int recoffs;
2035 
2036 	basedev = block->base;
2037 	private = (struct dasd_eckd_private *) basedev->private;
2038 	if (rq_data_dir(req) == READ)
2039 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2040 	else if (rq_data_dir(req) == WRITE)
2041 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2042 	else
2043 		return ERR_PTR(-EINVAL);
2044 
2045 	/* Track based I/O needs IDAWs for each page, and not just for
2046 	 * 64 bit addresses. We need additional idals for pages
2047 	 * that get filled from two tracks, so we use the number
2048 	 * of records as an upper limit.
2049 	 */
2050 	cidaw = last_rec - first_rec + 1;
2051 	trkcount = last_trk - first_trk + 1;
2052 
2053 	/* 1x prefix + one read/write ccw per track */
2054 	cplength = 1 + trkcount;
2055 
2056 	/* on 31-bit we need space for two 32-bit addresses per page,
2057 	 * on 64-bit for one 64-bit address
2058 	 */
2059 	datasize = sizeof(struct PFX_eckd_data) +
2060 		cidaw * sizeof(unsigned long long);
2061 
2062 	/* Allocate the ccw request. */
2063 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2064 				   startdev);
2065 	if (IS_ERR(cqr))
2066 		return cqr;
2067 	ccw = cqr->cpaddr;
2068 	/* transfer length factor: how many bytes to read from the last track */
2069 	if (first_trk == last_trk)
2070 		tlf = last_offs - first_offs + 1;
2071 	else
2072 		tlf = last_offs + 1;
2073 	tlf *= blksize;
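	/* example: a request that starts on an earlier track and ends with
	 * the block at offset 2 on its last track transfers
	 * (2 + 1) * blksize bytes from that last track */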
2074 
2075 	if (prefix_LRE(ccw++, cqr->data, first_trk,
2076 		       last_trk, cmd, basedev, startdev,
2077 		       1 /* format */, first_offs + 1,
2078 		       trkcount, blksize,
2079 		       tlf) == -EAGAIN) {
2080 		/* Clock not in sync and XRC is enabled.
2081 		 * Try again later.
2082 		 */
2083 		dasd_sfree_request(cqr, startdev);
2084 		return ERR_PTR(-EAGAIN);
2085 	}
2086 
2087 	/*
2088 	 * The translation of a request into ccw programs must meet the
2089 	 * following conditions:
2090 	 * - all idaws but the first and the last must address full pages
2091 	 *   (or 2K blocks on 31-bit)
2092 	 * - the scope of a ccw and its idal ends at the track boundaries
2093 	 */
2094 	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
2095 	recid = first_rec;
2096 	new_track = 1;
2097 	end_idaw = 0;
2098 	len_to_track_end = 0;
2099 	idaw_dst = 0;
2100 	idaw_len = 0;
2101 	rq_for_each_segment(bv, req, iter) {
2102 		dst = page_address(bv->bv_page) + bv->bv_offset;
2103 		seg_len = bv->bv_len;
2104 		while (seg_len) {
2105 			if (new_track) {
2106 				trkid = recid;
2107 				recoffs = sector_div(trkid, blk_per_trk);
2108 				count_to_trk_end = blk_per_trk - recoffs;
2109 				count = min((last_rec - recid + 1),
2110 					    (sector_t)count_to_trk_end);
2111 				len_to_track_end = count * blksize;
2112 				ccw[-1].flags |= CCW_FLAG_CC;
2113 				ccw->cmd_code = cmd;
2114 				ccw->count = len_to_track_end;
2115 				ccw->cda = (__u32)(addr_t)idaws;
2116 				ccw->flags = CCW_FLAG_IDA;
2117 				ccw++;
2118 				recid += count;
2119 				new_track = 0;
2120 				/* first idaw for a ccw may start anywhere */
2121 				if (!idaw_dst)
2122 					idaw_dst = dst;
2123 			}
2124 			/* If we start a new idaw, we must make sure that it
2125 			 * starts on an IDA_BLOCK_SIZE boundary.
2126 			 * If we continue an idaw, we must make sure that the
2127 			 * current segment begins where the idaw accumulated
2128 			 * so far ends.
2129 			 */
2130 			if (!idaw_dst) {
2131 				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2132 					dasd_sfree_request(cqr, startdev);
2133 					return ERR_PTR(-ERANGE);
2134 				} else
2135 					idaw_dst = dst;
2136 			}
2137 			if ((idaw_dst + idaw_len) != dst) {
2138 				dasd_sfree_request(cqr, startdev);
2139 				return ERR_PTR(-ERANGE);
2140 			}
2141 			part_len = min(seg_len, len_to_track_end);
2142 			seg_len -= part_len;
2143 			dst += part_len;
2144 			idaw_len += part_len;
2145 			len_to_track_end -= part_len;
2146 			/* collected memory area ends on an IDA_BLOCK border,
2147 			 * -> create an idaw
2148 			 * idal_create_words will handle cases where idaw_len
2149 			 * is larger than IDA_BLOCK_SIZE
2150 			 */
2151 			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2152 				end_idaw = 1;
2153 			/* We also need to end the idaw at track end */
2154 			if (!len_to_track_end) {
2155 				new_track = 1;
2156 				end_idaw = 1;
2157 			}
2158 			if (end_idaw) {
2159 				idaws = idal_create_words(idaws, idaw_dst,
2160 							  idaw_len);
2161 				idaw_dst = 0;
2162 				idaw_len = 0;
2163 				end_idaw = 0;
2164 			}
2165 		}
2166 	}
2167 
2168 	if (blk_noretry_request(req) ||
2169 	    block->base->features & DASD_FEATURE_FAILFAST)
2170 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2171 	cqr->startdev = startdev;
2172 	cqr->memdev = startdev;
2173 	cqr->block = block;
2174 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
2175 	cqr->lpm = private->path_data.ppm;
2176 	cqr->retries = 256;
2177 	cqr->buildclk = get_clock();
2178 	cqr->status = DASD_CQR_FILLED;
2179 	return cqr;
2180 }
2181 
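/*
 * Fill in the prefix (PFX) data for a transport mode request and add it as
 * the first DCW of the ITCW.  Define extent and locate record (LRE) data
 * are set up for read or write track data, including an approximate sector
 * value derived from the record number and the device geometry.
 */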
2182 static int prepare_itcw(struct itcw *itcw,
2183 			unsigned int trk, unsigned int totrk, int cmd,
2184 			struct dasd_device *basedev,
2185 			struct dasd_device *startdev,
2186 			unsigned int rec_on_trk, int count,
2187 			unsigned int blksize,
2188 			unsigned int total_data_size,
2189 			unsigned int tlf,
2190 			unsigned int blk_per_trk)
2191 {
2192 	struct PFX_eckd_data pfxdata;
2193 	struct dasd_eckd_private *basepriv, *startpriv;
2194 	struct DE_eckd_data *dedata;
2195 	struct LRE_eckd_data *lredata;
2196 	struct dcw *dcw;
2197 
2198 	u32 begcyl, endcyl;
2199 	u16 heads, beghead, endhead;
2200 	u8 pfx_cmd;
2201 
2202 	int rc = 0;
2203 	int sector = 0;
2204 	int dn, d;
2205 
2206 
2207 	/* setup prefix data */
2208 	basepriv = (struct dasd_eckd_private *) basedev->private;
2209 	startpriv = (struct dasd_eckd_private *) startdev->private;
2210 	dedata = &pfxdata.define_extent;
2211 	lredata = &pfxdata.locate_record;
2212 
2213 	memset(&pfxdata, 0, sizeof(pfxdata));
2214 	pfxdata.format = 1; /* PFX with LRE */
2215 	pfxdata.base_address = basepriv->ned->unit_addr;
2216 	pfxdata.base_lss = basepriv->ned->ID;
2217 	pfxdata.validity.define_extent = 1;
2218 
2219 	/* private uid is kept up to date, conf_data may be outdated */
2220 	if (startpriv->uid.type != UA_BASE_DEVICE) {
2221 		pfxdata.validity.verify_base = 1;
2222 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2223 			pfxdata.validity.hyper_pav = 1;
2224 	}
2225 
2226 	switch (cmd) {
2227 	case DASD_ECKD_CCW_READ_TRACK_DATA:
2228 		dedata->mask.perm = 0x1;
2229 		dedata->attributes.operation = basepriv->attrib.operation;
2230 		dedata->blk_size = blksize;
2231 		dedata->ga_extended |= 0x42;
2232 		lredata->operation.orientation = 0x0;
2233 		lredata->operation.operation = 0x0C;
2234 		lredata->auxiliary.check_bytes = 0x01;
2235 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2236 		break;
2237 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2238 		dedata->mask.perm = 0x02;
2239 		dedata->attributes.operation = basepriv->attrib.operation;
2240 		dedata->blk_size = blksize;
2241 		rc = check_XRC_on_prefix(&pfxdata, basedev);
2242 		dedata->ga_extended |= 0x42;
2243 		lredata->operation.orientation = 0x0;
2244 		lredata->operation.operation = 0x3F;
2245 		lredata->extended_operation = 0x23;
2246 		lredata->auxiliary.check_bytes = 0x2;
2247 		pfx_cmd = DASD_ECKD_CCW_PFX;
2248 		break;
2249 	default:
2250 		DBF_DEV_EVENT(DBF_ERR, basedev,
2251 			      "prepare itcw, unknown opcode 0x%x", cmd);
2252 		BUG();
2253 		break;
2254 	}
2255 	if (rc)
2256 		return rc;
2257 
2258 	dedata->attributes.mode = 0x3;	/* ECKD */
2259 
2260 	heads = basepriv->rdc_data.trk_per_cyl;
2261 	begcyl = trk / heads;
2262 	beghead = trk % heads;
2263 	endcyl = totrk / heads;
2264 	endhead = totrk % heads;
2265 
2266 	/* check for sequential prestage - enhance cylinder range */
2267 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2268 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
2269 
2270 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2271 			endcyl += basepriv->attrib.nr_cyl;
2272 		else
2273 			endcyl = (basepriv->real_cyl - 1);
2274 	}
2275 
2276 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
2277 	set_ch_t(&dedata->end_ext, endcyl, endhead);
2278 
2279 	dedata->ep_format = 0x20; /* records per track is valid */
2280 	dedata->ep_rec_per_track = blk_per_trk;
2281 
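	/* estimate the sector (angular position) of the first record from
	 * the block size and the per-device-type geometry constants;
	 * rec_on_trk == 0 leaves sector at 0 */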
2282 	if (rec_on_trk) {
2283 		switch (basepriv->rdc_data.dev_type) {
2284 		case 0x3390:
2285 			dn = ceil_quot(blksize + 6, 232);
2286 			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2287 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2288 			break;
2289 		case 0x3380:
2290 			d = 7 + ceil_quot(blksize + 12, 32);
2291 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2292 			break;
2293 		}
2294 	}
2295 
2296 	lredata->auxiliary.length_valid = 1;
2297 	lredata->auxiliary.length_scope = 1;
2298 	lredata->auxiliary.imbedded_ccw_valid = 1;
2299 	lredata->length = tlf;
2300 	lredata->imbedded_ccw = cmd;
2301 	lredata->count = count;
2302 	lredata->sector = sector;
2303 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
2304 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
2305 	lredata->search_arg.head = lredata->seek_addr.head;
2306 	lredata->search_arg.record = rec_on_trk;
2307 
2308 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2309 		     &pfxdata, sizeof(pfxdata), total_data_size);
2310 
2311 	return rc;
2312 }
2313 
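/*
 * Build a transport mode (fcx) channel program: a single TCW whose ITCW
 * carries the prefix data and one tidaw per request segment.  Used for
 * read/write track data when transport mode is available (see the fcx
 * checks in dasd_eckd_build_cp).
 */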
2314 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2315 					       struct dasd_device *startdev,
2316 					       struct dasd_block *block,
2317 					       struct request *req,
2318 					       sector_t first_rec,
2319 					       sector_t last_rec,
2320 					       sector_t first_trk,
2321 					       sector_t last_trk,
2322 					       unsigned int first_offs,
2323 					       unsigned int last_offs,
2324 					       unsigned int blk_per_trk,
2325 					       unsigned int blksize)
2326 {
2327 	struct dasd_eckd_private *private;
2328 	struct dasd_ccw_req *cqr;
2329 	struct req_iterator iter;
2330 	struct bio_vec *bv;
2331 	char *dst;
2332 	unsigned int trkcount, ctidaw;
2333 	unsigned char cmd;
2334 	struct dasd_device *basedev;
2335 	unsigned int tlf;
2336 	struct itcw *itcw;
2337 	struct tidaw *last_tidaw = NULL;
2338 	int itcw_op;
2339 	size_t itcw_size;
2340 
2341 	basedev = block->base;
2342 	private = (struct dasd_eckd_private *) basedev->private;
2343 	if (rq_data_dir(req) == READ) {
2344 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2345 		itcw_op = ITCW_OP_READ;
2346 	} else if (rq_data_dir(req) == WRITE) {
2347 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2348 		itcw_op = ITCW_OP_WRITE;
2349 	} else
2350 		return ERR_PTR(-EINVAL);
2351 
2352 	/* track based I/O needs to address all memory via TIDAWs,
2353 	 * not just for 64 bit addresses. This allows us to map
2354 	 * each segment directly to one tidaw.
2355 	 */
2356 	trkcount = last_trk - first_trk + 1;
2357 	ctidaw = 0;
2358 	rq_for_each_segment(bv, req, iter) {
2359 		++ctidaw;
2360 	}
2361 
2362 	/* Allocate the ccw request. */
2363 	itcw_size = itcw_calc_size(0, ctidaw, 0);
2364 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2365 	if (IS_ERR(cqr))
2366 		return cqr;
2367 
2368 	cqr->cpmode = 1;
2369 	cqr->startdev = startdev;
2370 	cqr->memdev = startdev;
2371 	cqr->block = block;
2372 	cqr->expires = 100*HZ;
2373 	cqr->buildclk = get_clock();
2374 	cqr->status = DASD_CQR_FILLED;
2375 	cqr->retries = 10;
2376 
2377 	/* transfer length factor: how many bytes to read from the last track */
2378 	if (first_trk == last_trk)
2379 		tlf = last_offs - first_offs + 1;
2380 	else
2381 		tlf = last_offs + 1;
2382 	tlf *= blksize;
2383 
2384 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2385 	cqr->cpaddr = itcw_get_tcw(itcw);
2386 
2387 	if (prepare_itcw(itcw, first_trk, last_trk,
2388 			 cmd, basedev, startdev,
2389 			 first_offs + 1,
2390 			 trkcount, blksize,
2391 			 (last_rec - first_rec + 1) * blksize,
2392 			 tlf, blk_per_trk) == -EAGAIN) {
2393 		/* Clock not in sync and XRC is enabled.
2394 		 * Try again later.
2395 		 */
2396 		dasd_sfree_request(cqr, startdev);
2397 		return ERR_PTR(-EAGAIN);
2398 	}
2399 
2400 	/*
2401 	 * A tidaw can address 4k of memory, but must not cross page boundaries.
2402 	 * We can let the block layer handle this by setting
2403 	 * blk_queue_segment_boundary to page boundaries and
2404 	 * blk_max_segment_size to page size when setting up the request queue.
2405 	 */
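	/*
	 * A minimal sketch of that queue setup, assuming it is done in the
	 * generic DASD block layer code rather than here:
	 *
	 *	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	 *	blk_queue_max_segment_size(q, PAGE_SIZE);
	 */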
2406 	rq_for_each_segment(bv, req, iter) {
2407 		dst = page_address(bv->bv_page) + bv->bv_offset;
2408 		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2409 		if (IS_ERR(last_tidaw)) {
			/* free the half-built request before propagating the error */
			dasd_sfree_request(cqr, startdev);
2410 			return (struct dasd_ccw_req *)last_tidaw;
		}
2411 	}
2412 
2413 	last_tidaw->flags |= 0x80;
2414 	itcw_finalize(itcw);
2415 
2416 	if (blk_noretry_request(req) ||
2417 	    block->base->features & DASD_FEATURE_FAILFAST)
2418 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2419 	cqr->startdev = startdev;
2420 	cqr->memdev = startdev;
2421 	cqr->block = block;
2422 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
2423 	cqr->lpm = private->path_data.ppm;
2424 	cqr->retries = 256;
2425 	cqr->buildclk = get_clock();
2426 	cqr->status = DASD_CQR_FILLED;
2427 	return cqr;
2428 }
2429 
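/*
 * Select how a block layer request is translated into a channel program:
 * prefer transport mode if fcx is supported and not disabled (currently
 * only for requests that stay within a single track), then command mode
 * track data if the read/write track data commands are supported, and fall
 * back to the block-by-block command mode program otherwise.  CDL special
 * blocks and the page cache bounce buffers always use the block-by-block
 * path.
 */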
2430 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2431 					       struct dasd_block *block,
2432 					       struct request *req)
2433 {
2434 	int tpm, cmdrtd, cmdwtd;
2435 	int use_prefix;
2436 #if defined(CONFIG_64BIT)
2437 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
2438 #endif
2439 	struct dasd_eckd_private *private;
2440 	struct dasd_device *basedev;
2441 	sector_t first_rec, last_rec;
2442 	sector_t first_trk, last_trk;
2443 	unsigned int first_offs, last_offs;
2444 	unsigned int blk_per_trk, blksize;
2445 	int cdlspecial;
2446 	struct dasd_ccw_req *cqr;
2447 
2448 	basedev = block->base;
2449 	private = (struct dasd_eckd_private *) basedev->private;
2450 
2451 	/* Calculate number of blocks/records per track. */
2452 	blksize = block->bp_block;
2453 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2454 	if (blk_per_trk == 0)
2455 		return ERR_PTR(-EINVAL);
2456 	/* Calculate record id of first and last block. */
2457 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2458 	first_offs = sector_div(first_trk, blk_per_trk);
2459 	last_rec = last_trk =
2460 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2461 	last_offs = sector_div(last_trk, blk_per_trk);
2462 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2463 
2464 	/* is transport mode supported? */
2465 #if defined(CONFIG_64BIT)
2466 	fcx_in_css = css_general_characteristics.fcx;
2467 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2468 	fcx_in_features = private->features.feature[40] & 0x80;
2469 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2470 #else
2471 	tpm = 0;
2472 #endif
2473 
2474 	/* is read track data and write track data in command mode supported? */
2475 	cmdrtd = private->features.feature[9] & 0x20;
2476 	cmdwtd = private->features.feature[12] & 0x40;
2477 	use_prefix = private->features.feature[8] & 0x01;
2478 
2479 	cqr = NULL;
2480 	if (cdlspecial || dasd_page_cache) {
2481 		/* do nothing, just fall through to the cmd mode single case */
2482 	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
2483 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2484 						    first_rec, last_rec,
2485 						    first_trk, last_trk,
2486 						    first_offs, last_offs,
2487 						    blk_per_trk, blksize);
2488 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2489 			cqr = NULL;
2490 	} else if (use_prefix &&
2491 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
2492 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2493 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2494 						   first_rec, last_rec,
2495 						   first_trk, last_trk,
2496 						   first_offs, last_offs,
2497 						   blk_per_trk, blksize);
2498 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2499 			cqr = NULL;
2500 	}
2501 	if (!cqr)
2502 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2503 						    first_rec, last_rec,
2504 						    first_trk, last_trk,
2505 						    first_offs, last_offs,
2506 						    blk_per_trk, blksize);
2507 	return cqr;
2508 }
2509 
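/*
 * Tear down a command mode channel program: if page cache bounce buffers
 * were used, copy read data back into the request's bio segments and free
 * the buffers, then free the cqr itself.  Returns whether the request
 * completed successfully.
 */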
2510 static int
2511 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2512 {
2513 	struct dasd_eckd_private *private;
2514 	struct ccw1 *ccw;
2515 	struct req_iterator iter;
2516 	struct bio_vec *bv;
2517 	char *dst, *cda;
2518 	unsigned int blksize, blk_per_trk, off;
2519 	sector_t recid;
2520 	int status;
2521 
2522 	if (!dasd_page_cache)
2523 		goto out;
2524 	private = (struct dasd_eckd_private *) cqr->block->base->private;
2525 	blksize = cqr->block->bp_block;
2526 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2527 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2528 	ccw = cqr->cpaddr;
2529 	/* Skip over define extent & locate record. */
2530 	ccw++;
2531 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
2532 		ccw++;
2533 	rq_for_each_segment(bv, req, iter) {
2534 		dst = page_address(bv->bv_page) + bv->bv_offset;
2535 		for (off = 0; off < bv->bv_len; off += blksize) {
2536 			/* Skip locate record. */
2537 			if (private->uses_cdl && recid <= 2*blk_per_trk)
2538 				ccw++;
2539 			if (dst) {
2540 				if (ccw->flags & CCW_FLAG_IDA)
2541 					cda = *((char **)((addr_t) ccw->cda));
2542 				else
2543 					cda = (char *)((addr_t) ccw->cda);
2544 				if (dst != cda) {
2545 					if (rq_data_dir(req) == READ)
2546 						memcpy(dst, cda, bv->bv_len);
2547 					kmem_cache_free(dasd_page_cache,
2548 					    (void *)((addr_t)cda & PAGE_MASK));
2549 				}
2550 				dst = NULL;
2551 			}
2552 			ccw++;
2553 			recid++;
2554 		}
2555 	}
2556 out:
2557 	status = cqr->status == DASD_CQR_DONE;
2558 	dasd_sfree_request(cqr, cqr->memdev);
2559 	return status;
2560 }
2561 
2562 /*
2563  * Modify ccw/tcw in cqr so it can be started on a base device.
2564  *
2565  * Note that this is not enough to restart the cqr!
2566  * Either reset cqr->startdev as well (summary unit check handling)
2567  * or restart via separate cqr (as in ERP handling).
2568  */
2569 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
2570 {
2571 	struct ccw1 *ccw;
2572 	struct PFX_eckd_data *pfxdata;
2573 	struct tcw *tcw;
2574 	struct tccb *tccb;
2575 	struct dcw *dcw;
2576 
2577 	if (cqr->cpmode == 1) {
2578 		tcw = cqr->cpaddr;
2579 		tccb = tcw_get_tccb(tcw);
2580 		dcw = (struct dcw *)&tccb->tca[0];
2581 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
2582 		pfxdata->validity.verify_base = 0;
2583 		pfxdata->validity.hyper_pav = 0;
2584 	} else {
2585 		ccw = cqr->cpaddr;
2586 		pfxdata = cqr->data;
2587 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2588 			pfxdata->validity.verify_base = 0;
2589 			pfxdata->validity.hyper_pav = 0;
2590 		}
2591 	}
2592 }
2593 
2594 #define DASD_ECKD_CHANQ_MAX_SIZE 4
2595 
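/*
 * Build the channel program on behalf of the base device, but start it on
 * an alias (PAV) device if one is available.  The per-device count limits
 * the number of requests queued on one start device to
 * DASD_ECKD_CHANQ_MAX_SIZE.
 */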
2596 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2597 						     struct dasd_block *block,
2598 						     struct request *req)
2599 {
2600 	struct dasd_eckd_private *private;
2601 	struct dasd_device *startdev;
2602 	unsigned long flags;
2603 	struct dasd_ccw_req *cqr;
2604 
2605 	startdev = dasd_alias_get_start_dev(base);
2606 	if (!startdev)
2607 		startdev = base;
2608 	private = (struct dasd_eckd_private *) startdev->private;
2609 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
2610 		return ERR_PTR(-EBUSY);
2611 
2612 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2613 	private->count++;
2614 	cqr = dasd_eckd_build_cp(startdev, block, req);
2615 	if (IS_ERR(cqr))
2616 		private->count--;
2617 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
2618 	return cqr;
2619 }
2620 
2621 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
2622 				   struct request *req)
2623 {
2624 	struct dasd_eckd_private *private;
2625 	unsigned long flags;
2626 
2627 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
2628 	private = (struct dasd_eckd_private *) cqr->memdev->private;
2629 	private->count--;
2630 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
2631 	return dasd_eckd_free_cp(cqr, req);
2632 }
2633 
2634 static int
2635 dasd_eckd_fill_info(struct dasd_device * device,
2636 		    struct dasd_information2_t * info)
2637 {
2638 	struct dasd_eckd_private *private;
2639 
2640 	private = (struct dasd_eckd_private *) device->private;
2641 	info->label_block = 2;
2642 	info->FBA_layout = private->uses_cdl ? 0 : 1;
2643 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
2644 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
2645 	memcpy(info->characteristics, &private->rdc_data,
2646 	       sizeof(struct dasd_eckd_characteristics));
2647 	info->confdata_size = min((unsigned long)private->conf_len,
2648 				  sizeof(info->configuration_data));
2649 	memcpy(info->configuration_data, private->conf_data,
2650 	       info->confdata_size);
2651 	return 0;
2652 }
2653 
2654 /*
2655  * SECTION: ioctl functions for eckd devices.
2656  */
2657 
2658 /*
2659  * Release device ioctl.
2660  * Builds a channel program to release a previously reserved
2661  * (see dasd_eckd_reserve) device.
2662  */
2663 static int
2664 dasd_eckd_release(struct dasd_device *device)
2665 {
2666 	struct dasd_ccw_req *cqr;
2667 	int rc;
2668 	struct ccw1 *ccw;
2669 	int useglobal;
2670 
2671 	if (!capable(CAP_SYS_ADMIN))
2672 		return -EACCES;
2673 
2674 	useglobal = 0;
2675 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2676 	if (IS_ERR(cqr)) {
2677 		mutex_lock(&dasd_reserve_mutex);
2678 		useglobal = 1;
2679 		cqr = &dasd_reserve_req->cqr;
2680 		memset(cqr, 0, sizeof(*cqr));
2681 		memset(&dasd_reserve_req->ccw, 0,
2682 		       sizeof(dasd_reserve_req->ccw));
2683 		cqr->cpaddr = &dasd_reserve_req->ccw;
2684 		cqr->data = &dasd_reserve_req->data;
2685 		cqr->magic = DASD_ECKD_MAGIC;
2686 	}
2687 	ccw = cqr->cpaddr;
2688 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
2689 	ccw->flags |= CCW_FLAG_SLI;
2690 	ccw->count = 32;
2691 	ccw->cda = (__u32)(addr_t) cqr->data;
2692 	cqr->startdev = device;
2693 	cqr->memdev = device;
2694 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2695 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2696 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2697 	cqr->expires = 2 * HZ;
2698 	cqr->buildclk = get_clock();
2699 	cqr->status = DASD_CQR_FILLED;
2700 
2701 	rc = dasd_sleep_on_immediatly(cqr);
2702 
2703 	if (useglobal)
2704 		mutex_unlock(&dasd_reserve_mutex);
2705 	else
2706 		dasd_sfree_request(cqr, cqr->memdev);
2707 	return rc;
2708 }
2709 
2710 /*
2711  * Reserve device ioctl.
2712  * Options are set to 'synchronous wait for interrupt' and
2713  * 'timeout the request'. This leads to terminating the I/O if
2714  * the interrupt is outstanding for a certain time.
2715  */
2716 static int
2717 dasd_eckd_reserve(struct dasd_device *device)
2718 {
2719 	struct dasd_ccw_req *cqr;
2720 	int rc;
2721 	struct ccw1 *ccw;
2722 	int useglobal;
2723 
2724 	if (!capable(CAP_SYS_ADMIN))
2725 		return -EACCES;
2726 
2727 	useglobal = 0;
2728 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2729 	if (IS_ERR(cqr)) {
2730 		mutex_lock(&dasd_reserve_mutex);
2731 		useglobal = 1;
2732 		cqr = &dasd_reserve_req->cqr;
2733 		memset(cqr, 0, sizeof(*cqr));
2734 		memset(&dasd_reserve_req->ccw, 0,
2735 		       sizeof(dasd_reserve_req->ccw));
2736 		cqr->cpaddr = &dasd_reserve_req->ccw;
2737 		cqr->data = &dasd_reserve_req->data;
2738 		cqr->magic = DASD_ECKD_MAGIC;
2739 	}
2740 	ccw = cqr->cpaddr;
2741 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
2742 	ccw->flags |= CCW_FLAG_SLI;
2743 	ccw->count = 32;
2744 	ccw->cda = (__u32)(addr_t) cqr->data;
2745 	cqr->startdev = device;
2746 	cqr->memdev = device;
2747 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2748 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2749 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2750 	cqr->expires = 2 * HZ;
2751 	cqr->buildclk = get_clock();
2752 	cqr->status = DASD_CQR_FILLED;
2753 
2754 	rc = dasd_sleep_on_immediatly(cqr);
2755 
2756 	if (useglobal)
2757 		mutex_unlock(&dasd_reserve_mutex);
2758 	else
2759 		dasd_sfree_request(cqr, cqr->memdev);
2760 	return rc;
2761 }
2762 
2763 /*
2764  * Steal lock ioctl - unconditional reserve device.
2765  * Builds a channel program to break a device's reservation.
2766  * (unconditional reserve)
2767  */
2768 static int
2769 dasd_eckd_steal_lock(struct dasd_device *device)
2770 {
2771 	struct dasd_ccw_req *cqr;
2772 	int rc;
2773 	struct ccw1 *ccw;
2774 	int useglobal;
2775 
2776 	if (!capable(CAP_SYS_ADMIN))
2777 		return -EACCES;
2778 
2779 	useglobal = 0;
2780 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2781 	if (IS_ERR(cqr)) {
2782 		mutex_lock(&dasd_reserve_mutex);
2783 		useglobal = 1;
2784 		cqr = &dasd_reserve_req->cqr;
2785 		memset(cqr, 0, sizeof(*cqr));
2786 		memset(&dasd_reserve_req->ccw, 0,
2787 		       sizeof(dasd_reserve_req->ccw));
2788 		cqr->cpaddr = &dasd_reserve_req->ccw;
2789 		cqr->data = &dasd_reserve_req->data;
2790 		cqr->magic = DASD_ECKD_MAGIC;
2791 	}
2792 	ccw = cqr->cpaddr;
2793 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
2794 	ccw->flags |= CCW_FLAG_SLI;
2795 	ccw->count = 32;
2796 	ccw->cda = (__u32)(addr_t) cqr->data;
2797 	cqr->startdev = device;
2798 	cqr->memdev = device;
2799 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2800 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2801 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2802 	cqr->expires = 2 * HZ;
2803 	cqr->buildclk = get_clock();
2804 	cqr->status = DASD_CQR_FILLED;
2805 
2806 	rc = dasd_sleep_on_immediatly(cqr);
2807 
2808 	if (useglobal)
2809 		mutex_unlock(&dasd_reserve_mutex);
2810 	else
2811 		dasd_sfree_request(cqr, cqr->memdev);
2812 	return rc;
2813 }
2814 
2815 /*
2816  * Read performance statistics
2817  */
2818 static int
2819 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2820 {
2821 	struct dasd_psf_prssd_data *prssdp;
2822 	struct dasd_rssd_perf_stats_t *stats;
2823 	struct dasd_ccw_req *cqr;
2824 	struct ccw1 *ccw;
2825 	int rc;
2826 
2827 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
2828 				   (sizeof(struct dasd_psf_prssd_data) +
2829 				    sizeof(struct dasd_rssd_perf_stats_t)),
2830 				   device);
2831 	if (IS_ERR(cqr)) {
2832 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2833 			    "Could not allocate initialization request");
2834 		return PTR_ERR(cqr);
2835 	}
2836 	cqr->startdev = device;
2837 	cqr->memdev = device;
2838 	cqr->retries = 0;
2839 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2840 	cqr->expires = 10 * HZ;
2841 
2842 	/* Prepare for Read Subsystem Data */
2843 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2844 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
2845 	prssdp->order = PSF_ORDER_PRSSD;
2846 	prssdp->suborder = 0x01;	/* Performance Statistics */
2847 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
2848 
2849 	ccw = cqr->cpaddr;
2850 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2851 	ccw->count = sizeof(struct dasd_psf_prssd_data);
2852 	ccw->flags |= CCW_FLAG_CC;
2853 	ccw->cda = (__u32)(addr_t) prssdp;
2854 
2855 	/* Read Subsystem Data - Performance Statistics */
2856 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2857 	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
2858 
2859 	ccw++;
2860 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2861 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
2862 	ccw->cda = (__u32)(addr_t) stats;
2863 
2864 	cqr->buildclk = get_clock();
2865 	cqr->status = DASD_CQR_FILLED;
2866 	rc = dasd_sleep_on(cqr);
2867 	if (rc == 0) {
2868 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2869 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2870 		if (copy_to_user(argp, stats,
2871 				 sizeof(struct dasd_rssd_perf_stats_t)))
2872 			rc = -EFAULT;
2873 	}
2874 	dasd_sfree_request(cqr, cqr->memdev);
2875 	return rc;
2876 }
2877 
2878 /*
2879  * Get attributes (cache operations)
2880  * Returns the cache attributes used in Define Extent (DE).
2881  */
2882 static int
2883 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
2884 {
2885 	struct dasd_eckd_private *private =
2886 		(struct dasd_eckd_private *)device->private;
2887 	struct attrib_data_t attrib = private->attrib;
2888 	int rc;
2889 
2890 	if (!capable(CAP_SYS_ADMIN))
2891 		return -EACCES;
2892 	if (!argp)
2893 		return -EINVAL;
2894 
2895 	rc = 0;
2896 	if (copy_to_user(argp, (long *) &attrib,
2897 			 sizeof(struct attrib_data_t)))
2898 		rc = -EFAULT;
2899 
2900 	return rc;
2901 }
2902 
2903 /*
2904  * Set attributes (cache operations)
2905  * Stores the attributes for cache operation to be used in Define Extent (DE).
2906  */
2907 static int
2908 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2909 {
2910 	struct dasd_eckd_private *private =
2911 		(struct dasd_eckd_private *)device->private;
2912 	struct attrib_data_t attrib;
2913 
2914 	if (!capable(CAP_SYS_ADMIN))
2915 		return -EACCES;
2916 	if (!argp)
2917 		return -EINVAL;
2918 
2919 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
2920 		return -EFAULT;
2921 	private->attrib = attrib;
2922 
2923 	dev_info(&device->cdev->dev,
2924 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
2925 		 private->attrib.operation, private->attrib.nr_cyl);
2926 	return 0;
2927 }
2928 
2929 /*
2930  * Issue syscall I/O to EMC Symmetrix array.
2931  * CCWs are PSF and RSSD
2932  */
2933 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2934 {
2935 	struct dasd_symmio_parms usrparm;
2936 	char *psf_data, *rssd_result;
2937 	struct dasd_ccw_req *cqr;
2938 	struct ccw1 *ccw;
2939 	char psf0, psf1;
2940 	int rc;
2941 
2942 	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
2943 		return -EACCES;
2944 	psf0 = psf1 = 0;
2945 
2946 	/* Copy parms from caller */
2947 	rc = -EFAULT;
2948 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2949 		goto out;
2950 	if (is_compat_task() || sizeof(long) == 4) {
2951 		/* Make sure pointers are sane even on 31 bit. */
2952 		rc = -EINVAL;
2953 		if ((usrparm.psf_data >> 32) != 0)
2954 			goto out;
2955 		if ((usrparm.rssd_result >> 32) != 0)
2956 			goto out;
2957 		usrparm.psf_data &= 0x7fffffffULL;
2958 		usrparm.rssd_result &= 0x7fffffffULL;
2959 	}
2960 	/* alloc I/O data area */
2961 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2962 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
2963 	if (!psf_data || !rssd_result) {
2964 		rc = -ENOMEM;
2965 		goto out_free;
2966 	}
2967 
2968 	/* get syscall header from user space */
2969 	rc = -EFAULT;
2970 	if (copy_from_user(psf_data,
2971 			   (void __user *)(unsigned long) usrparm.psf_data,
2972 			   usrparm.psf_data_len))
2973 		goto out_free;
2974 	psf0 = psf_data[0];
2975 	psf1 = psf_data[1];
2976 
2977 	/* setup CCWs for PSF + RSSD */
2978 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device);
2979 	if (IS_ERR(cqr)) {
2980 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2981 			"Could not allocate initialization request");
2982 		rc = PTR_ERR(cqr);
2983 		goto out_free;
2984 	}
2985 
2986 	cqr->startdev = device;
2987 	cqr->memdev = device;
2988 	cqr->retries = 3;
2989 	cqr->expires = 10 * HZ;
2990 	cqr->buildclk = get_clock();
2991 	cqr->status = DASD_CQR_FILLED;
2992 
2993 	/* Build the ccws */
2994 	ccw = cqr->cpaddr;
2995 
2996 	/* PSF ccw */
2997 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2998 	ccw->count = usrparm.psf_data_len;
2999 	ccw->flags |= CCW_FLAG_CC;
3000 	ccw->cda = (__u32)(addr_t) psf_data;
3001 
3002 	ccw++;
3003 
3004 	/* RSSD ccw  */
3005 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
3006 	ccw->count = usrparm.rssd_result_len;
3007 	ccw->flags = CCW_FLAG_SLI;
3008 	ccw->cda = (__u32)(addr_t) rssd_result;
3009 
3010 	rc = dasd_sleep_on(cqr);
3011 	if (rc)
3012 		goto out_sfree;
3013 
3014 	rc = -EFAULT;
3015 	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
3016 			   rssd_result, usrparm.rssd_result_len))
3017 		goto out_sfree;
3018 	rc = 0;
3019 
3020 out_sfree:
3021 	dasd_sfree_request(cqr, cqr->memdev);
3022 out_free:
3023 	kfree(rssd_result);
3024 	kfree(psf_data);
3025 out:
3026 	DBF_DEV_EVENT(DBF_WARNING, device,
3027 		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
3028 		      (int) psf0, (int) psf1, rc);
3029 	return rc;
3030 }
3031 
3032 static int
3033 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
3034 {
3035 	struct dasd_device *device = block->base;
3036 
3037 	switch (cmd) {
3038 	case BIODASDGATTR:
3039 		return dasd_eckd_get_attrib(device, argp);
3040 	case BIODASDSATTR:
3041 		return dasd_eckd_set_attrib(device, argp);
3042 	case BIODASDPSRD:
3043 		return dasd_eckd_performance(device, argp);
3044 	case BIODASDRLSE:
3045 		return dasd_eckd_release(device);
3046 	case BIODASDRSRV:
3047 		return dasd_eckd_reserve(device);
3048 	case BIODASDSLCK:
3049 		return dasd_eckd_steal_lock(device);
3050 	case BIODASDSYMMIO:
3051 		return dasd_symm_io(device, argp);
3052 	default:
3053 		return -ENOIOCTLCMD;
3054 	}
3055 }
3056 
3057 /*
3058  * Dump the range of CCWs into the 'page' buffer
3059  * and return the number of printed characters.
3060  */
3061 static int
3062 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
3063 {
3064 	int len, count;
3065 	char *datap;
3066 
3067 	len = 0;
3068 	while (from <= to) {
3069 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3070 			       " CCW %p: %08X %08X DAT:",
3071 			       from, ((int *) from)[0], ((int *) from)[1]);
3072 
3073 		/* get pointer to data (consider IDALs) */
3074 		if (from->flags & CCW_FLAG_IDA)
3075 			datap = (char *) *((addr_t *) (addr_t) from->cda);
3076 		else
3077 			datap = (char *) ((addr_t) from->cda);
3078 
3079 		/* dump data (max 32 bytes) */
3080 		for (count = 0; count < from->count && count < 32; count++) {
3081 			if (count % 8 == 0) len += sprintf(page + len, " ");
3082 			if (count % 4 == 0) len += sprintf(page + len, " ");
3083 			len += sprintf(page + len, "%02x", datap[count]);
3084 		}
3085 		len += sprintf(page + len, "\n");
3086 		from++;
3087 	}
3088 	return len;
3089 }
3090 
3091 static void
3092 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
3093 			 char *reason)
3094 {
3095 	u64 *sense;
3096 
3097 	sense = (u64 *) dasd_get_sense(irb);
3098 	if (sense) {
3099 		DBF_DEV_EVENT(DBF_EMERG, device,
3100 			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
3101 			      "%016llx", reason,
3102 			      scsw_is_tm(&irb->scsw) ? "t" : "c",
3103 			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
3104 			      scsw_dstat(&irb->scsw), sense[0], sense[1],
3105 			      sense[2], sense[3]);
3106 	} else {
3107 		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
3108 			      "SORRY - NO VALID SENSE AVAILABLE\n");
3109 	}
3110 }
3111 
3112 /*
3113  * Print sense data and related channel program.
3114  * Parts are printed because the printk buffer is only 1024 bytes.
3115  */
3116 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3117 				 struct dasd_ccw_req *req, struct irb *irb)
3118 {
3119 	char *page;
3120 	struct ccw1 *first, *last, *fail, *from, *to;
3121 	int len, sl, sct;
3122 
3123 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3124 	if (page == NULL) {
3125 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3126 			      "No memory to dump sense data\n");
3127 		return;
3128 	}
3129 	/* dump the sense data */
3130 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3131 		      " I/O status report for device %s:\n",
3132 		      dev_name(&device->cdev->dev));
3133 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3134 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3135 		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3136 		       scsw_cc(&irb->scsw), req ? req->intrc : 0);
3137 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3138 		       " device %s: Failing CCW: %p\n",
3139 		       dev_name(&device->cdev->dev),
3140 		       (void *) (addr_t) irb->scsw.cmd.cpa);
3141 	if (irb->esw.esw0.erw.cons) {
3142 		for (sl = 0; sl < 4; sl++) {
3143 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3144 				       " Sense(hex) %2d-%2d:",
3145 				       (8 * sl), ((8 * sl) + 7));
3146 
3147 			for (sct = 0; sct < 8; sct++) {
3148 				len += sprintf(page + len, " %02x",
3149 					       irb->ecw[8 * sl + sct]);
3150 			}
3151 			len += sprintf(page + len, "\n");
3152 		}
3153 
3154 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3155 			/* 24 Byte Sense Data */
3156 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3157 				" 24 Byte: %x MSG %x, "
3158 				"%s MSGb to SYSOP\n",
3159 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3160 				irb->ecw[1] & 0x10 ? "" : "no");
3161 		} else {
3162 			/* 32 Byte Sense Data */
3163 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3164 				" 32 Byte: Format: %x "
3165 				"Exception class %x\n",
3166 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3167 		}
3168 	} else {
3169 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3170 			" SORRY - NO VALID SENSE AVAILABLE\n");
3171 	}
3172 	printk("%s", page);
3173 
3174 	if (req) {
3175 		/* req == NULL for unsolicited interrupts */
3176 		/* dump the Channel Program (max 140 Bytes per line) */
3177 		/* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
3178 		first = req->cpaddr;
3179 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3180 		to = min(first + 6, last);
3181 		len = sprintf(page,  KERN_ERR PRINTK_HEADER
3182 			      " Related CP in req: %p\n", req);
3183 		dasd_eckd_dump_ccw_range(first, to, page + len);
3184 		printk("%s", page);
3185 
3186 		/* print failing CCW area (maximum 4) */
3187 		/* scsw->cda is either valid or zero  */
3188 		len = 0;
3189 		from = ++to;
3190 		fail = (struct ccw1 *)(addr_t)
3191 				irb->scsw.cmd.cpa; /* failing CCW */
3192 		if (from <  fail - 2) {
3193 			from = fail - 2;     /* there is a gap - print header */
3194 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3195 		}
3196 		to = min(fail + 1, last);
3197 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
3198 
3199 		/* print last CCWs (maximum 2) */
3200 		from = max(from, ++to);
3201 		if (from < last - 1) {
3202 			from = last - 1;     /* there is a gap - print header */
3203 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3204 		}
3205 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
3206 		if (len > 0)
3207 			printk("%s", page);
3208 	}
3209 	free_page((unsigned long) page);
3210 }
3211 
3212 
3213 /*
3214  * Print sense data from a tcw.
3215  */
3216 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3217 				 struct dasd_ccw_req *req, struct irb *irb)
3218 {
3219 	char *page;
3220 	int len, sl, sct, residual;
3221 
3222 	struct tsb *tsb;
3223 	u8 *sense;
3224 
3225 
3226 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3227 	if (page == NULL) {
3228 		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
3229 			    "No memory to dump sense data");
3230 		return;
3231 	}
3232 	/* dump the sense data */
3233 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3234 		      " I/O status report for device %s:\n",
3235 		      dev_name(&device->cdev->dev));
3236 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3237 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
3238 		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
3239 		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3240 		       scsw_cc(&irb->scsw), req->intrc,
3241 		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3242 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3243 		       " device %s: Failing TCW: %p\n",
3244 		       dev_name(&device->cdev->dev),
3245 		       (void *) (addr_t) irb->scsw.tm.tcw);
3246 
3247 	tsb = NULL;
3248 	sense = NULL;
3249 	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01))
3250 		tsb = tcw_get_tsb(
3251 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3252 
3253 	if (tsb) {
3254 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3255 			       " tsb->length %d\n", tsb->length);
3256 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3257 			       " tsb->flags %x\n", tsb->flags);
3258 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3259 			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
3260 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3261 			       " tsb->count %d\n", tsb->count);
3262 		residual = tsb->count - 28;
3263 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3264 			       " residual %d\n", residual);
3265 
3266 		switch (tsb->flags & 0x07) {
3267 		case 1:	/* tsa_iostat */
3268 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3269 			       " tsb->tsa.iostat.dev_time %d\n",
3270 				       tsb->tsa.iostat.dev_time);
3271 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3272 			       " tsb->tsa.iostat.def_time %d\n",
3273 				       tsb->tsa.iostat.def_time);
3274 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3275 			       " tsb->tsa.iostat.queue_time %d\n",
3276 				       tsb->tsa.iostat.queue_time);
3277 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3278 			       " tsb->tsa.iostat.dev_busy_time %d\n",
3279 				       tsb->tsa.iostat.dev_busy_time);
3280 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3281 			       " tsb->tsa.iostat.dev_act_time %d\n",
3282 				       tsb->tsa.iostat.dev_act_time);
3283 			sense = tsb->tsa.iostat.sense;
3284 			break;
3285 		case 2: /* tsa_ddpc */
3286 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3287 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3288 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3289 			       " tsb->tsa.ddpc.rcq:  ");
3290 			for (sl = 0; sl < 16; sl++) {
3291 			for (sl = 0; sl < 2; sl++) {
3292 				for (sct = 0; sct < 8; sct++) {
3293 					len += sprintf(page + len, " %02x",
3294 						       tsb->tsa.ddpc.rcq[8 * sl + sct]);
3295 				len += sprintf(page + len, "\n");
3296 			}
3297 			sense = tsb->tsa.ddpc.sense;
3298 			break;
3299 		case 3: /* tsa_intrg */
3300 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3301 				      " tsb->tsa.intrg.: not supported yet\n");
3302 			break;
3303 		}
3304 
3305 		if (sense) {
3306 			for (sl = 0; sl < 4; sl++) {
3307 				len += sprintf(page + len,
3308 					       KERN_ERR PRINTK_HEADER
3309 					       " Sense(hex) %2d-%2d:",
3310 					       (8 * sl), ((8 * sl) + 7));
3311 				for (sct = 0; sct < 8; sct++) {
3312 					len += sprintf(page + len, " %02x",
3313 						       sense[8 * sl + sct]);
3314 				}
3315 				len += sprintf(page + len, "\n");
3316 			}
3317 
3318 			if (sense[27] & DASD_SENSE_BIT_0) {
3319 				/* 24 Byte Sense Data */
3320 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3321 					" 24 Byte: %x MSG %x, "
3322 					"%s MSGb to SYSOP\n",
3323 					sense[7] >> 4, sense[7] & 0x0f,
3324 					sense[1] & 0x10 ? "" : "no");
3325 			} else {
3326 				/* 32 Byte Sense Data */
3327 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3328 					" 32 Byte: Format: %x "
3329 					"Exception class %x\n",
3330 					sense[6] & 0x0f, sense[22] >> 4);
3331 			}
3332 		} else {
3333 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3334 				" SORRY - NO VALID SENSE AVAILABLE\n");
3335 		}
3336 	} else {
3337 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3338 			" SORRY - NO TSB DATA AVAILABLE\n");
3339 	}
3340 	printk("%s", page);
3341 	free_page((unsigned long) page);
3342 }
3343 
3344 static void dasd_eckd_dump_sense(struct dasd_device *device,
3345 				 struct dasd_ccw_req *req, struct irb *irb)
3346 {
3347 	if (req && scsw_is_tm(&req->irb.scsw))
3348 		dasd_eckd_dump_sense_tcw(device, req, irb);
3349 	else
3350 		dasd_eckd_dump_sense_ccw(device, req, irb);
3351 }
3352 
3353 static int dasd_eckd_pm_freeze(struct dasd_device *device)
3354 {
3355 	/*
3356 	 * the device should be disconnected from our LCU structure;
3357 	 * on restore we will reconnect it and reread LCU specific
3358 	 * information like PAV support that might have changed
3359 	 */
3360 	dasd_alias_remove_device(device);
3361 	dasd_alias_disconnect_device_from_lcu(device);
3362 
3363 	return 0;
3364 }
3365 
3366 static int dasd_eckd_restore_device(struct dasd_device *device)
3367 {
3368 	struct dasd_eckd_private *private;
3369 	struct dasd_eckd_characteristics temp_rdc_data;
3370 	int is_known, rc;
3371 	struct dasd_uid temp_uid;
3372 	unsigned long flags;
3373 
3374 	private = (struct dasd_eckd_private *) device->private;
3375 
3376 	/* Read Configuration Data */
3377 	rc = dasd_eckd_read_conf(device);
3378 	if (rc)
3379 		goto out_err;
3380 
3381 	dasd_eckd_get_uid(device, &temp_uid);
3382 	/* Generate device unique id */
3383 	rc = dasd_eckd_generate_uid(device);
3384 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3385 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3386 		dev_err(&device->cdev->dev, "The UID of the DASD has "
3387 			"changed\n");
3388 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3389 	if (rc)
3390 		goto out_err;
3391 
3392 	/* register lcu with alias handling, enable PAV if this is a new lcu */
3393 	is_known = dasd_alias_make_device_known_to_lcu(device);
3394 	if (is_known < 0)
3395 		return is_known;
3396 	if (!is_known) {
3397 		dasd_eckd_validate_server(device);
3398 		dasd_alias_lcu_setup_complete(device);
3399 	} else
3400 		dasd_alias_wait_for_lcu_setup(device);
3401 
3402 	/* RE-Read Configuration Data */
3403 	rc = dasd_eckd_read_conf(device);
3404 	if (rc)
3405 		goto out_err;
3406 
3407 	/* Read Feature Codes */
3408 	dasd_eckd_read_features(device);
3409 
3410 	/* Read Device Characteristics */
3411 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3412 					 &temp_rdc_data, 64);
3413 	if (rc) {
3414 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3415 				"Read device characteristic failed, rc=%d", rc);
3416 		goto out_err;
3417 	}
3418 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3419 	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3420 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3421 
3422 	/* add device to alias management */
3423 	dasd_alias_add_device(device);
3424 
3425 	return 0;
3426 
3427 out_err:
3428 	return -1;
3429 }
3430 
3431 static int dasd_eckd_reload_device(struct dasd_device *device)
3432 {
3433 	struct dasd_eckd_private *private;
3434 	int rc, old_base;
3435 	char print_uid[60];
3436 	struct dasd_uid uid;
3437 	unsigned long flags;
3438 
3439 	private = (struct dasd_eckd_private *) device->private;
3440 
3441 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3442 	old_base = private->uid.base_unit_addr;
3443 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3444 
3445 	/* Read Configuration Data */
3446 	rc = dasd_eckd_read_conf(device);
3447 	if (rc)
3448 		goto out_err;
3449 
3450 	rc = dasd_eckd_generate_uid(device);
3451 	if (rc)
3452 		goto out_err;
3453 	/*
3454 	 * update unit address configuration and
3455 	 * add device to alias management
3456 	 */
3457 	dasd_alias_update_add_device(device);
3458 
3459 	dasd_eckd_get_uid(device, &uid);
3460 
3461 	if (old_base != uid.base_unit_addr) {
3462 		if (strlen(uid.vduit) > 0)
3463 			snprintf(print_uid, sizeof(print_uid),
3464 				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
3465 				 uid.ssid, uid.base_unit_addr, uid.vduit);
3466 		else
3467 			snprintf(print_uid, sizeof(print_uid),
3468 				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
3469 				 uid.ssid, uid.base_unit_addr);
3470 
3471 		dev_info(&device->cdev->dev,
3472 			 "An Alias device was reassigned to a new base device "
3473 			 "with UID: %s\n", print_uid);
3474 	}
3475 	return 0;
3476 
3477 out_err:
3478 	return -1;
3479 }
3480 
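/*
 * ccw bus glue: probe and set_online are ECKD-specific, everything else
 * is delegated to the generic dasd layer.
 */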
3481 static struct ccw_driver dasd_eckd_driver = {
3482 	.name	     = "dasd-eckd",
3483 	.owner	     = THIS_MODULE,
3484 	.ids	     = dasd_eckd_ids,
3485 	.probe	     = dasd_eckd_probe,
3486 	.remove      = dasd_generic_remove,
3487 	.set_offline = dasd_generic_set_offline,
3488 	.set_online  = dasd_eckd_set_online,
3489 	.notify      = dasd_generic_notify,
3490 	.freeze      = dasd_generic_pm_freeze,
3491 	.thaw	     = dasd_generic_restore_device,
3492 	.restore     = dasd_generic_restore_device,
3493 	.uc_handler  = dasd_generic_uc_handler,
3494 };
3495 
3496 /*
3497  * max_blocks depends on the amount of storage available in the static
3498  * io buffer for each device, currently 8192 bytes (= 2 pages). For 64 bit
3499  * one struct dasd_mchunk takes 24 bytes, the struct dasd_ccw_req takes
3500  * 136 bytes and each block can use up to 16 bytes (8 for the ccw and 8
3501  * for the idal pointer). In addition we have one define extent ccw +
3502  * 16 bytes of data and one locate record ccw + 16 bytes of data. That makes:
3503  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
3504  * We want to fit two requests into the available memory so that we can
3505  * immediately start the next one when the current one finishes. That
3506  * leaves 249.5 blocks per request; with a little safety margin, use 240.
3507  * (The same arithmetic is spelled out, for illustration, further below.)
3508  */
3509 static struct dasd_discipline dasd_eckd_discipline = {
3510 	.owner = THIS_MODULE,
3511 	.name = "ECKD",
3512 	.ebcname = "ECKD",
3513 	.max_blocks = 240,
3514 	.check_device = dasd_eckd_check_characteristics,
3515 	.uncheck_device = dasd_eckd_uncheck_device,
3516 	.do_analysis = dasd_eckd_do_analysis,
3517 	.ready_to_online = dasd_eckd_ready_to_online,
3518 	.online_to_ready = dasd_eckd_online_to_ready,
3519 	.fill_geometry = dasd_eckd_fill_geometry,
3520 	.start_IO = dasd_start_IO,
3521 	.term_IO = dasd_term_IO,
3522 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
3523 	.format_device = dasd_eckd_format_device,
3524 	.erp_action = dasd_eckd_erp_action,
3525 	.erp_postaction = dasd_eckd_erp_postaction,
3526 	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
3527 	.build_cp = dasd_eckd_build_alias_cp,
3528 	.free_cp = dasd_eckd_free_alias_cp,
3529 	.dump_sense = dasd_eckd_dump_sense,
3530 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
3531 	.fill_info = dasd_eckd_fill_info,
3532 	.ioctl = dasd_eckd_ioctl,
3533 	.freeze = dasd_eckd_pm_freeze,
3534 	.restore = dasd_eckd_restore_device,
3535 	.reload = dasd_eckd_reload_device,
3536 	.get_uid = dasd_eckd_get_uid,
3537 };
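
/*
 * Illustrative sketch only (not used by the driver): the max_blocks
 * arithmetic from the comment above, written out as compile-time
 * constants. The per-item sizes are the figures quoted in that comment
 * (assumed here, not re-derived from the current structure layouts).
 */
enum {
	ECKD_SKETCH_BUF_BYTES	= 8192,	/* static io buffer: 2 pages */
	/* chunk header + dasd_ccw_req + define extent/locate record ccws + data */
	ECKD_SKETCH_OVERHEAD	= 24 + 136 + 8 + 16 + 8 + 16,
	/* 16 bytes per block: 8 for the ccw, 8 for the idal pointer */
	ECKD_SKETCH_MAX_BLOCKS	=
		(ECKD_SKETCH_BUF_BYTES - ECKD_SKETCH_OVERHEAD) / 16,
	/* = 499; two requests should fit, so ~249 each, use 240 for safety */
};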
3538 
3539 static int __init
3540 dasd_eckd_init(void)
3541 {
3542 	int ret;
3543 
3544 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
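	/*
	 * Preallocate the emergency reserve/release request with GFP_DMA so
	 * the channel program is addressable by the channel subsystem and
	 * reserve/release keeps working under memory pressure.
	 */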
3545 	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
3546 				   GFP_KERNEL | GFP_DMA);
3547 	if (!dasd_reserve_req)
3548 		return -ENOMEM;
3549 	ret = ccw_driver_register(&dasd_eckd_driver);
3550 	if (!ret)
3551 		wait_for_device_probe();
3552 	else
3553 		kfree(dasd_reserve_req);
3554 	return ret;
3555 }
3556 
3557 static void __exit
3558 dasd_eckd_cleanup(void)
3559 {
3560 	ccw_driver_unregister(&dasd_eckd_driver);
3561 	kfree(dasd_reserve_req);
3562 }
3563 
3564 module_init(dasd_eckd_init);
3565 module_exit(dasd_eckd_cleanup);
3566