xref: /openbmc/linux/drivers/s390/block/dasd_eckd.c (revision 2dedf0d9)
1 /*
2  * File...........: linux/drivers/s390/block/dasd_eckd.c
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/init.h>
22 
23 #include <asm/debug.h>
24 #include <asm/idals.h>
25 #include <asm/ebcdic.h>
26 #include <asm/compat.h>
27 #include <asm/io.h>
28 #include <asm/uaccess.h>
29 #include <asm/cio.h>
30 #include <asm/ccwdev.h>
31 #include <asm/itcw.h>
32 
33 #include "dasd_int.h"
34 #include "dasd_eckd.h"
35 #include "../cio/chsc.h"
36 
37 
38 #ifdef PRINTK_HEADER
39 #undef PRINTK_HEADER
40 #endif				/* PRINTK_HEADER */
41 #define PRINTK_HEADER "dasd(eckd):"
42 
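/*
 * Convenience accessors for the factor fields of the read device
 * characteristics data; F1 to F5 are taken from the 0x01 or 0x02 factor
 * set, depending on which formula the device reports.
 */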
43 #define ECKD_C0(i) (i->home_bytes)
44 #define ECKD_F(i) (i->formula)
45 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
46 		    (i->factors.f_0x02.f1))
47 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
48 		    (i->factors.f_0x02.f2))
49 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
50 		    (i->factors.f_0x02.f3))
51 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
52 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
53 #define ECKD_F6(i) (i->factor6)
54 #define ECKD_F7(i) (i->factor7)
55 #define ECKD_F8(i) (i->factor8)
56 
57 MODULE_LICENSE("GPL");
58 
59 static struct dasd_discipline dasd_eckd_discipline;
60 
61 /* The ccw bus type uses this table to find devices that it sends to
62  * dasd_eckd_probe */
63 static struct ccw_device_id dasd_eckd_ids[] = {
64 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
65 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
66 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
67 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
68 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
69 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
70 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
71 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
72 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
73 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
74 	{ /* end of list */ },
75 };
76 
77 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
78 
79 static struct ccw_driver dasd_eckd_driver; /* see below */
80 
81 #define INIT_CQR_OK 0
82 #define INIT_CQR_UNFORMATTED 1
83 #define INIT_CQR_ERROR 2
84 
85 
86 /* Initial attempt at a probe function. This can be simplified once
87  * the other detection code is gone. */
88 static int
89 dasd_eckd_probe (struct ccw_device *cdev)
90 {
91 	int ret;
92 
93 	/* set ECKD specific ccw-device options */
94 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
95 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
96 	if (ret) {
97 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
98 				"dasd_eckd_probe: could not set "
99 				"ccw-device options");
100 		return ret;
101 	}
102 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
103 	return ret;
104 }
105 
106 static int
107 dasd_eckd_set_online(struct ccw_device *cdev)
108 {
109 	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
110 }
111 
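/*
 * Record sizes (key plus data) used by the compatible disk layout:
 * sizes_trk0 holds the sizes of the first three records on track 0,
 * LABEL_SIZE the size of the label records (see dasd_eckd_cdl_reclen
 * and dasd_eckd_format_device).
 */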
112 static const int sizes_trk0[] = { 28, 148, 84 };
113 #define LABEL_SIZE 140
114 
115 static inline unsigned int
116 round_up_multiple(unsigned int no, unsigned int mult)
117 {
118 	int rem = no % mult;
119 	return (rem ? no - rem + mult : no);
120 }
121 
122 static inline unsigned int
123 ceil_quot(unsigned int d1, unsigned int d2)
124 {
125 	return (d1 + (d2 - 1)) / d2;
126 }
127 
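/*
 * Number of records with the given key and data length that fit on one
 * track, based on the track capacity formulas of the respective device
 * geometry (3380, 3390, 9345); returns 0 for an unknown device type.
 * For example, a 3390 track holds 12 records of 4096 data bytes without
 * a key.
 */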
128 static unsigned int
129 recs_per_track(struct dasd_eckd_characteristics * rdc,
130 	       unsigned int kl, unsigned int dl)
131 {
132 	int dn, kn;
133 
134 	switch (rdc->dev_type) {
135 	case 0x3380:
136 		if (kl)
137 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
138 				       ceil_quot(dl + 12, 32));
139 		else
140 			return 1499 / (15 + ceil_quot(dl + 12, 32));
141 	case 0x3390:
142 		dn = ceil_quot(dl + 6, 232) + 1;
143 		if (kl) {
144 			kn = ceil_quot(kl + 6, 232) + 1;
145 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
146 				       9 + ceil_quot(dl + 6 * dn, 34));
147 		} else
148 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
149 	case 0x9345:
150 		dn = ceil_quot(dl + 6, 232) + 1;
151 		if (kl) {
152 			kn = ceil_quot(kl + 6, 232) + 1;
153 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
154 				       ceil_quot(dl + 6 * dn, 34));
155 		} else
156 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
157 	}
158 	return 0;
159 }
160 
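/*
 * Pack a cylinder/head pair into the CCHH format used by the channel
 * programs: the low 16 bits of the cylinder go into the cyl field, the
 * remaining high-order cylinder bits are shifted into the head field
 * above the 4-bit head number, so cylinder numbers that do not fit into
 * 16 bits can still be addressed. For example, cyl 0x12345 and head 7
 * yield cyl 0x2345 and head 0x0017.
 */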
161 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
162 {
163 	geo->cyl = (__u16) cyl;
164 	geo->head = cyl >> 16;
165 	geo->head <<= 4;
166 	geo->head |= head;
167 }
168 
169 static int
170 check_XRC (struct ccw1         *de_ccw,
171            struct DE_eckd_data *data,
172            struct dasd_device  *device)
173 {
174 	struct dasd_eckd_private *private;
175 	int rc;
176 
177 	private = (struct dasd_eckd_private *) device->private;
178 	if (!private->rdc_data.facilities.XRC_supported)
179 		return 0;
180 
181 	/* switch on System Time Stamp - needed for XRC Support */
182 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
183 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
184 
185 	rc = get_sync_clock(&data->ep_sys_time);
186 	/* Ignore return code if sync clock is switched off. */
187 	if (rc == -ENOSYS || rc == -EACCES)
188 		rc = 0;
189 
190 	de_ccw->count = sizeof(struct DE_eckd_data);
191 	de_ccw->flags |= CCW_FLAG_SLI;
192 	return rc;
193 }
194 
195 static int
196 define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
197 	      unsigned int totrk, int cmd, struct dasd_device *device)
198 {
199 	struct dasd_eckd_private *private;
200 	u32 begcyl, endcyl;
201 	u16 heads, beghead, endhead;
202 	int rc = 0;
203 
204 	private = (struct dasd_eckd_private *) device->private;
205 
206 	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
207 	ccw->flags = 0;
208 	ccw->count = 16;
209 	ccw->cda = (__u32) __pa(data);
210 
211 	memset(data, 0, sizeof(struct DE_eckd_data));
212 	switch (cmd) {
213 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
214 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
215 	case DASD_ECKD_CCW_READ:
216 	case DASD_ECKD_CCW_READ_MT:
217 	case DASD_ECKD_CCW_READ_CKD:
218 	case DASD_ECKD_CCW_READ_CKD_MT:
219 	case DASD_ECKD_CCW_READ_KD:
220 	case DASD_ECKD_CCW_READ_KD_MT:
221 	case DASD_ECKD_CCW_READ_COUNT:
222 		data->mask.perm = 0x1;
223 		data->attributes.operation = private->attrib.operation;
224 		break;
225 	case DASD_ECKD_CCW_WRITE:
226 	case DASD_ECKD_CCW_WRITE_MT:
227 	case DASD_ECKD_CCW_WRITE_KD:
228 	case DASD_ECKD_CCW_WRITE_KD_MT:
229 		data->mask.perm = 0x02;
230 		data->attributes.operation = private->attrib.operation;
231 		rc = check_XRC (ccw, data, device);
232 		break;
233 	case DASD_ECKD_CCW_WRITE_CKD:
234 	case DASD_ECKD_CCW_WRITE_CKD_MT:
235 		data->attributes.operation = DASD_BYPASS_CACHE;
236 		rc = check_XRC (ccw, data, device);
237 		break;
238 	case DASD_ECKD_CCW_ERASE:
239 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
240 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
241 		data->mask.perm = 0x3;
242 		data->mask.auth = 0x1;
243 		data->attributes.operation = DASD_BYPASS_CACHE;
244 		rc = check_XRC (ccw, data, device);
245 		break;
246 	default:
247 		dev_err(&device->cdev->dev,
248 			"0x%x is not a known command\n", cmd);
249 		break;
250 	}
251 
252 	data->attributes.mode = 0x3;	/* ECKD */
253 
254 	if ((private->rdc_data.cu_type == 0x2105 ||
255 	     private->rdc_data.cu_type == 0x2107 ||
256 	     private->rdc_data.cu_type == 0x1750)
257 	    && !(private->uses_cdl && trk < 2))
258 		data->ga_extended |= 0x40; /* Regular Data Format Mode */
259 
260 	heads = private->rdc_data.trk_per_cyl;
261 	begcyl = trk / heads;
262 	beghead = trk % heads;
263 	endcyl = totrk / heads;
264 	endhead = totrk % heads;
265 
266 	/* check for sequential prestage - enhance cylinder range */
267 	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
268 	    data->attributes.operation == DASD_SEQ_ACCESS) {
269 
270 		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
271 			endcyl += private->attrib.nr_cyl;
272 		else
273 			endcyl = (private->real_cyl - 1);
274 	}
275 
276 	set_ch_t(&data->beg_ext, begcyl, beghead);
277 	set_ch_t(&data->end_ext, endcyl, endhead);
278 	return rc;
279 }
280 
281 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
282 			       struct dasd_device  *device)
283 {
284 	struct dasd_eckd_private *private;
285 	int rc;
286 
287 	private = (struct dasd_eckd_private *) device->private;
288 	if (!private->rdc_data.facilities.XRC_supported)
289 		return 0;
290 
291 	/* switch on System Time Stamp - needed for XRC Support */
292 	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
293 	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
294 	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */
295 
296 	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
297 	/* Ignore return code if sync clock is switched off. */
298 	if (rc == -ENOSYS || rc == -EACCES)
299 		rc = 0;
300 	return rc;
301 }
302 
303 static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
304 			  unsigned int rec_on_trk, int count, int cmd,
305 			  struct dasd_device *device, unsigned int reclen,
306 			  unsigned int tlf)
307 {
308 	struct dasd_eckd_private *private;
309 	int sector;
310 	int dn, d;
311 
312 	private = (struct dasd_eckd_private *) device->private;
313 
314 	memset(data, 0, sizeof(*data));
315 	sector = 0;
316 	if (rec_on_trk) {
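		/*
		 * Estimate the sector (i.e. roughly the angular position)
		 * at which the record starts on the track, from the record
		 * number and record length, using device-type specific
		 * track geometry constants.
		 */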
317 		switch (private->rdc_data.dev_type) {
318 		case 0x3390:
319 			dn = ceil_quot(reclen + 6, 232);
320 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
321 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
322 			break;
323 		case 0x3380:
324 			d = 7 + ceil_quot(reclen + 12, 32);
325 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
326 			break;
327 		}
328 	}
329 	data->sector = sector;
330 	/* note: the meaning of count depends on the operation:
331 	 *	 for record-based I/O it's the number of records, but for
332 	 *	 track-based I/O it's the number of tracks
333 	 */
334 	data->count = count;
335 	switch (cmd) {
336 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
337 		data->operation.orientation = 0x3;
338 		data->operation.operation = 0x03;
339 		break;
340 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
341 		data->operation.orientation = 0x3;
342 		data->operation.operation = 0x16;
343 		break;
344 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
345 		data->operation.orientation = 0x1;
346 		data->operation.operation = 0x03;
347 		data->count++;
348 		break;
349 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
350 		data->operation.orientation = 0x3;
351 		data->operation.operation = 0x16;
352 		data->count++;
353 		break;
354 	case DASD_ECKD_CCW_WRITE:
355 	case DASD_ECKD_CCW_WRITE_MT:
356 	case DASD_ECKD_CCW_WRITE_KD:
357 	case DASD_ECKD_CCW_WRITE_KD_MT:
358 		data->auxiliary.length_valid = 0x1;
359 		data->length = reclen;
360 		data->operation.operation = 0x01;
361 		break;
362 	case DASD_ECKD_CCW_WRITE_CKD:
363 	case DASD_ECKD_CCW_WRITE_CKD_MT:
364 		data->auxiliary.length_valid = 0x1;
365 		data->length = reclen;
366 		data->operation.operation = 0x03;
367 		break;
368 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
369 		data->auxiliary.length_valid = 0x1;
370 		data->length = reclen;	/* not tlf, as one might think */
371 		data->operation.operation = 0x3F;
372 		data->extended_operation = 0x23;
373 		break;
374 	case DASD_ECKD_CCW_READ:
375 	case DASD_ECKD_CCW_READ_MT:
376 	case DASD_ECKD_CCW_READ_KD:
377 	case DASD_ECKD_CCW_READ_KD_MT:
378 		data->auxiliary.length_valid = 0x1;
379 		data->length = reclen;
380 		data->operation.operation = 0x06;
381 		break;
382 	case DASD_ECKD_CCW_READ_CKD:
383 	case DASD_ECKD_CCW_READ_CKD_MT:
384 		data->auxiliary.length_valid = 0x1;
385 		data->length = reclen;
386 		data->operation.operation = 0x16;
387 		break;
388 	case DASD_ECKD_CCW_READ_COUNT:
389 		data->operation.operation = 0x06;
390 		break;
391 	case DASD_ECKD_CCW_READ_TRACK_DATA:
392 		data->auxiliary.length_valid = 0x1;
393 		data->length = tlf;
394 		data->operation.operation = 0x0C;
395 		break;
396 	case DASD_ECKD_CCW_ERASE:
397 		data->length = reclen;
398 		data->auxiliary.length_valid = 0x1;
399 		data->operation.operation = 0x0b;
400 		break;
401 	default:
402 		DBF_DEV_EVENT(DBF_ERR, device,
403 			    "fill LRE unknown opcode 0x%x", cmd);
404 		BUG();
405 	}
406 	set_ch_t(&data->seek_addr,
407 		 trk / private->rdc_data.trk_per_cyl,
408 		 trk % private->rdc_data.trk_per_cyl);
409 	data->search_arg.cyl = data->seek_addr.cyl;
410 	data->search_arg.head = data->seek_addr.head;
411 	data->search_arg.record = rec_on_trk;
412 }
413 
414 static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
415 		      unsigned int trk, unsigned int totrk, int cmd,
416 		      struct dasd_device *basedev, struct dasd_device *startdev,
417 		      unsigned char format, unsigned int rec_on_trk, int count,
418 		      unsigned int blksize, unsigned int tlf)
419 {
420 	struct dasd_eckd_private *basepriv, *startpriv;
421 	struct DE_eckd_data *dedata;
422 	struct LRE_eckd_data *lredata;
423 	u32 begcyl, endcyl;
424 	u16 heads, beghead, endhead;
425 	int rc = 0;
426 
427 	basepriv = (struct dasd_eckd_private *) basedev->private;
428 	startpriv = (struct dasd_eckd_private *) startdev->private;
429 	dedata = &pfxdata->define_extent;
430 	lredata = &pfxdata->locate_record;
431 
432 	ccw->cmd_code = DASD_ECKD_CCW_PFX;
433 	ccw->flags = 0;
434 	ccw->count = sizeof(*pfxdata);
435 	ccw->cda = (__u32) __pa(pfxdata);
436 
437 	memset(pfxdata, 0, sizeof(*pfxdata));
438 	/* prefix data */
439 	if (format > 1) {
440 		DBF_DEV_EVENT(DBF_ERR, basedev,
441 			      "PFX LRE unknown format 0x%x", format);
442 		BUG();
443 		return -EINVAL;
444 	}
445 	pfxdata->format = format;
446 	pfxdata->base_address = basepriv->ned->unit_addr;
447 	pfxdata->base_lss = basepriv->ned->ID;
448 	pfxdata->validity.define_extent = 1;
449 
450 	/* private uid is kept up to date, conf_data may be outdated */
451 	if (startpriv->uid.type != UA_BASE_DEVICE) {
452 		pfxdata->validity.verify_base = 1;
453 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
454 			pfxdata->validity.hyper_pav = 1;
455 	}
456 
457 	/* define extent data (mostly) */
458 	switch (cmd) {
459 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
460 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
461 	case DASD_ECKD_CCW_READ:
462 	case DASD_ECKD_CCW_READ_MT:
463 	case DASD_ECKD_CCW_READ_CKD:
464 	case DASD_ECKD_CCW_READ_CKD_MT:
465 	case DASD_ECKD_CCW_READ_KD:
466 	case DASD_ECKD_CCW_READ_KD_MT:
467 	case DASD_ECKD_CCW_READ_COUNT:
468 		dedata->mask.perm = 0x1;
469 		dedata->attributes.operation = basepriv->attrib.operation;
470 		break;
471 	case DASD_ECKD_CCW_READ_TRACK_DATA:
472 		dedata->mask.perm = 0x1;
473 		dedata->attributes.operation = basepriv->attrib.operation;
474 		dedata->blk_size = 0;
475 		break;
476 	case DASD_ECKD_CCW_WRITE:
477 	case DASD_ECKD_CCW_WRITE_MT:
478 	case DASD_ECKD_CCW_WRITE_KD:
479 	case DASD_ECKD_CCW_WRITE_KD_MT:
480 		dedata->mask.perm = 0x02;
481 		dedata->attributes.operation = basepriv->attrib.operation;
482 		rc = check_XRC_on_prefix(pfxdata, basedev);
483 		break;
484 	case DASD_ECKD_CCW_WRITE_CKD:
485 	case DASD_ECKD_CCW_WRITE_CKD_MT:
486 		dedata->attributes.operation = DASD_BYPASS_CACHE;
487 		rc = check_XRC_on_prefix(pfxdata, basedev);
488 		break;
489 	case DASD_ECKD_CCW_ERASE:
490 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
491 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
492 		dedata->mask.perm = 0x3;
493 		dedata->mask.auth = 0x1;
494 		dedata->attributes.operation = DASD_BYPASS_CACHE;
495 		rc = check_XRC_on_prefix(pfxdata, basedev);
496 		break;
497 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
498 		dedata->mask.perm = 0x02;
499 		dedata->attributes.operation = basepriv->attrib.operation;
500 		dedata->blk_size = blksize;
501 		rc = check_XRC_on_prefix(pfxdata, basedev);
502 		break;
503 	default:
504 		DBF_DEV_EVENT(DBF_ERR, basedev,
505 			    "PFX LRE unknown opcode 0x%x", cmd);
506 		BUG();
507 		return -EINVAL;
508 	}
509 
510 	dedata->attributes.mode = 0x3;	/* ECKD */
511 
512 	if ((basepriv->rdc_data.cu_type == 0x2105 ||
513 	     basepriv->rdc_data.cu_type == 0x2107 ||
514 	     basepriv->rdc_data.cu_type == 0x1750)
515 	    && !(basepriv->uses_cdl && trk < 2))
516 		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */
517 
518 	heads = basepriv->rdc_data.trk_per_cyl;
519 	begcyl = trk / heads;
520 	beghead = trk % heads;
521 	endcyl = totrk / heads;
522 	endhead = totrk % heads;
523 
524 	/* check for sequential prestage - enhance cylinder range */
525 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
526 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
527 
528 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
529 			endcyl += basepriv->attrib.nr_cyl;
530 		else
531 			endcyl = (basepriv->real_cyl - 1);
532 	}
533 
534 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
535 	set_ch_t(&dedata->end_ext, endcyl, endhead);
536 
537 	if (format == 1) {
538 		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
539 			      basedev, blksize, tlf);
540 	}
541 
542 	return rc;
543 }
544 
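/*
 * Convenience wrapper around prefix_LRE that builds a format 0 prefix,
 * i.e. one that carries only the define extent part and no locate
 * record extended parameters.
 */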
545 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
546 		  unsigned int trk, unsigned int totrk, int cmd,
547 		  struct dasd_device *basedev, struct dasd_device *startdev)
548 {
549 	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
550 			  0, 0, 0, 0, 0);
551 }
552 
553 static void
554 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
555 	      unsigned int rec_on_trk, int no_rec, int cmd,
556 	      struct dasd_device * device, int reclen)
557 {
558 	struct dasd_eckd_private *private;
559 	int sector;
560 	int dn, d;
561 
562 	private = (struct dasd_eckd_private *) device->private;
563 
564 	DBF_DEV_EVENT(DBF_INFO, device,
565 		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
566 		  trk, rec_on_trk, no_rec, cmd, reclen);
567 
568 	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
569 	ccw->flags = 0;
570 	ccw->count = 16;
571 	ccw->cda = (__u32) __pa(data);
572 
573 	memset(data, 0, sizeof(struct LO_eckd_data));
574 	sector = 0;
575 	if (rec_on_trk) {
576 		switch (private->rdc_data.dev_type) {
577 		case 0x3390:
578 			dn = ceil_quot(reclen + 6, 232);
579 			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
580 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
581 			break;
582 		case 0x3380:
583 			d = 7 + ceil_quot(reclen + 12, 32);
584 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
585 			break;
586 		}
587 	}
588 	data->sector = sector;
589 	data->count = no_rec;
590 	switch (cmd) {
591 	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
592 		data->operation.orientation = 0x3;
593 		data->operation.operation = 0x03;
594 		break;
595 	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
596 		data->operation.orientation = 0x3;
597 		data->operation.operation = 0x16;
598 		break;
599 	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
600 		data->operation.orientation = 0x1;
601 		data->operation.operation = 0x03;
602 		data->count++;
603 		break;
604 	case DASD_ECKD_CCW_READ_RECORD_ZERO:
605 		data->operation.orientation = 0x3;
606 		data->operation.operation = 0x16;
607 		data->count++;
608 		break;
609 	case DASD_ECKD_CCW_WRITE:
610 	case DASD_ECKD_CCW_WRITE_MT:
611 	case DASD_ECKD_CCW_WRITE_KD:
612 	case DASD_ECKD_CCW_WRITE_KD_MT:
613 		data->auxiliary.last_bytes_used = 0x1;
614 		data->length = reclen;
615 		data->operation.operation = 0x01;
616 		break;
617 	case DASD_ECKD_CCW_WRITE_CKD:
618 	case DASD_ECKD_CCW_WRITE_CKD_MT:
619 		data->auxiliary.last_bytes_used = 0x1;
620 		data->length = reclen;
621 		data->operation.operation = 0x03;
622 		break;
623 	case DASD_ECKD_CCW_READ:
624 	case DASD_ECKD_CCW_READ_MT:
625 	case DASD_ECKD_CCW_READ_KD:
626 	case DASD_ECKD_CCW_READ_KD_MT:
627 		data->auxiliary.last_bytes_used = 0x1;
628 		data->length = reclen;
629 		data->operation.operation = 0x06;
630 		break;
631 	case DASD_ECKD_CCW_READ_CKD:
632 	case DASD_ECKD_CCW_READ_CKD_MT:
633 		data->auxiliary.last_bytes_used = 0x1;
634 		data->length = reclen;
635 		data->operation.operation = 0x16;
636 		break;
637 	case DASD_ECKD_CCW_READ_COUNT:
638 		data->operation.operation = 0x06;
639 		break;
640 	case DASD_ECKD_CCW_ERASE:
641 		data->length = reclen;
642 		data->auxiliary.last_bytes_used = 0x1;
643 		data->operation.operation = 0x0b;
644 		break;
645 	default:
646 		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
647 			      "opcode 0x%x", cmd);
648 	}
649 	set_ch_t(&data->seek_addr,
650 		 trk / private->rdc_data.trk_per_cyl,
651 		 trk % private->rdc_data.trk_per_cyl);
652 	data->search_arg.cyl = data->seek_addr.cyl;
653 	data->search_arg.head = data->seek_addr.head;
654 	data->search_arg.record = rec_on_trk;
655 }
656 
657 /*
658  * Returns 1 if the block is one of the special blocks that needs
659  * to get read/written with the KD variant of the command.
660  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
661  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
662  * Luckily the KD variants differ only by one bit (0x08) from the
663  * normal variant. So don't wonder about code like:
664  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
665  *         ccw->cmd_code |= 0x8;
666  */
667 static inline int
668 dasd_eckd_cdl_special(int blk_per_trk, int recid)
669 {
670 	if (recid < 3)
671 		return 1;
672 	if (recid < blk_per_trk)
673 		return 0;
674 	if (recid < 2 * blk_per_trk)
675 		return 1;
676 	return 0;
677 }
678 
679 /*
680  * Returns the record size for the special blocks of the cdl format.
681  * Only returns something useful if dasd_eckd_cdl_special is true
682  * for the recid.
683  */
684 static inline int
685 dasd_eckd_cdl_reclen(int recid)
686 {
687 	if (recid < 3)
688 		return sizes_trk0[recid];
689 	return LABEL_SIZE;
690 }
691 
692 /*
693  * Generate device unique id that specifies the physical device.
694  */
695 static int dasd_eckd_generate_uid(struct dasd_device *device)
696 {
697 	struct dasd_eckd_private *private;
698 	struct dasd_uid *uid;
699 	int count;
700 	unsigned long flags;
701 
702 	private = (struct dasd_eckd_private *) device->private;
703 	if (!private)
704 		return -ENODEV;
705 	if (!private->ned || !private->gneq)
706 		return -ENODEV;
707 	uid = &private->uid;
708 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
709 	memset(uid, 0, sizeof(struct dasd_uid));
710 	memcpy(uid->vendor, private->ned->HDA_manufacturer,
711 	       sizeof(uid->vendor) - 1);
712 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
713 	memcpy(uid->serial, private->ned->HDA_location,
714 	       sizeof(uid->serial) - 1);
715 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
716 	uid->ssid = private->gneq->subsystemID;
717 	uid->real_unit_addr = private->ned->unit_addr;
718 	if (private->sneq) {
719 		uid->type = private->sneq->sua_flags;
720 		if (uid->type == UA_BASE_PAV_ALIAS)
721 			uid->base_unit_addr = private->sneq->base_unit_addr;
722 	} else {
723 		uid->type = UA_BASE_DEVICE;
724 	}
725 	if (private->vdsneq) {
726 		for (count = 0; count < 16; count++) {
727 			sprintf(uid->vduit+2*count, "%02x",
728 				private->vdsneq->uit[count]);
729 		}
730 	}
731 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
732 	return 0;
733 }
734 
735 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
736 {
737 	struct dasd_eckd_private *private;
738 	unsigned long flags;
739 
740 	if (device->private) {
741 		private = (struct dasd_eckd_private *)device->private;
742 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
743 		*uid = private->uid;
744 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
745 		return 0;
746 	}
747 	return -EINVAL;
748 }
749 
750 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
751 						    void *rcd_buffer,
752 						    struct ciw *ciw, __u8 lpm)
753 {
754 	struct dasd_ccw_req *cqr;
755 	struct ccw1 *ccw;
756 
757 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */, ciw->count,
758 				   device);
759 
760 	if (IS_ERR(cqr)) {
761 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
762 			      "Could not allocate RCD request");
763 		return cqr;
764 	}
765 
766 	ccw = cqr->cpaddr;
767 	ccw->cmd_code = ciw->cmd;
768 	ccw->cda = (__u32)(addr_t)rcd_buffer;
769 	ccw->count = ciw->count;
770 
771 	cqr->startdev = device;
772 	cqr->memdev = device;
773 	cqr->block = NULL;
774 	cqr->expires = 10*HZ;
775 	cqr->lpm = lpm;
776 	cqr->retries = 256;
777 	cqr->buildclk = get_clock();
778 	cqr->status = DASD_CQR_FILLED;
779 	return cqr;
780 }
781 
782 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
783 				   void **rcd_buffer,
784 				   int *rcd_buffer_size, __u8 lpm)
785 {
786 	struct ciw *ciw;
787 	char *rcd_buf = NULL;
788 	int ret;
789 	struct dasd_ccw_req *cqr;
790 
791 	/*
792 	 * scan for RCD command in extended SenseID data
793 	 */
794 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
795 	if (!ciw || ciw->cmd == 0) {
796 		ret = -EOPNOTSUPP;
797 		goto out_error;
798 	}
799 	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
800 	if (!rcd_buf) {
801 		ret = -ENOMEM;
802 		goto out_error;
803 	}
804 
805 	/*
806 	 * buffer has to start with EBCDIC "V1.0" to show
807 	 * support for virtual device SNEQ
808 	 */
809 	rcd_buf[0] = 0xE5;
810 	rcd_buf[1] = 0xF1;
811 	rcd_buf[2] = 0x4B;
812 	rcd_buf[3] = 0xF0;
813 	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
814 	if (IS_ERR(cqr)) {
815 		ret =  PTR_ERR(cqr);
816 		goto out_error;
817 	}
818 	ret = dasd_sleep_on(cqr);
819 	/*
820 	 * on success we update the user input parms
821 	 */
822 	dasd_sfree_request(cqr, cqr->memdev);
823 	if (ret)
824 		goto out_error;
825 
826 	*rcd_buffer_size = ciw->count;
827 	*rcd_buffer = rcd_buf;
828 	return 0;
829 out_error:
830 	kfree(rcd_buf);
831 	*rcd_buffer = NULL;
832 	*rcd_buffer_size = 0;
833 	return ret;
834 }
835 
836 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
837 {
838 
839 	struct dasd_sneq *sneq;
840 	int i, count;
841 
842 	private->ned = NULL;
843 	private->sneq = NULL;
844 	private->vdsneq = NULL;
845 	private->gneq = NULL;
846 	count = private->conf_len / sizeof(struct dasd_sneq);
847 	sneq = (struct dasd_sneq *)private->conf_data;
848 	for (i = 0; i < count; ++i) {
849 		if (sneq->flags.identifier == 1 && sneq->format == 1)
850 			private->sneq = sneq;
851 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
852 			private->vdsneq = (struct vd_sneq *)sneq;
853 		else if (sneq->flags.identifier == 2)
854 			private->gneq = (struct dasd_gneq *)sneq;
855 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
856 			private->ned = (struct dasd_ned *)sneq;
857 		sneq++;
858 	}
859 	if (!private->ned || !private->gneq) {
860 		private->ned = NULL;
861 		private->sneq = NULL;
862 		private->vdsneq = NULL;
863 		private->gneq = NULL;
864 		return -EINVAL;
865 	}
866 	return 0;
867 
868 };
869 
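/*
 * Scan the configuration data for the general NEQ (identifier 2) and
 * return the path access bits (the low three bits of byte 18), which
 * dasd_eckd_read_conf uses to classify the channel path.
 */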
870 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
871 {
872 	struct dasd_gneq *gneq;
873 	int i, count, found;
874 
875 	count = conf_len / sizeof(*gneq);
876 	gneq = (struct dasd_gneq *)conf_data;
877 	found = 0;
878 	for (i = 0; i < count; ++i) {
879 		if (gneq->flags.identifier == 2) {
880 			found = 1;
881 			break;
882 		}
883 		gneq++;
884 	}
885 	if (found)
886 		return ((char *)gneq)[18] & 0x07;
887 	else
888 		return 0;
889 }
890 
891 static int dasd_eckd_read_conf(struct dasd_device *device)
892 {
893 	void *conf_data;
894 	int conf_len, conf_data_saved;
895 	int rc;
896 	__u8 lpm;
897 	struct dasd_eckd_private *private;
898 	struct dasd_eckd_path *path_data;
899 
900 	private = (struct dasd_eckd_private *) device->private;
901 	path_data = (struct dasd_eckd_path *) &private->path_data;
902 	path_data->opm = ccw_device_get_path_mask(device->cdev);
903 	lpm = 0x80;
904 	conf_data_saved = 0;
905 	/* get configuration data per operational path */
906 	for (lpm = 0x80; lpm; lpm>>= 1) {
907 		if (lpm & path_data->opm){
908 			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
909 						     &conf_len, lpm);
910 			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
911 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
912 					  "Read configuration data returned "
913 					  "error %d", rc);
914 				return rc;
915 			}
916 			if (conf_data == NULL) {
917 				DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
918 						"No configuration data "
919 						"retrieved");
920 				continue;	/* no error */
921 			}
922 			/* save first valid configuration data */
923 			if (!conf_data_saved) {
924 				kfree(private->conf_data);
925 				private->conf_data = conf_data;
926 				private->conf_len = conf_len;
927 				if (dasd_eckd_identify_conf_parts(private)) {
928 					private->conf_data = NULL;
929 					private->conf_len = 0;
930 					kfree(conf_data);
931 					continue;
932 				}
933 				conf_data_saved++;
934 			}
935 			switch (dasd_eckd_path_access(conf_data, conf_len)) {
936 			case 0x02:
937 				path_data->npm |= lpm;
938 				break;
939 			case 0x03:
940 				path_data->ppm |= lpm;
941 				break;
942 			}
943 			if (conf_data != private->conf_data)
944 				kfree(conf_data);
945 		}
946 	}
947 	return 0;
948 }
949 
950 static int dasd_eckd_read_features(struct dasd_device *device)
951 {
952 	struct dasd_psf_prssd_data *prssdp;
953 	struct dasd_rssd_features *features;
954 	struct dasd_ccw_req *cqr;
955 	struct ccw1 *ccw;
956 	int rc;
957 	struct dasd_eckd_private *private;
958 
959 	private = (struct dasd_eckd_private *) device->private;
960 	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
961 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
962 				   (sizeof(struct dasd_psf_prssd_data) +
963 				    sizeof(struct dasd_rssd_features)),
964 				   device);
965 	if (IS_ERR(cqr)) {
966 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
967 				"allocate initialization request");
968 		return PTR_ERR(cqr);
969 	}
970 	cqr->startdev = device;
971 	cqr->memdev = device;
972 	cqr->block = NULL;
973 	cqr->retries = 256;
974 	cqr->expires = 10 * HZ;
975 
976 	/* Prepare for Read Subsystem Data */
977 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
978 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
979 	prssdp->order = PSF_ORDER_PRSSD;
980 	prssdp->suborder = 0x41;	/* Read Feature Codes */
981 	/* all other bytes of prssdp must be zero */
982 
983 	ccw = cqr->cpaddr;
984 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
985 	ccw->count = sizeof(struct dasd_psf_prssd_data);
986 	ccw->flags |= CCW_FLAG_CC;
987 	ccw->cda = (__u32)(addr_t) prssdp;
988 
989 	/* Read Subsystem Data - feature codes */
990 	features = (struct dasd_rssd_features *) (prssdp + 1);
991 	memset(features, 0, sizeof(struct dasd_rssd_features));
992 
993 	ccw++;
994 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
995 	ccw->count = sizeof(struct dasd_rssd_features);
996 	ccw->cda = (__u32)(addr_t) features;
997 
998 	cqr->buildclk = get_clock();
999 	cqr->status = DASD_CQR_FILLED;
1000 	rc = dasd_sleep_on(cqr);
1001 	if (rc == 0) {
1002 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
1003 		features = (struct dasd_rssd_features *) (prssdp + 1);
1004 		memcpy(&private->features, features,
1005 		       sizeof(struct dasd_rssd_features));
1006 	} else
1007 		dev_warn(&device->cdev->dev, "Reading device feature codes"
1008 			 " failed with rc=%d\n", rc);
1009 	dasd_sfree_request(cqr, cqr->memdev);
1010 	return rc;
1011 }
1012 
1013 
1014 /*
1015  * Build CP for Perform Subsystem Function - SSC.
1016  */
1017 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1018 						    int enable_pav)
1019 {
1020 	struct dasd_ccw_req *cqr;
1021 	struct dasd_psf_ssc_data *psf_ssc_data;
1022 	struct ccw1 *ccw;
1023 
1024 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1025 				  sizeof(struct dasd_psf_ssc_data),
1026 				  device);
1027 
1028 	if (IS_ERR(cqr)) {
1029 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1030 			   "Could not allocate PSF-SSC request");
1031 		return cqr;
1032 	}
1033 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1034 	psf_ssc_data->order = PSF_ORDER_SSC;
1035 	psf_ssc_data->suborder = 0xc0;
1036 	if (enable_pav) {
1037 		psf_ssc_data->suborder |= 0x08;
1038 		psf_ssc_data->reserved[0] = 0x88;
1039 	}
1040 	ccw = cqr->cpaddr;
1041 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1042 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1043 	ccw->count = 66;
1044 
1045 	cqr->startdev = device;
1046 	cqr->memdev = device;
1047 	cqr->block = NULL;
1048 	cqr->retries = 256;
1049 	cqr->expires = 10*HZ;
1050 	cqr->buildclk = get_clock();
1051 	cqr->status = DASD_CQR_FILLED;
1052 	return cqr;
1053 }
1054 
1055 /*
1056  * Perform Subsystem Function.
1057  * It is necessary to trigger CIO for channel revalidation since this
1058  * call might change the behaviour of DASD devices.
1059  */
1060 static int
1061 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
1062 {
1063 	struct dasd_ccw_req *cqr;
1064 	int rc;
1065 
1066 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1067 	if (IS_ERR(cqr))
1068 		return PTR_ERR(cqr);
1069 
1070 	rc = dasd_sleep_on(cqr);
1071 	if (!rc)
1072 		/* trigger CIO to reprobe devices */
1073 		css_schedule_reprobe();
1074 	dasd_sfree_request(cqr, cqr->memdev);
1075 	return rc;
1076 }
1077 
1078 /*
1079  * Validate the storage server of the current device.
1080  */
1081 static void dasd_eckd_validate_server(struct dasd_device *device)
1082 {
1083 	int rc;
1084 	struct dasd_eckd_private *private;
1085 	int enable_pav;
1086 
1087 	if (dasd_nopav || MACHINE_IS_VM)
1088 		enable_pav = 0;
1089 	else
1090 		enable_pav = 1;
1091 	rc = dasd_eckd_psf_ssc(device, enable_pav);
1092 
1093 	/* the requested feature may not be available on the server,
1094 	 * therefore just report the error and go ahead */
1095 	private = (struct dasd_eckd_private *) device->private;
1096 	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1097 			"returned rc=%d", private->uid.ssid, rc);
1098 }
1099 
1100 /*
1101  * Check device characteristics.
1102  * If the device is accessible using ECKD discipline, the device is enabled.
1103  */
1104 static int
1105 dasd_eckd_check_characteristics(struct dasd_device *device)
1106 {
1107 	struct dasd_eckd_private *private;
1108 	struct dasd_block *block;
1109 	struct dasd_uid temp_uid;
1110 	int is_known, rc;
1111 	int readonly;
1112 
1113 	if (!ccw_device_is_pathgroup(device->cdev)) {
1114 		dev_warn(&device->cdev->dev,
1115 			 "A channel path group could not be established\n");
1116 		return -EIO;
1117 	}
1118 	if (!ccw_device_is_multipath(device->cdev)) {
1119 		dev_info(&device->cdev->dev,
1120 			 "The DASD is not operating in multipath mode\n");
1121 	}
1122 	private = (struct dasd_eckd_private *) device->private;
1123 	if (!private) {
1124 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1125 		if (!private) {
1126 			dev_warn(&device->cdev->dev,
1127 				 "Allocating memory for private DASD data "
1128 				 "failed\n");
1129 			return -ENOMEM;
1130 		}
1131 		device->private = (void *) private;
1132 	} else {
1133 		memset(private, 0, sizeof(*private));
1134 	}
1135 	/* Invalidate status of initial analysis. */
1136 	private->init_cqr_status = -1;
1137 	/* Set default cache operations. */
1138 	private->attrib.operation = DASD_NORMAL_CACHE;
1139 	private->attrib.nr_cyl = 0;
1140 
1141 	/* Read Configuration Data */
1142 	rc = dasd_eckd_read_conf(device);
1143 	if (rc)
1144 		goto out_err1;
1145 
1146 	/* Generate device unique id */
1147 	rc = dasd_eckd_generate_uid(device);
1148 	if (rc)
1149 		goto out_err1;
1150 
1151 	dasd_eckd_get_uid(device, &temp_uid);
1152 	if (temp_uid.type == UA_BASE_DEVICE) {
1153 		block = dasd_alloc_block();
1154 		if (IS_ERR(block)) {
1155 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1156 					"could not allocate dasd "
1157 					"block structure");
1158 			rc = PTR_ERR(block);
1159 			goto out_err1;
1160 		}
1161 		device->block = block;
1162 		block->base = device;
1163 	}
1164 
1165 	/* register lcu with alias handling, enable PAV if this is a new lcu */
1166 	is_known = dasd_alias_make_device_known_to_lcu(device);
1167 	if (is_known < 0) {
1168 		rc = is_known;
1169 		goto out_err2;
1170 	}
1171 	/*
1172 	 * dasd_eckd_validate_server is done on the first device that
1173 	 * is found for an LCU. All other devices have to wait
1174 	 * for it, so that they read the correct feature codes.
1175 	 */
1176 	if (!is_known) {
1177 		dasd_eckd_validate_server(device);
1178 		dasd_alias_lcu_setup_complete(device);
1179 	} else
1180 		dasd_alias_wait_for_lcu_setup(device);
1181 
1182 	/* device may report different configuration data after LCU setup */
1183 	rc = dasd_eckd_read_conf(device);
1184 	if (rc)
1185 		goto out_err3;
1186 
1187 	/* Read Feature Codes */
1188 	dasd_eckd_read_features(device);
1189 
1190 	/* Read Device Characteristics */
1191 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1192 					 &private->rdc_data, 64);
1193 	if (rc) {
1194 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1195 				"Read device characteristic failed, rc=%d", rc);
1196 		goto out_err3;
1197 	}
1198 	/* find the valid cylinder size */
1199 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1200 	    private->rdc_data.long_no_cyl)
1201 		private->real_cyl = private->rdc_data.long_no_cyl;
1202 	else
1203 		private->real_cyl = private->rdc_data.no_cyl;
1204 
1205 	readonly = dasd_device_is_ro(device);
1206 	if (readonly)
1207 		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
1208 
1209 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1210 		 "with %d cylinders, %d heads, %d sectors%s\n",
1211 		 private->rdc_data.dev_type,
1212 		 private->rdc_data.dev_model,
1213 		 private->rdc_data.cu_type,
1214 		 private->rdc_data.cu_model.model,
1215 		 private->real_cyl,
1216 		 private->rdc_data.trk_per_cyl,
1217 		 private->rdc_data.sec_per_trk,
1218 		 readonly ? ", read-only device" : "");
1219 	return 0;
1220 
1221 out_err3:
1222 	dasd_alias_disconnect_device_from_lcu(device);
1223 out_err2:
1224 	dasd_free_block(device->block);
1225 	device->block = NULL;
1226 out_err1:
1227 	kfree(private->conf_data);
1228 	kfree(device->private);
1229 	device->private = NULL;
1230 	return rc;
1231 }
1232 
1233 static void dasd_eckd_uncheck_device(struct dasd_device *device)
1234 {
1235 	struct dasd_eckd_private *private;
1236 
1237 	private = (struct dasd_eckd_private *) device->private;
1238 	dasd_alias_disconnect_device_from_lcu(device);
1239 	private->ned = NULL;
1240 	private->sneq = NULL;
1241 	private->vdsneq = NULL;
1242 	private->gneq = NULL;
1243 	private->conf_len = 0;
1244 	kfree(private->conf_data);
1245 	private->conf_data = NULL;
1246 }
1247 
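/*
 * Build the initial analysis channel program: read the count fields of
 * the first four records on track 0 and of the first record on track 2.
 * dasd_eckd_end_analysis evaluates the result to decide whether the
 * volume uses the compatible disk layout or the linux disk layout and
 * which block size it is formatted with.
 */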
1248 static struct dasd_ccw_req *
1249 dasd_eckd_analysis_ccw(struct dasd_device *device)
1250 {
1251 	struct dasd_eckd_private *private;
1252 	struct eckd_count *count_data;
1253 	struct LO_eckd_data *LO_data;
1254 	struct dasd_ccw_req *cqr;
1255 	struct ccw1 *ccw;
1256 	int cplength, datasize;
1257 	int i;
1258 
1259 	private = (struct dasd_eckd_private *) device->private;
1260 
1261 	cplength = 8;
1262 	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1263 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1264 	if (IS_ERR(cqr))
1265 		return cqr;
1266 	ccw = cqr->cpaddr;
1267 	/* Define extent for the first 3 tracks. */
1268 	define_extent(ccw++, cqr->data, 0, 2,
1269 		      DASD_ECKD_CCW_READ_COUNT, device);
1270 	LO_data = cqr->data + sizeof(struct DE_eckd_data);
1271 	/* Locate record for the first 4 records on track 0. */
1272 	ccw[-1].flags |= CCW_FLAG_CC;
1273 	locate_record(ccw++, LO_data++, 0, 0, 4,
1274 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1275 
1276 	count_data = private->count_area;
1277 	for (i = 0; i < 4; i++) {
1278 		ccw[-1].flags |= CCW_FLAG_CC;
1279 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1280 		ccw->flags = 0;
1281 		ccw->count = 8;
1282 		ccw->cda = (__u32)(addr_t) count_data;
1283 		ccw++;
1284 		count_data++;
1285 	}
1286 
1287 	/* Locate record for the first record on track 2. */
1288 	ccw[-1].flags |= CCW_FLAG_CC;
1289 	locate_record(ccw++, LO_data++, 2, 0, 1,
1290 		      DASD_ECKD_CCW_READ_COUNT, device, 0);
1291 	/* Read count ccw. */
1292 	ccw[-1].flags |= CCW_FLAG_CC;
1293 	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1294 	ccw->flags = 0;
1295 	ccw->count = 8;
1296 	ccw->cda = (__u32)(addr_t) count_data;
1297 
1298 	cqr->block = NULL;
1299 	cqr->startdev = device;
1300 	cqr->memdev = device;
1301 	cqr->retries = 255;
1302 	cqr->buildclk = get_clock();
1303 	cqr->status = DASD_CQR_FILLED;
1304 	return cqr;
1305 }
1306 
1307 /* differentiate between 'no record found' and any other error */
1308 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
1309 {
1310 	char *sense;
1311 	if (init_cqr->status == DASD_CQR_DONE)
1312 		return INIT_CQR_OK;
1313 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
1314 		 init_cqr->status == DASD_CQR_FAILED) {
1315 		sense = dasd_get_sense(&init_cqr->irb);
1316 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
1317 			return INIT_CQR_UNFORMATTED;
1318 		else
1319 			return INIT_CQR_ERROR;
1320 	} else
1321 		return INIT_CQR_ERROR;
1322 }
1323 
1324 /*
1325  * This is the callback function for the init_analysis cqr. It saves
1326  * the status of the initial analysis ccw before it frees it and kicks
1327  * the device to continue the startup sequence. This will call
1328  * dasd_eckd_do_analysis again (if the device has not been marked
1329  * for deletion in the meantime).
1330  */
1331 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
1332 					void *data)
1333 {
1334 	struct dasd_eckd_private *private;
1335 	struct dasd_device *device;
1336 
1337 	device = init_cqr->startdev;
1338 	private = (struct dasd_eckd_private *) device->private;
1339 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
1340 	dasd_sfree_request(init_cqr, device);
1341 	dasd_kick_device(device);
1342 }
1343 
1344 static int dasd_eckd_start_analysis(struct dasd_block *block)
1345 {
1346 	struct dasd_eckd_private *private;
1347 	struct dasd_ccw_req *init_cqr;
1348 
1349 	private = (struct dasd_eckd_private *) block->base->private;
1350 	init_cqr = dasd_eckd_analysis_ccw(block->base);
1351 	if (IS_ERR(init_cqr))
1352 		return PTR_ERR(init_cqr);
1353 	init_cqr->callback = dasd_eckd_analysis_callback;
1354 	init_cqr->callback_data = NULL;
1355 	init_cqr->expires = 5*HZ;
1356 	/* first try without ERP, so we can later handle unformatted
1357 	 * devices as a special case
1358 	 */
1359 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
1360 	init_cqr->retries = 0;
1361 	dasd_add_request_head(init_cqr);
1362 	return -EAGAIN;
1363 }
1364 
1365 static int dasd_eckd_end_analysis(struct dasd_block *block)
1366 {
1367 	struct dasd_device *device;
1368 	struct dasd_eckd_private *private;
1369 	struct eckd_count *count_area;
1370 	unsigned int sb, blk_per_trk;
1371 	int status, i;
1372 	struct dasd_ccw_req *init_cqr;
1373 
1374 	device = block->base;
1375 	private = (struct dasd_eckd_private *) device->private;
1376 	status = private->init_cqr_status;
1377 	private->init_cqr_status = -1;
1378 	if (status == INIT_CQR_ERROR) {
1379 		/* try again, this time with full ERP */
1380 		init_cqr = dasd_eckd_analysis_ccw(device);
1381 		dasd_sleep_on(init_cqr);
1382 		status = dasd_eckd_analysis_evaluation(init_cqr);
1383 		dasd_sfree_request(init_cqr, device);
1384 	}
1385 
1386 	if (status == INIT_CQR_UNFORMATTED) {
1387 		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
1388 		return -EMEDIUMTYPE;
1389 	} else if (status == INIT_CQR_ERROR) {
1390 		dev_err(&device->cdev->dev,
1391 			"Detecting the DASD disk layout failed because "
1392 			"of an I/O error\n");
1393 		return -EIO;
1394 	}
1395 
1396 	private->uses_cdl = 1;
1397 	/* Check Track 0 for Compatible Disk Layout */
1398 	count_area = NULL;
1399 	for (i = 0; i < 3; i++) {
1400 		if (private->count_area[i].kl != 4 ||
1401 		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1402 			private->uses_cdl = 0;
1403 			break;
1404 		}
1405 	}
1406 	if (i == 3)
1407 		count_area = &private->count_area[4];
1408 
1409 	if (private->uses_cdl == 0) {
1410 		for (i = 0; i < 5; i++) {
1411 			if ((private->count_area[i].kl != 0) ||
1412 			    (private->count_area[i].dl !=
1413 			     private->count_area[0].dl))
1414 				break;
1415 		}
1416 		if (i == 5)
1417 			count_area = &private->count_area[0];
1418 	} else {
1419 		if (private->count_area[3].record == 1)
1420 			dev_warn(&device->cdev->dev,
1421 				 "Track 0 has no records following the VTOC\n");
1422 	}
1423 	if (count_area != NULL && count_area->kl == 0) {
1424 		/* we found nothing violating our disk layout */
1425 		if (dasd_check_blocksize(count_area->dl) == 0)
1426 			block->bp_block = count_area->dl;
1427 	}
1428 	if (block->bp_block == 0) {
1429 		dev_warn(&device->cdev->dev,
1430 			 "The disk layout of the DASD is not supported\n");
1431 		return -EMEDIUMTYPE;
1432 	}
1433 	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
1434 	for (sb = 512; sb < block->bp_block; sb = sb << 1)
1435 		block->s2b_shift++;
1436 
1437 	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1438 	block->blocks = (private->real_cyl *
1439 			  private->rdc_data.trk_per_cyl *
1440 			  blk_per_trk);
1441 
1442 	dev_info(&device->cdev->dev,
1443 		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
1444 		 "%s\n", (block->bp_block >> 10),
1445 		 ((private->real_cyl *
1446 		   private->rdc_data.trk_per_cyl *
1447 		   blk_per_trk * (block->bp_block >> 9)) >> 1),
1448 		 ((blk_per_trk * block->bp_block) >> 10),
1449 		 private->uses_cdl ?
1450 		 "compatible disk layout" : "linux disk layout");
1451 
1452 	return 0;
1453 }
1454 
1455 static int dasd_eckd_do_analysis(struct dasd_block *block)
1456 {
1457 	struct dasd_eckd_private *private;
1458 
1459 	private = (struct dasd_eckd_private *) block->base->private;
1460 	if (private->init_cqr_status < 0)
1461 		return dasd_eckd_start_analysis(block);
1462 	else
1463 		return dasd_eckd_end_analysis(block);
1464 }
1465 
1466 static int dasd_eckd_ready_to_online(struct dasd_device *device)
1467 {
1468 	return dasd_alias_add_device(device);
1469 };
1470 
1471 static int dasd_eckd_online_to_ready(struct dasd_device *device)
1472 {
1473 	cancel_work_sync(&device->reload_device);
1474 	return dasd_alias_remove_device(device);
1475 };
1476 
1477 static int
1478 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1479 {
1480 	struct dasd_eckd_private *private;
1481 
1482 	private = (struct dasd_eckd_private *) block->base->private;
1483 	if (dasd_check_blocksize(block->bp_block) == 0) {
1484 		geo->sectors = recs_per_track(&private->rdc_data,
1485 					      0, block->bp_block);
1486 	}
1487 	geo->cylinders = private->rdc_data.no_cyl;
1488 	geo->heads = private->rdc_data.trk_per_cyl;
1489 	return 0;
1490 }
1491 
1492 static struct dasd_ccw_req *
1493 dasd_eckd_format_device(struct dasd_device * device,
1494 			struct format_data_t * fdata)
1495 {
1496 	struct dasd_eckd_private *private;
1497 	struct dasd_ccw_req *fcp;
1498 	struct eckd_count *ect;
1499 	struct ccw1 *ccw;
1500 	void *data;
1501 	int rpt;
1502 	struct ch_t address;
1503 	int cplength, datasize;
1504 	int i;
1505 	int intensity = 0;
1506 	int r0_perm;
1507 
1508 	private = (struct dasd_eckd_private *) device->private;
1509 	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1510 	set_ch_t(&address,
1511 		 fdata->start_unit / private->rdc_data.trk_per_cyl,
1512 		 fdata->start_unit % private->rdc_data.trk_per_cyl);
1513 
1514 	/* Sanity checks. */
1515 	if (fdata->start_unit >=
1516 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
1517 		dev_warn(&device->cdev->dev, "Start track number %d used in "
1518 			 "formatting is too big\n", fdata->start_unit);
1519 		return ERR_PTR(-EINVAL);
1520 	}
1521 	if (fdata->start_unit > fdata->stop_unit) {
1522 		dev_warn(&device->cdev->dev, "Start track %d used in "
1523 			 "formatting exceeds end track\n", fdata->start_unit);
1524 		return ERR_PTR(-EINVAL);
1525 	}
1526 	if (dasd_check_blocksize(fdata->blksize) != 0) {
1527 		dev_warn(&device->cdev->dev,
1528 			 "The DASD cannot be formatted with block size %d\n",
1529 			 fdata->blksize);
1530 		return ERR_PTR(-EINVAL);
1531 	}
1532 
1533 	/*
1534 	 * fdata->intensity is a bit string that tells us what to do:
1535 	 *   Bit 0: write record zero
1536 	 *   Bit 1: write home address, currently not supported
1537 	 *   Bit 2: invalidate tracks
1538 	 *   Bit 3: use OS/390 compatible disk layout (cdl)
1539 	 *   Bit 4: do not allow storage subsystem to modify record zero
1540 	 * Only some bit combinations make sense.
1541 	 */
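	/*
	 * Example: intensity 0x09 means "write record zero and format the
	 * track using the compatible disk layout"; 0x19 would do the same
	 * but keep the storage subsystem from modifying record zero.
	 */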
1542 	if (fdata->intensity & 0x10) {
1543 		r0_perm = 0;
1544 		intensity = fdata->intensity & ~0x10;
1545 	} else {
1546 		r0_perm = 1;
1547 		intensity = fdata->intensity;
1548 	}
1549 	switch (intensity) {
1550 	case 0x00:	/* Normal format */
1551 	case 0x08:	/* Normal format, use cdl. */
1552 		cplength = 2 + rpt;
1553 		datasize = sizeof(struct DE_eckd_data) +
1554 			sizeof(struct LO_eckd_data) +
1555 			rpt * sizeof(struct eckd_count);
1556 		break;
1557 	case 0x01:	/* Write record zero and format track. */
1558 	case 0x09:	/* Write record zero and format track, use cdl. */
1559 		cplength = 3 + rpt;
1560 		datasize = sizeof(struct DE_eckd_data) +
1561 			sizeof(struct LO_eckd_data) +
1562 			sizeof(struct eckd_count) +
1563 			rpt * sizeof(struct eckd_count);
1564 		break;
1565 	case 0x04:	/* Invalidate track. */
1566 	case 0x0c:	/* Invalidate track, use cdl. */
1567 		cplength = 3;
1568 		datasize = sizeof(struct DE_eckd_data) +
1569 			sizeof(struct LO_eckd_data) +
1570 			sizeof(struct eckd_count);
1571 		break;
1572 	default:
1573 		dev_warn(&device->cdev->dev, "An I/O control call used "
1574 			 "incorrect flags 0x%x\n", fdata->intensity);
1575 		return ERR_PTR(-EINVAL);
1576 	}
1577 	/* Allocate the format ccw request. */
1578 	fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
1579 	if (IS_ERR(fcp))
1580 		return fcp;
1581 
1582 	data = fcp->data;
1583 	ccw = fcp->cpaddr;
1584 
1585 	switch (intensity & ~0x08) {
1586 	case 0x00: /* Normal format. */
1587 		define_extent(ccw++, (struct DE_eckd_data *) data,
1588 			      fdata->start_unit, fdata->start_unit,
1589 			      DASD_ECKD_CCW_WRITE_CKD, device);
1590 		/* grant subsystem permission to format R0 */
1591 		if (r0_perm)
1592 			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
1593 		data += sizeof(struct DE_eckd_data);
1594 		ccw[-1].flags |= CCW_FLAG_CC;
1595 		locate_record(ccw++, (struct LO_eckd_data *) data,
1596 			      fdata->start_unit, 0, rpt,
1597 			      DASD_ECKD_CCW_WRITE_CKD, device,
1598 			      fdata->blksize);
1599 		data += sizeof(struct LO_eckd_data);
1600 		break;
1601 	case 0x01: /* Write record zero + format track. */
1602 		define_extent(ccw++, (struct DE_eckd_data *) data,
1603 			      fdata->start_unit, fdata->start_unit,
1604 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1605 			      device);
1606 		data += sizeof(struct DE_eckd_data);
1607 		ccw[-1].flags |= CCW_FLAG_CC;
1608 		locate_record(ccw++, (struct LO_eckd_data *) data,
1609 			      fdata->start_unit, 0, rpt + 1,
1610 			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1611 			      device->block->bp_block);
1612 		data += sizeof(struct LO_eckd_data);
1613 		break;
1614 	case 0x04: /* Invalidate track. */
1615 		define_extent(ccw++, (struct DE_eckd_data *) data,
1616 			      fdata->start_unit, fdata->start_unit,
1617 			      DASD_ECKD_CCW_WRITE_CKD, device);
1618 		data += sizeof(struct DE_eckd_data);
1619 		ccw[-1].flags |= CCW_FLAG_CC;
1620 		locate_record(ccw++, (struct LO_eckd_data *) data,
1621 			      fdata->start_unit, 0, 1,
1622 			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
1623 		data += sizeof(struct LO_eckd_data);
1624 		break;
1625 	}
1626 	if (intensity & 0x01) {	/* write record zero */
1627 		ect = (struct eckd_count *) data;
1628 		data += sizeof(struct eckd_count);
1629 		ect->cyl = address.cyl;
1630 		ect->head = address.head;
1631 		ect->record = 0;
1632 		ect->kl = 0;
1633 		ect->dl = 8;
1634 		ccw[-1].flags |= CCW_FLAG_CC;
1635 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1636 		ccw->flags = CCW_FLAG_SLI;
1637 		ccw->count = 8;
1638 		ccw->cda = (__u32)(addr_t) ect;
1639 		ccw++;
1640 	}
1641 	if ((intensity & ~0x08) & 0x04) {	/* erase track */
1642 		ect = (struct eckd_count *) data;
1643 		data += sizeof(struct eckd_count);
1644 		ect->cyl = address.cyl;
1645 		ect->head = address.head;
1646 		ect->record = 1;
1647 		ect->kl = 0;
1648 		ect->dl = 0;
1649 		ccw[-1].flags |= CCW_FLAG_CC;
1650 		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1651 		ccw->flags = CCW_FLAG_SLI;
1652 		ccw->count = 8;
1653 		ccw->cda = (__u32)(addr_t) ect;
1654 	} else {		/* write remaining records */
1655 		for (i = 0; i < rpt; i++) {
1656 			ect = (struct eckd_count *) data;
1657 			data += sizeof(struct eckd_count);
1658 			ect->cyl = address.cyl;
1659 			ect->head = address.head;
1660 			ect->record = i + 1;
1661 			ect->kl = 0;
1662 			ect->dl = fdata->blksize;
1663 			/* Check for special tracks 0-1 when formatting CDL */
1664 			if ((intensity & 0x08) &&
1665 			    fdata->start_unit == 0) {
1666 				if (i < 3) {
1667 					ect->kl = 4;
1668 					ect->dl = sizes_trk0[i] - 4;
1669 				}
1670 			}
1671 			if ((intensity & 0x08) &&
1672 			    fdata->start_unit == 1) {
1673 				ect->kl = 44;
1674 				ect->dl = LABEL_SIZE - 44;
1675 			}
1676 			ccw[-1].flags |= CCW_FLAG_CC;
1677 			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1678 			ccw->flags = CCW_FLAG_SLI;
1679 			ccw->count = 8;
1680 			ccw->cda = (__u32)(addr_t) ect;
1681 			ccw++;
1682 		}
1683 	}
1684 	fcp->startdev = device;
1685 	fcp->memdev = device;
1686 	fcp->retries = 256;
1687 	fcp->buildclk = get_clock();
1688 	fcp->status = DASD_CQR_FILLED;
1689 	return fcp;
1690 }
1691 
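/*
 * Requeue a terminated request: reset its status to "filled" and, if it
 * was started on an alias device, redirect the channel program back to
 * the base device.
 */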
1692 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1693 {
1694 	cqr->status = DASD_CQR_FILLED;
1695 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
1696 		dasd_eckd_reset_ccw_to_base_io(cqr);
1697 		cqr->startdev = cqr->block->base;
1698 	}
1699 };
1700 
1701 static dasd_erp_fn_t
1702 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1703 {
1704 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1705 	struct ccw_device *cdev = device->cdev;
1706 
1707 	switch (cdev->id.cu_type) {
1708 	case 0x3990:
1709 	case 0x2105:
1710 	case 0x2107:
1711 	case 0x1750:
1712 		return dasd_3990_erp_action;
1713 	case 0x9343:
1714 	case 0x3880:
1715 	default:
1716 		return dasd_default_erp_action;
1717 	}
1718 }
1719 
1720 static dasd_erp_fn_t
1721 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1722 {
1723 	return dasd_default_erp_postaction;
1724 }
1725 
1726 
1727 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1728 						   struct irb *irb)
1729 {
1730 	char mask;
1731 	char *sense = NULL;
1732 	struct dasd_eckd_private *private;
1733 
1734 	private = (struct dasd_eckd_private *) device->private;
1735 	/* first of all check for state change pending interrupt */
1736 	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1737 	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
1738 		/* for alias devices only and not in offline processing */
1739 		if (!device->block && private->lcu &&
1740 		    !test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
1741 			/*
1742 			 * the state change could be caused by an alias
1743 			 * reassignment; remove the device from alias handling
1744 			 * to prevent new requests from being scheduled on
1745 			 * the wrong alias device
1746 			 */
1747 			dasd_alias_remove_device(device);
1748 
1749 			/* schedule worker to reload device */
1750 			dasd_reload_device(device);
1751 		}
1752 
1753 		dasd_generic_handle_state_change(device);
1754 		return;
1755 	}
1756 
1757 	/* summary unit check */
1758 	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
1759 	    (irb->ecw[7] == 0x0D)) {
1760 		dasd_alias_handle_summary_unit_check(device, irb);
1761 		return;
1762 	}
1763 
1764 	sense = dasd_get_sense(irb);
1765 	/* service information message SIM */
1766 	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
1767 	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1768 		dasd_3990_erp_handle_sim(device, sense);
1769 		dasd_schedule_device_bh(device);
1770 		return;
1771 	}
1772 
1773 	if ((scsw_cc(&irb->scsw) == 1) &&
1774 	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
1775 	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
1776 	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
1777 		/* fake irb, do nothing, they are handled elsewhere */
1778 		dasd_schedule_device_bh(device);
1779 		return;
1780 	}
1781 
1782 	if (!sense) {
1783 		/* just report other unsolicited interrupts */
1784 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1785 			    "unsolicited interrupt received");
1786 	} else {
1787 		DBF_DEV_EVENT(DBF_ERR, device, "%s",
1788 			    "unsolicited interrupt received "
1789 			    "(sense available)");
1790 		device->discipline->dump_sense_dbf(device, irb, "unsolicited");
1791 	}
1792 
1793 	dasd_schedule_device_bh(device);
1794 	return;
1795 }
1796 
1797 
1798 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
1799 					       struct dasd_device *startdev,
1800 					       struct dasd_block *block,
1801 					       struct request *req,
1802 					       sector_t first_rec,
1803 					       sector_t last_rec,
1804 					       sector_t first_trk,
1805 					       sector_t last_trk,
1806 					       unsigned int first_offs,
1807 					       unsigned int last_offs,
1808 					       unsigned int blk_per_trk,
1809 					       unsigned int blksize)
1810 {
1811 	struct dasd_eckd_private *private;
1812 	unsigned long *idaws;
1813 	struct LO_eckd_data *LO_data;
1814 	struct dasd_ccw_req *cqr;
1815 	struct ccw1 *ccw;
1816 	struct req_iterator iter;
1817 	struct bio_vec *bv;
1818 	char *dst;
1819 	unsigned int off;
1820 	int count, cidaw, cplength, datasize;
1821 	sector_t recid;
1822 	unsigned char cmd, rcmd;
1823 	int use_prefix;
1824 	struct dasd_device *basedev;
1825 
1826 	basedev = block->base;
1827 	private = (struct dasd_eckd_private *) basedev->private;
1828 	if (rq_data_dir(req) == READ)
1829 		cmd = DASD_ECKD_CCW_READ_MT;
1830 	else if (rq_data_dir(req) == WRITE)
1831 		cmd = DASD_ECKD_CCW_WRITE_MT;
1832 	else
1833 		return ERR_PTR(-EINVAL);
1834 
1835 	/* Check struct bio and count the number of blocks for the request. */
1836 	count = 0;
1837 	cidaw = 0;
1838 	rq_for_each_segment(bv, req, iter) {
1839 		if (bv->bv_len & (blksize - 1))
1840 			/* ECKD can only do full blocks. */
1841 			return ERR_PTR(-EINVAL);
1842 		count += bv->bv_len >> (block->s2b_shift + 9);
1843 #if defined(CONFIG_64BIT)
1844 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1845 			cidaw += bv->bv_len >> (block->s2b_shift + 9);
1846 #endif
1847 	}
1848 	/* Paranoia. */
1849 	if (count != last_rec - first_rec + 1)
1850 		return ERR_PTR(-EINVAL);
1851 
1852 	/* use the prefix command if available */
1853 	use_prefix = private->features.feature[8] & 0x01;
1854 	if (use_prefix) {
1855 		/* 1x prefix + number of blocks */
1856 		cplength = 2 + count;
1857 		/* 1x prefix + cidaws*sizeof(long) */
1858 		datasize = sizeof(struct PFX_eckd_data) +
1859 			sizeof(struct LO_eckd_data) +
1860 			cidaw * sizeof(unsigned long);
1861 	} else {
1862 		/* 1x define extent + 1x locate record + number of blocks */
1863 		cplength = 2 + count;
1864 		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1865 		datasize = sizeof(struct DE_eckd_data) +
1866 			sizeof(struct LO_eckd_data) +
1867 			cidaw * sizeof(unsigned long);
1868 	}
1869 	/* Find out the number of additional locate record ccws for cdl. */
1870 	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1871 		if (last_rec >= 2*blk_per_trk)
1872 			count = 2*blk_per_trk - first_rec;
1873 		cplength += count;
1874 		datasize += count*sizeof(struct LO_eckd_data);
1875 	}
1876 	/* Allocate the ccw request. */
1877 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
1878 				   startdev);
1879 	if (IS_ERR(cqr))
1880 		return cqr;
1881 	ccw = cqr->cpaddr;
1882 	/* First ccw is define extent or prefix. */
1883 	if (use_prefix) {
1884 		if (prefix(ccw++, cqr->data, first_trk,
1885 			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
1886 			/* Clock not in sync and XRC is enabled.
1887 			 * Try again later.
1888 			 */
1889 			dasd_sfree_request(cqr, startdev);
1890 			return ERR_PTR(-EAGAIN);
1891 		}
1892 		idaws = (unsigned long *) (cqr->data +
1893 					   sizeof(struct PFX_eckd_data));
1894 	} else {
1895 		if (define_extent(ccw++, cqr->data, first_trk,
1896 				  last_trk, cmd, startdev) == -EAGAIN) {
1897 			/* Clock not in sync and XRC is enabled.
1898 			 * Try again later.
1899 			 */
1900 			dasd_sfree_request(cqr, startdev);
1901 			return ERR_PTR(-EAGAIN);
1902 		}
1903 		idaws = (unsigned long *) (cqr->data +
1904 					   sizeof(struct DE_eckd_data));
1905 	}
1906 	/* Build locate_record + read/write ccws. */
1907 	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1908 	recid = first_rec;
1909 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1910 		/* Only standard blocks so there is just one locate record. */
1911 		ccw[-1].flags |= CCW_FLAG_CC;
1912 		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1913 			      last_rec - recid + 1, cmd, basedev, blksize);
1914 	}
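	/*
	 * Build one read/write ccw per block. For CDL formatted disks,
	 * blocks on the first two tracks get their own locate record
	 * ccw; the special CDL records additionally use an adjusted
	 * command code and record length.
	 */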
1915 	rq_for_each_segment(bv, req, iter) {
1916 		dst = page_address(bv->bv_page) + bv->bv_offset;
1917 		if (dasd_page_cache) {
1918 			char *copy = kmem_cache_alloc(dasd_page_cache,
1919 						      GFP_DMA | __GFP_NOWARN);
1920 			if (copy && rq_data_dir(req) == WRITE)
1921 				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1922 			if (copy)
1923 				dst = copy + bv->bv_offset;
1924 		}
1925 		for (off = 0; off < bv->bv_len; off += blksize) {
1926 			sector_t trkid = recid;
1927 			unsigned int recoffs = sector_div(trkid, blk_per_trk);
1928 			rcmd = cmd;
1929 			count = blksize;
1930 			/* Locate record for cdl special block ? */
1931 			if (private->uses_cdl && recid < 2*blk_per_trk) {
1932 				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1933 					rcmd |= 0x8;
1934 					count = dasd_eckd_cdl_reclen(recid);
1935 					if (count < blksize &&
1936 					    rq_data_dir(req) == READ)
1937 						memset(dst + count, 0xe5,
1938 						       blksize - count);
1939 				}
1940 				ccw[-1].flags |= CCW_FLAG_CC;
1941 				locate_record(ccw++, LO_data++,
1942 					      trkid, recoffs + 1,
1943 					      1, rcmd, basedev, count);
1944 			}
1945 			/* Locate record for standard blocks ? */
1946 			if (private->uses_cdl && recid == 2*blk_per_trk) {
1947 				ccw[-1].flags |= CCW_FLAG_CC;
1948 				locate_record(ccw++, LO_data++,
1949 					      trkid, recoffs + 1,
1950 					      last_rec - recid + 1,
1951 					      cmd, basedev, count);
1952 			}
1953 			/* Read/write ccw. */
1954 			ccw[-1].flags |= CCW_FLAG_CC;
1955 			ccw->cmd_code = rcmd;
1956 			ccw->count = count;
1957 			if (idal_is_needed(dst, blksize)) {
1958 				ccw->cda = (__u32)(addr_t) idaws;
1959 				ccw->flags = CCW_FLAG_IDA;
1960 				idaws = idal_create_words(idaws, dst, blksize);
1961 			} else {
1962 				ccw->cda = (__u32)(addr_t) dst;
1963 				ccw->flags = 0;
1964 			}
1965 			ccw++;
1966 			dst += blksize;
1967 			recid++;
1968 		}
1969 	}
1970 	if (blk_noretry_request(req) ||
1971 	    block->base->features & DASD_FEATURE_FAILFAST)
1972 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1973 	cqr->startdev = startdev;
1974 	cqr->memdev = startdev;
1975 	cqr->block = block;
1976 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
1977 	cqr->lpm = private->path_data.ppm;
1978 	cqr->retries = 256;
1979 	cqr->buildclk = get_clock();
1980 	cqr->status = DASD_CQR_FILLED;
1981 	return cqr;
1982 }
1983 
1984 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
1985 					       struct dasd_device *startdev,
1986 					       struct dasd_block *block,
1987 					       struct request *req,
1988 					       sector_t first_rec,
1989 					       sector_t last_rec,
1990 					       sector_t first_trk,
1991 					       sector_t last_trk,
1992 					       unsigned int first_offs,
1993 					       unsigned int last_offs,
1994 					       unsigned int blk_per_trk,
1995 					       unsigned int blksize)
1996 {
1997 	struct dasd_eckd_private *private;
1998 	unsigned long *idaws;
1999 	struct dasd_ccw_req *cqr;
2000 	struct ccw1 *ccw;
2001 	struct req_iterator iter;
2002 	struct bio_vec *bv;
2003 	char *dst, *idaw_dst;
2004 	unsigned int cidaw, cplength, datasize;
2005 	unsigned int tlf;
2006 	sector_t recid;
2007 	unsigned char cmd;
2008 	struct dasd_device *basedev;
2009 	unsigned int trkcount, count, count_to_trk_end;
2010 	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
2011 	unsigned char new_track, end_idaw;
2012 	sector_t trkid;
2013 	unsigned int recoffs;
2014 
2015 	basedev = block->base;
2016 	private = (struct dasd_eckd_private *) basedev->private;
2017 	if (rq_data_dir(req) == READ)
2018 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2019 	else if (rq_data_dir(req) == WRITE)
2020 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2021 	else
2022 		return ERR_PTR(-EINVAL);
2023 
2024 	/* Track based I/O needs IDAWs for each page, and not just for
2025 	 * 64 bit addresses. We need additional idals for pages
2026 	 * that get filled from two tracks, so we use the number
2027 	 * of records as upper limit.
2028 	 */
2029 	cidaw = last_rec - first_rec + 1;
2030 	trkcount = last_trk - first_trk + 1;
2031 
2032 	/* 1x prefix + one read/write ccw per track */
2033 	cplength = 1 + trkcount;
2034 
2035 	/* on 31-bit we need space for two 32 bit addresses per page
2036 	 * on 64-bit one 64 bit address
2037 	 */
2038 	datasize = sizeof(struct PFX_eckd_data) +
2039 		cidaw * sizeof(unsigned long long);
2040 
2041 	/* Allocate the ccw request. */
2042 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
2043 				   startdev);
2044 	if (IS_ERR(cqr))
2045 		return cqr;
2046 	ccw = cqr->cpaddr;
2047 	/* transfer length factor: how many bytes to read from the last track */
2048 	if (first_trk == last_trk)
2049 		tlf = last_offs - first_offs + 1;
2050 	else
2051 		tlf = last_offs + 1;
2052 	tlf *= blksize;
2053 
2054 	if (prefix_LRE(ccw++, cqr->data, first_trk,
2055 		       last_trk, cmd, basedev, startdev,
2056 		       1 /* format */, first_offs + 1,
2057 		       trkcount, blksize,
2058 		       tlf) == -EAGAIN) {
2059 		/* Clock not in sync and XRC is enabled.
2060 		 * Try again later.
2061 		 */
2062 		dasd_sfree_request(cqr, startdev);
2063 		return ERR_PTR(-EAGAIN);
2064 	}
2065 
2066 	/*
2067 	 * The translation of request into ccw programs must meet the
2068 	 * following conditions:
2069 	 * - all idaws but the first and the last must address full pages
2070 	 *   (or 2K blocks on 31-bit)
2071 	 * - the scope of a ccw and its idal ends with the track boundaries
2072 	 */
2073 	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
2074 	recid = first_rec;
2075 	new_track = 1;
2076 	end_idaw = 0;
2077 	len_to_track_end = 0;
2078 	idaw_dst = 0;
2079 	idaw_len = 0;
2080 	rq_for_each_segment(bv, req, iter) {
2081 		dst = page_address(bv->bv_page) + bv->bv_offset;
2082 		seg_len = bv->bv_len;
2083 		while (seg_len) {
2084 			if (new_track) {
2085 				trkid = recid;
2086 				recoffs = sector_div(trkid, blk_per_trk);
2087 				count_to_trk_end = blk_per_trk - recoffs;
2088 				count = min((last_rec - recid + 1),
2089 					    (sector_t)count_to_trk_end);
2090 				len_to_track_end = count * blksize;
2091 				ccw[-1].flags |= CCW_FLAG_CC;
2092 				ccw->cmd_code = cmd;
2093 				ccw->count = len_to_track_end;
2094 				ccw->cda = (__u32)(addr_t)idaws;
2095 				ccw->flags = CCW_FLAG_IDA;
2096 				ccw++;
2097 				recid += count;
2098 				new_track = 0;
2099 				/* first idaw for a ccw may start anywhere */
2100 				if (!idaw_dst)
2101 					idaw_dst = dst;
2102 			}
2103 			/* If we start a new idaw, we must make sure that it
2104 			 * starts on an IDA_BLOCK_SIZE boundary.
2105 			 * If we continue an idaw, we must make sure that the
2106 			 * current segment begins where the idaw accumulated
2107 			 * so far ends.
2108 			 */
2109 			if (!idaw_dst) {
2110 				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
2111 					dasd_sfree_request(cqr, startdev);
2112 					return ERR_PTR(-ERANGE);
2113 				} else
2114 					idaw_dst = dst;
2115 			}
2116 			if ((idaw_dst + idaw_len) != dst) {
2117 				dasd_sfree_request(cqr, startdev);
2118 				return ERR_PTR(-ERANGE);
2119 			}
2120 			part_len = min(seg_len, len_to_track_end);
2121 			seg_len -= part_len;
2122 			dst += part_len;
2123 			idaw_len += part_len;
2124 			len_to_track_end -= part_len;
2125 			/* collected memory area ends on an IDA_BLOCK border,
2126 			 * -> create an idaw
2127 			 * idal_create_words will handle cases where idaw_len
2128 			 * is larger than IDA_BLOCK_SIZE
2129 			 */
2130 			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
2131 				end_idaw = 1;
2132 			/* We also need to end the idaw at track end */
2133 			if (!len_to_track_end) {
2134 				new_track = 1;
2135 				end_idaw = 1;
2136 			}
2137 			if (end_idaw) {
2138 				idaws = idal_create_words(idaws, idaw_dst,
2139 							  idaw_len);
2140 				idaw_dst = 0;
2141 				idaw_len = 0;
2142 				end_idaw = 0;
2143 			}
2144 		}
2145 	}
2146 
2147 	if (blk_noretry_request(req) ||
2148 	    block->base->features & DASD_FEATURE_FAILFAST)
2149 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2150 	cqr->startdev = startdev;
2151 	cqr->memdev = startdev;
2152 	cqr->block = block;
2153 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2154 	cqr->lpm = private->path_data.ppm;
2155 	cqr->retries = 256;
2156 	cqr->buildclk = get_clock();
2157 	cqr->status = DASD_CQR_FILLED;
2158 	return cqr;
2159 }
2160 
2161 static int prepare_itcw(struct itcw *itcw,
2162 			unsigned int trk, unsigned int totrk, int cmd,
2163 			struct dasd_device *basedev,
2164 			struct dasd_device *startdev,
2165 			unsigned int rec_on_trk, int count,
2166 			unsigned int blksize,
2167 			unsigned int total_data_size,
2168 			unsigned int tlf,
2169 			unsigned int blk_per_trk)
2170 {
2171 	struct PFX_eckd_data pfxdata;
2172 	struct dasd_eckd_private *basepriv, *startpriv;
2173 	struct DE_eckd_data *dedata;
2174 	struct LRE_eckd_data *lredata;
2175 	struct dcw *dcw;
2176 
2177 	u32 begcyl, endcyl;
2178 	u16 heads, beghead, endhead;
2179 	u8 pfx_cmd;
2180 
2181 	int rc = 0;
2182 	int sector = 0;
2183 	int dn, d;
2184 
2185 
2186 	/* setup prefix data */
2187 	basepriv = (struct dasd_eckd_private *) basedev->private;
2188 	startpriv = (struct dasd_eckd_private *) startdev->private;
2189 	dedata = &pfxdata.define_extent;
2190 	lredata = &pfxdata.locate_record;
2191 
2192 	memset(&pfxdata, 0, sizeof(pfxdata));
2193 	pfxdata.format = 1; /* PFX with LRE */
2194 	pfxdata.base_address = basepriv->ned->unit_addr;
2195 	pfxdata.base_lss = basepriv->ned->ID;
2196 	pfxdata.validity.define_extent = 1;
2197 
2198 	/* private uid is kept up to date, conf_data may be outdated */
2199 	if (startpriv->uid.type != UA_BASE_DEVICE) {
2200 		pfxdata.validity.verify_base = 1;
2201 		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
2202 			pfxdata.validity.hyper_pav = 1;
2203 	}
2204 
2205 	switch (cmd) {
2206 	case DASD_ECKD_CCW_READ_TRACK_DATA:
2207 		dedata->mask.perm = 0x1;
2208 		dedata->attributes.operation = basepriv->attrib.operation;
2209 		dedata->blk_size = blksize;
2210 		dedata->ga_extended |= 0x42;
2211 		lredata->operation.orientation = 0x0;
2212 		lredata->operation.operation = 0x0C;
2213 		lredata->auxiliary.check_bytes = 0x01;
2214 		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
2215 		break;
2216 	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
2217 		dedata->mask.perm = 0x02;
2218 		dedata->attributes.operation = basepriv->attrib.operation;
2219 		dedata->blk_size = blksize;
2220 		rc = check_XRC_on_prefix(&pfxdata, basedev);
2221 		dedata->ga_extended |= 0x42;
2222 		lredata->operation.orientation = 0x0;
2223 		lredata->operation.operation = 0x3F;
2224 		lredata->extended_operation = 0x23;
2225 		lredata->auxiliary.check_bytes = 0x2;
2226 		pfx_cmd = DASD_ECKD_CCW_PFX;
2227 		break;
2228 	default:
2229 		DBF_DEV_EVENT(DBF_ERR, basedev,
2230 			      "prepare itcw, unknown opcode 0x%x", cmd);
2231 		BUG();
2232 		break;
2233 	}
2234 	if (rc)
2235 		return rc;
2236 
2237 	dedata->attributes.mode = 0x3;	/* ECKD */
2238 
2239 	heads = basepriv->rdc_data.trk_per_cyl;
2240 	begcyl = trk / heads;
2241 	beghead = trk % heads;
2242 	endcyl = totrk / heads;
2243 	endhead = totrk % heads;
2244 
2245 	/* check for sequential prestage - enhance cylinder range */
2246 	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
2247 	    dedata->attributes.operation == DASD_SEQ_ACCESS) {
2248 
2249 		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
2250 			endcyl += basepriv->attrib.nr_cyl;
2251 		else
2252 			endcyl = (basepriv->real_cyl - 1);
2253 	}
2254 
2255 	set_ch_t(&dedata->beg_ext, begcyl, beghead);
2256 	set_ch_t(&dedata->end_ext, endcyl, endhead);
2257 
2258 	dedata->ep_format = 0x20; /* records per track is valid */
2259 	dedata->ep_rec_per_track = blk_per_trk;
2260 
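	/* Translate the record number on the track into an approximate
	 * sector (angular position) value; the constants used here are
	 * specific to the 3390 and 3380 device geometries.
	 */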
2261 	if (rec_on_trk) {
2262 		switch (basepriv->rdc_data.dev_type) {
2263 		case 0x3390:
2264 			dn = ceil_quot(blksize + 6, 232);
2265 			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
2266 			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
2267 			break;
2268 		case 0x3380:
2269 			d = 7 + ceil_quot(blksize + 12, 32);
2270 			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
2271 			break;
2272 		}
2273 	}
2274 
2275 	lredata->auxiliary.length_valid = 1;
2276 	lredata->auxiliary.length_scope = 1;
2277 	lredata->auxiliary.imbedded_ccw_valid = 1;
2278 	lredata->length = tlf;
2279 	lredata->imbedded_ccw = cmd;
2280 	lredata->count = count;
2281 	lredata->sector = sector;
2282 	set_ch_t(&lredata->seek_addr, begcyl, beghead);
2283 	lredata->search_arg.cyl = lredata->seek_addr.cyl;
2284 	lredata->search_arg.head = lredata->seek_addr.head;
2285 	lredata->search_arg.record = rec_on_trk;
2286 
2287 	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
2288 		     &pfxdata, sizeof(pfxdata), total_data_size);
2289 
2290 	return rc;
2291 }
2292 
2293 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
2294 					       struct dasd_device *startdev,
2295 					       struct dasd_block *block,
2296 					       struct request *req,
2297 					       sector_t first_rec,
2298 					       sector_t last_rec,
2299 					       sector_t first_trk,
2300 					       sector_t last_trk,
2301 					       unsigned int first_offs,
2302 					       unsigned int last_offs,
2303 					       unsigned int blk_per_trk,
2304 					       unsigned int blksize)
2305 {
2306 	struct dasd_eckd_private *private;
2307 	struct dasd_ccw_req *cqr;
2308 	struct req_iterator iter;
2309 	struct bio_vec *bv;
2310 	char *dst;
2311 	unsigned int trkcount, ctidaw;
2312 	unsigned char cmd;
2313 	struct dasd_device *basedev;
2314 	unsigned int tlf;
2315 	struct itcw *itcw;
2316 	struct tidaw *last_tidaw = NULL;
2317 	int itcw_op;
2318 	size_t itcw_size;
2319 
2320 	basedev = block->base;
2321 	private = (struct dasd_eckd_private *) basedev->private;
2322 	if (rq_data_dir(req) == READ) {
2323 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
2324 		itcw_op = ITCW_OP_READ;
2325 	} else if (rq_data_dir(req) == WRITE) {
2326 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
2327 		itcw_op = ITCW_OP_WRITE;
2328 	} else
2329 		return ERR_PTR(-EINVAL);
2330 
2331 	/* Track-based I/O needs to address all memory via TIDAWs,
2332 	 * not just 64 bit addresses. This allows us to map
2333 	 * each segment directly to one tidaw.
2334 	 */
2335 	trkcount = last_trk - first_trk + 1;
2336 	ctidaw = 0;
2337 	rq_for_each_segment(bv, req, iter) {
2338 		++ctidaw;
2339 	}
2340 
2341 	/* Allocate the ccw request. */
2342 	itcw_size = itcw_calc_size(0, ctidaw, 0);
2343 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2344 	if (IS_ERR(cqr))
2345 		return cqr;
2346 
2347 	cqr->cpmode = 1;
2348 	cqr->startdev = startdev;
2349 	cqr->memdev = startdev;
2350 	cqr->block = block;
2351 	cqr->expires = 100*HZ;
2352 	cqr->buildclk = get_clock();
2353 	cqr->status = DASD_CQR_FILLED;
2354 	cqr->retries = 10;
2355 
2356 	/* transfer length factor: how many bytes to read from the last track */
2357 	if (first_trk == last_trk)
2358 		tlf = last_offs - first_offs + 1;
2359 	else
2360 		tlf = last_offs + 1;
2361 	tlf *= blksize;
2362 
2363 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
2364 	cqr->cpaddr = itcw_get_tcw(itcw);
2365 
2366 	if (prepare_itcw(itcw, first_trk, last_trk,
2367 			 cmd, basedev, startdev,
2368 			 first_offs + 1,
2369 			 trkcount, blksize,
2370 			 (last_rec - first_rec + 1) * blksize,
2371 			 tlf, blk_per_trk) == -EAGAIN) {
2372 		/* Clock not in sync and XRC is enabled.
2373 		 * Try again later.
2374 		 */
2375 		dasd_sfree_request(cqr, startdev);
2376 		return ERR_PTR(-EAGAIN);
2377 	}
2378 
2379 	/*
2380 	 * A tidaw can address 4k of memory, but must not cross page boundaries.
2381 	 * We can let the block layer handle this by setting
2382 	 * blk_queue_segment_boundary to page boundaries and
2383 	 * blk_max_segment_size to page size when setting up the request queue.
2384 	 */
2385 	rq_for_each_segment(bv, req, iter) {
2386 		dst = page_address(bv->bv_page) + bv->bv_offset;
2387 		last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len);
2388 		if (IS_ERR(last_tidaw))
2389 			return (struct dasd_ccw_req *)last_tidaw;
2390 	}
2391 
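	/* Mark the final tidaw of the list and finalize the itcw. */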
2392 	last_tidaw->flags |= 0x80;
2393 	itcw_finalize(itcw);
2394 
2395 	if (blk_noretry_request(req) ||
2396 	    block->base->features & DASD_FEATURE_FAILFAST)
2397 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2398 	cqr->startdev = startdev;
2399 	cqr->memdev = startdev;
2400 	cqr->block = block;
2401 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
2402 	cqr->lpm = private->path_data.ppm;
2403 	cqr->retries = 256;
2404 	cqr->buildclk = get_clock();
2405 	cqr->status = DASD_CQR_FILLED;
2406 	return cqr;
2407 }
2408 
2409 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
2410 					       struct dasd_block *block,
2411 					       struct request *req)
2412 {
2413 	int tpm, cmdrtd, cmdwtd;
2414 	int use_prefix;
2415 #if defined(CONFIG_64BIT)
2416 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
2417 #endif
2418 	struct dasd_eckd_private *private;
2419 	struct dasd_device *basedev;
2420 	sector_t first_rec, last_rec;
2421 	sector_t first_trk, last_trk;
2422 	unsigned int first_offs, last_offs;
2423 	unsigned int blk_per_trk, blksize;
2424 	int cdlspecial;
2425 	struct dasd_ccw_req *cqr;
2426 
2427 	basedev = block->base;
2428 	private = (struct dasd_eckd_private *) basedev->private;
2429 
2430 	/* Calculate number of blocks/records per track. */
2431 	blksize = block->bp_block;
2432 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2433 	if (blk_per_trk == 0)
2434 		return ERR_PTR(-EINVAL);
2435 	/* Calculate record id of first and last block. */
2436 	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
2437 	first_offs = sector_div(first_trk, blk_per_trk);
2438 	last_rec = last_trk =
2439 		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
2440 	last_offs = sector_div(last_trk, blk_per_trk);
2441 	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
2442 
2443 	/* is transport mode supported? */
2444 #if defined(CONFIG_64BIT)
2445 	fcx_in_css = css_general_characteristics.fcx;
2446 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
2447 	fcx_in_features = private->features.feature[40] & 0x80;
2448 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
2449 #else
2450 	tpm = 0;
2451 #endif
2452 
2453 	/* is read track data and write track data in command mode supported? */
2454 	cmdrtd = private->features.feature[9] & 0x20;
2455 	cmdwtd = private->features.feature[12] & 0x40;
2456 	use_prefix = private->features.feature[8] & 0x01;
2457 
2458 	cqr = NULL;
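	/*
	 * Pick the channel program type: transport mode track I/O if fcx
	 * is available and the request stays on a single track, command
	 * mode track I/O if read/write track data is supported, and the
	 * single-block command mode program as fallback.
	 */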
2459 	if (cdlspecial || dasd_page_cache) {
2460 		/* do nothing, just fall through to the cmd mode single case */
2461 	} else if (!dasd_nofcx && tpm && (first_trk == last_trk)) {
2462 		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
2463 						    first_rec, last_rec,
2464 						    first_trk, last_trk,
2465 						    first_offs, last_offs,
2466 						    blk_per_trk, blksize);
2467 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2468 			cqr = NULL;
2469 	} else if (use_prefix &&
2470 		   (((rq_data_dir(req) == READ) && cmdrtd) ||
2471 		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
2472 		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
2473 						   first_rec, last_rec,
2474 						   first_trk, last_trk,
2475 						   first_offs, last_offs,
2476 						   blk_per_trk, blksize);
2477 		if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN)
2478 			cqr = NULL;
2479 	}
2480 	if (!cqr)
2481 		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
2482 						    first_rec, last_rec,
2483 						    first_trk, last_trk,
2484 						    first_offs, last_offs,
2485 						    blk_per_trk, blksize);
2486 	return cqr;
2487 }
2488 
2489 static int
2490 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
2491 {
2492 	struct dasd_eckd_private *private;
2493 	struct ccw1 *ccw;
2494 	struct req_iterator iter;
2495 	struct bio_vec *bv;
2496 	char *dst, *cda;
2497 	unsigned int blksize, blk_per_trk, off;
2498 	sector_t recid;
2499 	int status;
2500 
2501 	if (!dasd_page_cache)
2502 		goto out;
2503 	private = (struct dasd_eckd_private *) cqr->block->base->private;
2504 	blksize = cqr->block->bp_block;
2505 	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
2506 	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
2507 	ccw = cqr->cpaddr;
2508 	/* Skip over define extent & locate record. */
2509 	ccw++;
2510 	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
2511 		ccw++;
2512 	rq_for_each_segment(bv, req, iter) {
2513 		dst = page_address(bv->bv_page) + bv->bv_offset;
2514 		for (off = 0; off < bv->bv_len; off += blksize) {
2515 			/* Skip locate record. */
2516 			if (private->uses_cdl && recid <= 2*blk_per_trk)
2517 				ccw++;
2518 			if (dst) {
2519 				if (ccw->flags & CCW_FLAG_IDA)
2520 					cda = *((char **)((addr_t) ccw->cda));
2521 				else
2522 					cda = (char *)((addr_t) ccw->cda);
2523 				if (dst != cda) {
2524 					if (rq_data_dir(req) == READ)
2525 						memcpy(dst, cda, bv->bv_len);
2526 					kmem_cache_free(dasd_page_cache,
2527 					    (void *)((addr_t)cda & PAGE_MASK));
2528 				}
2529 				dst = NULL;
2530 			}
2531 			ccw++;
2532 			recid++;
2533 		}
2534 	}
2535 out:
2536 	status = cqr->status == DASD_CQR_DONE;
2537 	dasd_sfree_request(cqr, cqr->memdev);
2538 	return status;
2539 }
2540 
2541 /*
2542  * Modify ccw/tcw in cqr so it can be started on a base device.
2543  *
2544  * Note that this is not enough to restart the cqr!
2545  * Either reset cqr->startdev as well (summary unit check handling)
2546  * or restart via separate cqr (as in ERP handling).
2547  */
2548 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
2549 {
2550 	struct ccw1 *ccw;
2551 	struct PFX_eckd_data *pfxdata;
2552 	struct tcw *tcw;
2553 	struct tccb *tccb;
2554 	struct dcw *dcw;
2555 
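	/* For transport mode (cpmode == 1) the prefix data is embedded in
	 * the tccb of the tcw; for command mode it is the data area of
	 * the first ccw.
	 */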
2556 	if (cqr->cpmode == 1) {
2557 		tcw = cqr->cpaddr;
2558 		tccb = tcw_get_tccb(tcw);
2559 		dcw = (struct dcw *)&tccb->tca[0];
2560 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
2561 		pfxdata->validity.verify_base = 0;
2562 		pfxdata->validity.hyper_pav = 0;
2563 	} else {
2564 		ccw = cqr->cpaddr;
2565 		pfxdata = cqr->data;
2566 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
2567 			pfxdata->validity.verify_base = 0;
2568 			pfxdata->validity.hyper_pav = 0;
2569 		}
2570 	}
2571 }
2572 
2573 #define DASD_ECKD_CHANQ_MAX_SIZE 4
2574 
2575 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
2576 						     struct dasd_block *block,
2577 						     struct request *req)
2578 {
2579 	struct dasd_eckd_private *private;
2580 	struct dasd_device *startdev;
2581 	unsigned long flags;
2582 	struct dasd_ccw_req *cqr;
2583 
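	/* Let the alias management pick a start device (base or alias)
	 * and refuse the request if its channel queue is already full.
	 */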
2584 	startdev = dasd_alias_get_start_dev(base);
2585 	if (!startdev)
2586 		startdev = base;
2587 	private = (struct dasd_eckd_private *) startdev->private;
2588 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
2589 		return ERR_PTR(-EBUSY);
2590 
2591 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
2592 	private->count++;
2593 	cqr = dasd_eckd_build_cp(startdev, block, req);
2594 	if (IS_ERR(cqr))
2595 		private->count--;
2596 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
2597 	return cqr;
2598 }
2599 
2600 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
2601 				   struct request *req)
2602 {
2603 	struct dasd_eckd_private *private;
2604 	unsigned long flags;
2605 
2606 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
2607 	private = (struct dasd_eckd_private *) cqr->memdev->private;
2608 	private->count--;
2609 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
2610 	return dasd_eckd_free_cp(cqr, req);
2611 }
2612 
2613 static int
2614 dasd_eckd_fill_info(struct dasd_device * device,
2615 		    struct dasd_information2_t * info)
2616 {
2617 	struct dasd_eckd_private *private;
2618 
2619 	private = (struct dasd_eckd_private *) device->private;
2620 	info->label_block = 2;
2621 	info->FBA_layout = private->uses_cdl ? 0 : 1;
2622 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
2623 	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
2624 	memcpy(info->characteristics, &private->rdc_data,
2625 	       sizeof(struct dasd_eckd_characteristics));
2626 	info->confdata_size = min((unsigned long)private->conf_len,
2627 				  sizeof(info->configuration_data));
2628 	memcpy(info->configuration_data, private->conf_data,
2629 	       info->confdata_size);
2630 	return 0;
2631 }
2632 
2633 /*
2634  * SECTION: ioctl functions for eckd devices.
2635  */
2636 
2637 /*
2638  * Release device ioctl.
2639  * Builds a channel program to release a previously reserved
2640  * (see dasd_eckd_reserve) device.
2641  */
2642 static int
2643 dasd_eckd_release(struct dasd_device *device)
2644 {
2645 	struct dasd_ccw_req *cqr;
2646 	int rc;
2647 	struct ccw1 *ccw;
2648 
2649 	if (!capable(CAP_SYS_ADMIN))
2650 		return -EACCES;
2651 
2652 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2653 	if (IS_ERR(cqr)) {
2654 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2655 			    "Could not allocate initialization request");
2656 		return PTR_ERR(cqr);
2657 	}
2658 	ccw = cqr->cpaddr;
2659 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
2660 	ccw->flags |= CCW_FLAG_SLI;
2661 	ccw->count = 32;
2662 	ccw->cda = (__u32)(addr_t) cqr->data;
2663 	cqr->startdev = device;
2664 	cqr->memdev = device;
2665 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2666 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2667 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2668 	cqr->expires = 2 * HZ;
2669 	cqr->buildclk = get_clock();
2670 	cqr->status = DASD_CQR_FILLED;
2671 
2672 	rc = dasd_sleep_on_immediatly(cqr);
2673 
2674 	dasd_sfree_request(cqr, cqr->memdev);
2675 	return rc;
2676 }
2677 
2678 /*
2679  * Reserve device ioctl.
2680  * Options are set to 'synchronous wait for interrupt' and
2681  * 'timeout the request'. This leads to terminating the I/O if
2682  * the interrupt is outstanding for a certain time.
2683  */
2684 static int
2685 dasd_eckd_reserve(struct dasd_device *device)
2686 {
2687 	struct dasd_ccw_req *cqr;
2688 	int rc;
2689 	struct ccw1 *ccw;
2690 
2691 	if (!capable(CAP_SYS_ADMIN))
2692 		return -EACCES;
2693 
2694 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2695 	if (IS_ERR(cqr)) {
2696 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2697 			    "Could not allocate initialization request");
2698 		return PTR_ERR(cqr);
2699 	}
2700 	ccw = cqr->cpaddr;
2701 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
2702 	ccw->flags |= CCW_FLAG_SLI;
2703 	ccw->count = 32;
2704 	ccw->cda = (__u32)(addr_t) cqr->data;
2705 	cqr->startdev = device;
2706 	cqr->memdev = device;
2707 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2708 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2709 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2710 	cqr->expires = 2 * HZ;
2711 	cqr->buildclk = get_clock();
2712 	cqr->status = DASD_CQR_FILLED;
2713 
2714 	rc = dasd_sleep_on_immediatly(cqr);
2715 
2716 	dasd_sfree_request(cqr, cqr->memdev);
2717 	return rc;
2718 }
2719 
2720 /*
2721  * Steal lock ioctl - unconditional reserve device.
2722  * Builds a channel program to break a device's reservation.
2723  * (unconditional reserve)
2724  */
2725 static int
2726 dasd_eckd_steal_lock(struct dasd_device *device)
2727 {
2728 	struct dasd_ccw_req *cqr;
2729 	int rc;
2730 	struct ccw1 *ccw;
2731 
2732 	if (!capable(CAP_SYS_ADMIN))
2733 		return -EACCES;
2734 
2735 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
2736 	if (IS_ERR(cqr)) {
2737 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2738 			    "Could not allocate initialization request");
2739 		return PTR_ERR(cqr);
2740 	}
2741 	ccw = cqr->cpaddr;
2742 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
2743 	ccw->flags |= CCW_FLAG_SLI;
2744 	ccw->count = 32;
2745 	ccw->cda = (__u32)(addr_t) cqr->data;
2746 	cqr->startdev = device;
2747 	cqr->memdev = device;
2748 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2749 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
2750 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
2751 	cqr->expires = 2 * HZ;
2752 	cqr->buildclk = get_clock();
2753 	cqr->status = DASD_CQR_FILLED;
2754 
2755 	rc = dasd_sleep_on_immediatly(cqr);
2756 
2757 	dasd_sfree_request(cqr, cqr->memdev);
2758 	return rc;
2759 }
2760 
2761 /*
2762  * Read performance statistics
2763  */
2764 static int
2765 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
2766 {
2767 	struct dasd_psf_prssd_data *prssdp;
2768 	struct dasd_rssd_perf_stats_t *stats;
2769 	struct dasd_ccw_req *cqr;
2770 	struct ccw1 *ccw;
2771 	int rc;
2772 
2773 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
2774 				   (sizeof(struct dasd_psf_prssd_data) +
2775 				    sizeof(struct dasd_rssd_perf_stats_t)),
2776 				   device);
2777 	if (IS_ERR(cqr)) {
2778 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2779 			    "Could not allocate initialization request");
2780 		return PTR_ERR(cqr);
2781 	}
2782 	cqr->startdev = device;
2783 	cqr->memdev = device;
2784 	cqr->retries = 0;
2785 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
2786 	cqr->expires = 10 * HZ;
2787 
2788 	/* Prepare for Read Subsystem Data */
2789 	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2790 	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
2791 	prssdp->order = PSF_ORDER_PRSSD;
2792 	prssdp->suborder = 0x01;	/* Performance Statistics */
2793 	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */
2794 
2795 	ccw = cqr->cpaddr;
2796 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2797 	ccw->count = sizeof(struct dasd_psf_prssd_data);
2798 	ccw->flags |= CCW_FLAG_CC;
2799 	ccw->cda = (__u32)(addr_t) prssdp;
2800 
2801 	/* Read Subsystem Data - Performance Statistics */
2802 	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2803 	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
2804 
2805 	ccw++;
2806 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2807 	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
2808 	ccw->cda = (__u32)(addr_t) stats;
2809 
2810 	cqr->buildclk = get_clock();
2811 	cqr->status = DASD_CQR_FILLED;
2812 	rc = dasd_sleep_on(cqr);
2813 	if (rc == 0) {
2814 		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2815 		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2816 		if (copy_to_user(argp, stats,
2817 				 sizeof(struct dasd_rssd_perf_stats_t)))
2818 			rc = -EFAULT;
2819 	}
2820 	dasd_sfree_request(cqr, cqr->memdev);
2821 	return rc;
2822 }
2823 
2824 /*
2825  * Get attributes (cache operations)
2826  * Returns the cache attributes used in Define Extent (DE).
2827  */
2828 static int
2829 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
2830 {
2831 	struct dasd_eckd_private *private =
2832 		(struct dasd_eckd_private *)device->private;
2833 	struct attrib_data_t attrib = private->attrib;
2834 	int rc;
2835 
2836 	if (!capable(CAP_SYS_ADMIN))
2837 		return -EACCES;
2838 	if (!argp)
2839 		return -EINVAL;
2840 
2841 	rc = 0;
2842 	if (copy_to_user(argp, (long *) &attrib,
2843 			 sizeof(struct attrib_data_t)))
2844 		rc = -EFAULT;
2845 
2846 	return rc;
2847 }
2848 
2849 /*
2850  * Set attributes (cache operations)
2851  * Stores the attributes for cache operations to be used in Define Extent (DE).
2852  */
2853 static int
2854 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2855 {
2856 	struct dasd_eckd_private *private =
2857 		(struct dasd_eckd_private *)device->private;
2858 	struct attrib_data_t attrib;
2859 
2860 	if (!capable(CAP_SYS_ADMIN))
2861 		return -EACCES;
2862 	if (!argp)
2863 		return -EINVAL;
2864 
2865 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
2866 		return -EFAULT;
2867 	private->attrib = attrib;
2868 
2869 	dev_info(&device->cdev->dev,
2870 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
2871 		 private->attrib.operation, private->attrib.nr_cyl);
2872 	return 0;
2873 }
2874 
2875 /*
2876  * Issue syscall I/O to EMC Symmetrix array.
2877  * CCWs are PSF and RSSD
2878  */
2879 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2880 {
2881 	struct dasd_symmio_parms usrparm;
2882 	char *psf_data, *rssd_result;
2883 	struct dasd_ccw_req *cqr;
2884 	struct ccw1 *ccw;
2885 	char psf0, psf1;
2886 	int rc;
2887 
2888 	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
2889 		return -EACCES;
2890 	psf0 = psf1 = 0;
2891 
2892 	/* Copy parms from caller */
2893 	rc = -EFAULT;
2894 	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2895 		goto out;
2896 	if (is_compat_task() || sizeof(long) == 4) {
2897 		/* Make sure pointers are sane even on 31 bit. */
2898 		rc = -EINVAL;
2899 		if ((usrparm.psf_data >> 32) != 0)
2900 			goto out;
2901 		if ((usrparm.rssd_result >> 32) != 0)
2902 			goto out;
2903 		usrparm.psf_data &= 0x7fffffffULL;
2904 		usrparm.rssd_result &= 0x7fffffffULL;
2905 	}
2906 	/* alloc I/O data area */
2907 	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2908 	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
2909 	if (!psf_data || !rssd_result) {
2910 		rc = -ENOMEM;
2911 		goto out_free;
2912 	}
2913 
2914 	/* get syscall header from user space */
2915 	rc = -EFAULT;
2916 	if (copy_from_user(psf_data,
2917 			   (void __user *)(unsigned long) usrparm.psf_data,
2918 			   usrparm.psf_data_len))
2919 		goto out_free;
2920 	psf0 = psf_data[0];
2921 	psf1 = psf_data[1];
2922 
2923 	/* setup CCWs for PSF + RSSD */
2924 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device);
2925 	if (IS_ERR(cqr)) {
2926 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
2927 			"Could not allocate initialization request");
2928 		rc = PTR_ERR(cqr);
2929 		goto out_free;
2930 	}
2931 
2932 	cqr->startdev = device;
2933 	cqr->memdev = device;
2934 	cqr->retries = 3;
2935 	cqr->expires = 10 * HZ;
2936 	cqr->buildclk = get_clock();
2937 	cqr->status = DASD_CQR_FILLED;
2938 
2939 	/* Build the ccws */
2940 	ccw = cqr->cpaddr;
2941 
2942 	/* PSF ccw */
2943 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
2944 	ccw->count = usrparm.psf_data_len;
2945 	ccw->flags |= CCW_FLAG_CC;
2946 	ccw->cda = (__u32)(addr_t) psf_data;
2947 
2948 	ccw++;
2949 
2950 	/* RSSD ccw  */
2951 	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2952 	ccw->count = usrparm.rssd_result_len;
2953 	ccw->flags = CCW_FLAG_SLI;
2954 	ccw->cda = (__u32)(addr_t) rssd_result;
2955 
2956 	rc = dasd_sleep_on(cqr);
2957 	if (rc)
2958 		goto out_sfree;
2959 
2960 	rc = -EFAULT;
2961 	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
2962 			   rssd_result, usrparm.rssd_result_len))
2963 		goto out_sfree;
2964 	rc = 0;
2965 
2966 out_sfree:
2967 	dasd_sfree_request(cqr, cqr->memdev);
2968 out_free:
2969 	kfree(rssd_result);
2970 	kfree(psf_data);
2971 out:
2972 	DBF_DEV_EVENT(DBF_WARNING, device,
2973 		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
2974 		      (int) psf0, (int) psf1, rc);
2975 	return rc;
2976 }
2977 
2978 static int
2979 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
2980 {
2981 	struct dasd_device *device = block->base;
2982 
2983 	switch (cmd) {
2984 	case BIODASDGATTR:
2985 		return dasd_eckd_get_attrib(device, argp);
2986 	case BIODASDSATTR:
2987 		return dasd_eckd_set_attrib(device, argp);
2988 	case BIODASDPSRD:
2989 		return dasd_eckd_performance(device, argp);
2990 	case BIODASDRLSE:
2991 		return dasd_eckd_release(device);
2992 	case BIODASDRSRV:
2993 		return dasd_eckd_reserve(device);
2994 	case BIODASDSLCK:
2995 		return dasd_eckd_steal_lock(device);
2996 	case BIODASDSYMMIO:
2997 		return dasd_symm_io(device, argp);
2998 	default:
2999 		return -ENOIOCTLCMD;
3000 	}
3001 }
3002 
3003 /*
3004  * Dump the range of CCWs into 'page' buffer
3005  * and return number of printed chars.
3006  */
3007 static int
3008 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
3009 {
3010 	int len, count;
3011 	char *datap;
3012 
3013 	len = 0;
3014 	while (from <= to) {
3015 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3016 			       " CCW %p: %08X %08X DAT:",
3017 			       from, ((int *) from)[0], ((int *) from)[1]);
3018 
3019 		/* get pointer to data (consider IDALs) */
3020 		if (from->flags & CCW_FLAG_IDA)
3021 			datap = (char *) *((addr_t *) (addr_t) from->cda);
3022 		else
3023 			datap = (char *) ((addr_t) from->cda);
3024 
3025 		/* dump data (max 32 bytes) */
3026 		for (count = 0; count < from->count && count < 32; count++) {
3027 			if (count % 8 == 0) len += sprintf(page + len, " ");
3028 			if (count % 4 == 0) len += sprintf(page + len, " ");
3029 			len += sprintf(page + len, "%02x", datap[count]);
3030 		}
3031 		len += sprintf(page + len, "\n");
3032 		from++;
3033 	}
3034 	return len;
3035 }
3036 
3037 static void
3038 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
3039 			 char *reason)
3040 {
3041 	u64 *sense;
3042 
3043 	sense = (u64 *) dasd_get_sense(irb);
3044 	if (sense) {
3045 		DBF_DEV_EVENT(DBF_EMERG, device,
3046 			      "%s: %s %02x%02x%02x %016llx %016llx %016llx "
3047 			      "%016llx", reason,
3048 			      scsw_is_tm(&irb->scsw) ? "t" : "c",
3049 			      scsw_cc(&irb->scsw), scsw_cstat(&irb->scsw),
3050 			      scsw_dstat(&irb->scsw), sense[0], sense[1],
3051 			      sense[2], sense[3]);
3052 	} else {
3053 		DBF_DEV_EVENT(DBF_EMERG, device, "%s",
3054 			      "SORRY - NO VALID SENSE AVAILABLE\n");
3055 	}
3056 }
3057 
3058 /*
3059  * Print sense data and related channel program.
3060  * Parts are printed because the printk buffer is only 1024 bytes.
3061  */
3062 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
3063 				 struct dasd_ccw_req *req, struct irb *irb)
3064 {
3065 	char *page;
3066 	struct ccw1 *first, *last, *fail, *from, *to;
3067 	int len, sl, sct;
3068 
3069 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3070 	if (page == NULL) {
3071 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
3072 			      "No memory to dump sense data\n");
3073 		return;
3074 	}
3075 	/* dump the sense data */
3076 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3077 		      " I/O status report for device %s:\n",
3078 		      dev_name(&device->cdev->dev));
3079 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3080 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d\n",
3081 		       req, scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3082 		       scsw_cc(&irb->scsw), req ? req->intrc : 0);
3083 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3084 		       " device %s: Failing CCW: %p\n",
3085 		       dev_name(&device->cdev->dev),
3086 		       (void *) (addr_t) irb->scsw.cmd.cpa);
3087 	if (irb->esw.esw0.erw.cons) {
3088 		for (sl = 0; sl < 4; sl++) {
3089 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3090 				       " Sense(hex) %2d-%2d:",
3091 				       (8 * sl), ((8 * sl) + 7));
3092 
3093 			for (sct = 0; sct < 8; sct++) {
3094 				len += sprintf(page + len, " %02x",
3095 					       irb->ecw[8 * sl + sct]);
3096 			}
3097 			len += sprintf(page + len, "\n");
3098 		}
3099 
3100 		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
3101 			/* 24 Byte Sense Data */
3102 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3103 				" 24 Byte: %x MSG %x, "
3104 				"%s MSGb to SYSOP\n",
3105 				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
3106 				irb->ecw[1] & 0x10 ? "" : "no");
3107 		} else {
3108 			/* 32 Byte Sense Data */
3109 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3110 				" 32 Byte: Format: %x "
3111 				"Exception class %x\n",
3112 				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
3113 		}
3114 	} else {
3115 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3116 			" SORRY - NO VALID SENSE AVAILABLE\n");
3117 	}
3118 	printk("%s", page);
3119 
3120 	if (req) {
3121 		/* req == NULL for unsolicited interrupts */
3122 		/* dump the Channel Program (max 140 Bytes per line) */
3123 		/* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
3124 		first = req->cpaddr;
3125 		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
3126 		to = min(first + 6, last);
3127 		len = sprintf(page,  KERN_ERR PRINTK_HEADER
3128 			      " Related CP in req: %p\n", req);
3129 		dasd_eckd_dump_ccw_range(first, to, page + len);
3130 		printk("%s", page);
3131 
3132 		/* print failing CCW area (maximum 4) */
3133 		/* scsw->cda is either valid or zero  */
3134 		len = 0;
3135 		from = ++to;
3136 		fail = (struct ccw1 *)(addr_t)
3137 				irb->scsw.cmd.cpa; /* failing CCW */
3138 		if (from < fail - 2) {
3139 			from = fail - 2;     /* there is a gap - print header */
3140 			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
3141 		}
3142 		to = min(fail + 1, last);
3143 		len += dasd_eckd_dump_ccw_range(from, to, page + len);
3144 
3145 		/* print last CCWs (maximum 2) */
3146 		from = max(from, ++to);
3147 		if (from < last - 1) {
3148 			from = last - 1;     /* there is a gap - print header */
3149 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
3150 		}
3151 		len += dasd_eckd_dump_ccw_range(from, last, page + len);
3152 		if (len > 0)
3153 			printk("%s", page);
3154 	}
3155 	free_page((unsigned long) page);
3156 }
3157 
3158 
3159 /*
3160  * Print sense data from a tcw.
3161  */
3162 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
3163 				 struct dasd_ccw_req *req, struct irb *irb)
3164 {
3165 	char *page;
3166 	int len, sl, sct, residual;
3167 
3168 	struct tsb *tsb;
3169 	u8 *sense;
3170 
3171 
3172 	page = (char *) get_zeroed_page(GFP_ATOMIC);
3173 	if (page == NULL) {
3174 		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
3175 			    "No memory to dump sense data");
3176 		return;
3177 	}
3178 	/* dump the sense data */
3179 	len = sprintf(page,  KERN_ERR PRINTK_HEADER
3180 		      " I/O status report for device %s:\n",
3181 		      dev_name(&device->cdev->dev));
3182 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3183 		       " in req: %p CS: 0x%02X DS: 0x%02X CC: 0x%02X RC: %d "
3184 		       "fcxs: 0x%02X schxs: 0x%02X\n", req,
3185 		       scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw),
3186 		       scsw_cc(&irb->scsw), req->intrc,
3187 		       irb->scsw.tm.fcxs, irb->scsw.tm.schxs);
3188 	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3189 		       " device %s: Failing TCW: %p\n",
3190 		       dev_name(&device->cdev->dev),
3191 		       (void *) (addr_t) irb->scsw.tm.tcw);
3192 
3193 	tsb = NULL;
3194 	sense = NULL;
3195 	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs == 0x01))
3196 		tsb = tcw_get_tsb(
3197 			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);
3198 
3199 	if (tsb) {
3200 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3201 			       " tsb->length %d\n", tsb->length);
3202 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3203 			       " tsb->flags %x\n", tsb->flags);
3204 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3205 			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
3206 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3207 			       " tsb->count %d\n", tsb->count);
3208 		residual = tsb->count - 28;
3209 		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3210 			       " residual %d\n", residual);
3211 
3212 		switch (tsb->flags & 0x07) {
3213 		case 1:	/* tsa_iostat */
3214 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3215 			       " tsb->tsa.iostat.dev_time %d\n",
3216 				       tsb->tsa.iostat.dev_time);
3217 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3218 			       " tsb->tsa.iostat.def_time %d\n",
3219 				       tsb->tsa.iostat.def_time);
3220 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3221 			       " tsb->tsa.iostat.queue_time %d\n",
3222 				       tsb->tsa.iostat.queue_time);
3223 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3224 			       " tsb->tsa.iostat.dev_busy_time %d\n",
3225 				       tsb->tsa.iostat.dev_busy_time);
3226 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3227 			       " tsb->tsa.iostat.dev_act_time %d\n",
3228 				       tsb->tsa.iostat.dev_act_time);
3229 			sense = tsb->tsa.iostat.sense;
3230 			break;
3231 		case 2: /* ts_ddpc */
3232 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3233 			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
3234 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3235 			       " tsb->tsa.ddpc.rcq:  ");
3236 			for (sl = 0; sl < 2; sl++) {
3237 				for (sct = 0; sct < 8; sct++) {
3238 					len += sprintf(page + len, " %02x",
3239 					      tsb->tsa.ddpc.rcq[8 * sl + sct]);
3240 				}
3241 				len += sprintf(page + len, "\n");
3242 			}
3243 			sense = tsb->tsa.ddpc.sense;
3244 			break;
3245 		case 3: /* tsa_intrg */
3246 			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
3247 				      " tsb->tsa.intrg.: not supportet yet \n");
3248 			break;
3249 		}
3250 
3251 		if (sense) {
3252 			for (sl = 0; sl < 4; sl++) {
3253 				len += sprintf(page + len,
3254 					       KERN_ERR PRINTK_HEADER
3255 					       " Sense(hex) %2d-%2d:",
3256 					       (8 * sl), ((8 * sl) + 7));
3257 				for (sct = 0; sct < 8; sct++) {
3258 					len += sprintf(page + len, " %02x",
3259 						       sense[8 * sl + sct]);
3260 				}
3261 				len += sprintf(page + len, "\n");
3262 			}
3263 
3264 			if (sense[27] & DASD_SENSE_BIT_0) {
3265 				/* 24 Byte Sense Data */
3266 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3267 					" 24 Byte: %x MSG %x, "
3268 					"%s MSGb to SYSOP\n",
3269 					sense[7] >> 4, sense[7] & 0x0f,
3270 					sense[1] & 0x10 ? "" : "no");
3271 			} else {
3272 				/* 32 Byte Sense Data */
3273 				sprintf(page + len, KERN_ERR PRINTK_HEADER
3274 					" 32 Byte: Format: %x "
3275 					"Exception class %x\n",
3276 					sense[6] & 0x0f, sense[22] >> 4);
3277 			}
3278 		} else {
3279 			sprintf(page + len, KERN_ERR PRINTK_HEADER
3280 				" SORRY - NO VALID SENSE AVAILABLE\n");
3281 		}
3282 	} else {
3283 		sprintf(page + len, KERN_ERR PRINTK_HEADER
3284 			" SORRY - NO TSB DATA AVAILABLE\n");
3285 	}
3286 	printk("%s", page);
3287 	free_page((unsigned long) page);
3288 }
3289 
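/*
 * Dump sense data in the format that matches the channel program:
 * tcw based (transport mode) or ccw based (command mode).
 */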
3290 static void dasd_eckd_dump_sense(struct dasd_device *device,
3291 				 struct dasd_ccw_req *req, struct irb *irb)
3292 {
3293 	if (req && scsw_is_tm(&req->irb.scsw))
3294 		dasd_eckd_dump_sense_tcw(device, req, irb);
3295 	else
3296 		dasd_eckd_dump_sense_ccw(device, req, irb);
3297 }
3298 
3299 static int dasd_eckd_pm_freeze(struct dasd_device *device)
3300 {
3301 	/*
3302 	 * The device should be disconnected from our LCU structure.
3303 	 * On restore we will reconnect it and reread LCU specific
3304 	 * information like PAV support that might have changed.
3305 	 */
3306 	dasd_alias_remove_device(device);
3307 	dasd_alias_disconnect_device_from_lcu(device);
3308 
3309 	return 0;
3310 }
3311 
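/*
 * Re-initialize the device on restore: reread the configuration data,
 * check that the UID did not change, re-register the device with the
 * alias management and reread features and device characteristics.
 */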
3312 static int dasd_eckd_restore_device(struct dasd_device *device)
3313 {
3314 	struct dasd_eckd_private *private;
3315 	struct dasd_eckd_characteristics temp_rdc_data;
3316 	int is_known, rc;
3317 	struct dasd_uid temp_uid;
3318 	unsigned long flags;
3319 
3320 	private = (struct dasd_eckd_private *) device->private;
3321 
3322 	/* Read Configuration Data */
3323 	rc = dasd_eckd_read_conf(device);
3324 	if (rc)
3325 		goto out_err;
3326 
3327 	dasd_eckd_get_uid(device, &temp_uid);
3328 	/* Generate device unique id */
3329 	rc = dasd_eckd_generate_uid(device);
3330 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3331 	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
3332 		dev_err(&device->cdev->dev, "The UID of the DASD has "
3333 			"changed\n");
3334 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3335 	if (rc)
3336 		goto out_err;
3337 
3338 	/* register lcu with alias handling, enable PAV if this is a new lcu */
3339 	is_known = dasd_alias_make_device_known_to_lcu(device);
3340 	if (is_known < 0)
3341 		return is_known;
3342 	if (!is_known) {
3343 		dasd_eckd_validate_server(device);
3344 		dasd_alias_lcu_setup_complete(device);
3345 	} else
3346 		dasd_alias_wait_for_lcu_setup(device);
3347 
3348 	/* RE-Read Configuration Data */
3349 	rc = dasd_eckd_read_conf(device);
3350 	if (rc)
3351 		goto out_err;
3352 
3353 	/* Read Feature Codes */
3354 	dasd_eckd_read_features(device);
3355 
3356 	/* Read Device Characteristics */
3357 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
3358 					 &temp_rdc_data, 64);
3359 	if (rc) {
3360 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
3361 				"Read device characteristic failed, rc=%d", rc);
3362 		goto out_err;
3363 	}
3364 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3365 	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
3366 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3367 
3368 	/* add device to alias management */
3369 	dasd_alias_add_device(device);
3370 
3371 	return 0;
3372 
3373 out_err:
3374 	return -1;
3375 }
3376 
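/*
 * Reload the device after a possible alias reassignment: reread the
 * configuration data, regenerate the UID, update the unit address
 * configuration and report when the base unit address has changed.
 */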
3377 static int dasd_eckd_reload_device(struct dasd_device *device)
3378 {
3379 	struct dasd_eckd_private *private;
3380 	int rc, old_base;
3381 	char print_uid[60];
3382 	struct dasd_uid uid;
3383 	unsigned long flags;
3384 
3385 	private = (struct dasd_eckd_private *) device->private;
3386 
3387 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
3388 	old_base = private->uid.base_unit_addr;
3389 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
3390 
3391 	/* Read Configuration Data */
3392 	rc = dasd_eckd_read_conf(device);
3393 	if (rc)
3394 		goto out_err;
3395 
3396 	rc = dasd_eckd_generate_uid(device);
3397 	if (rc)
3398 		goto out_err;
3399 	/*
3400 	 * update unit address configuration and
3401 	 * add device to alias management
3402 	 */
3403 	dasd_alias_update_add_device(device);
3404 
3405 	dasd_eckd_get_uid(device, &uid);
3406 
3407 	if (old_base != uid.base_unit_addr) {
3408 		if (strlen(uid.vduit) > 0)
3409 			snprintf(print_uid, sizeof(print_uid),
3410 				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
3411 				 uid.ssid, uid.base_unit_addr, uid.vduit);
3412 		else
3413 			snprintf(print_uid, sizeof(print_uid),
3414 				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
3415 				 uid.ssid, uid.base_unit_addr);
3416 
3417 		dev_info(&device->cdev->dev,
3418 			 "An Alias device was reassigned to a new base device "
3419 			 "with UID: %s\n", print_uid);
3420 	}
3421 	return 0;
3422 
3423 out_err:
3424 	return -1;
3425 }
3426 
3427 static struct ccw_driver dasd_eckd_driver = {
3428 	.name	     = "dasd-eckd",
3429 	.owner	     = THIS_MODULE,
3430 	.ids	     = dasd_eckd_ids,
3431 	.probe	     = dasd_eckd_probe,
3432 	.remove      = dasd_generic_remove,
3433 	.set_offline = dasd_generic_set_offline,
3434 	.set_online  = dasd_eckd_set_online,
3435 	.notify      = dasd_generic_notify,
3436 	.freeze      = dasd_generic_pm_freeze,
3437 	.thaw	     = dasd_generic_restore_device,
3438 	.restore     = dasd_generic_restore_device,
3439 };
3440 
3441 /*
3442  * max_blocks is dependent on the amount of storage that is available
3443  * in the static io buffer for each device. Currently each device has
3444  * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
3445  * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
3446  * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
3447  * addition we have one define extent ccw + 16 bytes of data and one
3448  * locate record ccw + 16 bytes of data. That makes:
3449  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
3450  * We want to fit two into the available memory so that we can immediately
3451  * start the next request if one finishes off. That makes 249.5 blocks
3452  * for one request. Give a little safety and the result is 240.
3453  */
3454 static struct dasd_discipline dasd_eckd_discipline = {
3455 	.owner = THIS_MODULE,
3456 	.name = "ECKD",
3457 	.ebcname = "ECKD",
3458 	.max_blocks = 240,
3459 	.check_device = dasd_eckd_check_characteristics,
3460 	.uncheck_device = dasd_eckd_uncheck_device,
3461 	.do_analysis = dasd_eckd_do_analysis,
3462 	.ready_to_online = dasd_eckd_ready_to_online,
3463 	.online_to_ready = dasd_eckd_online_to_ready,
3464 	.fill_geometry = dasd_eckd_fill_geometry,
3465 	.start_IO = dasd_start_IO,
3466 	.term_IO = dasd_term_IO,
3467 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
3468 	.format_device = dasd_eckd_format_device,
3469 	.erp_action = dasd_eckd_erp_action,
3470 	.erp_postaction = dasd_eckd_erp_postaction,
3471 	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
3472 	.build_cp = dasd_eckd_build_alias_cp,
3473 	.free_cp = dasd_eckd_free_alias_cp,
3474 	.dump_sense = dasd_eckd_dump_sense,
3475 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
3476 	.fill_info = dasd_eckd_fill_info,
3477 	.ioctl = dasd_eckd_ioctl,
3478 	.freeze = dasd_eckd_pm_freeze,
3479 	.restore = dasd_eckd_restore_device,
3480 	.reload = dasd_eckd_reload_device,
3481 	.get_uid = dasd_eckd_get_uid,
3482 };
3483 
3484 static int __init
3485 dasd_eckd_init(void)
3486 {
3487 	int ret;
3488 
3489 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
3490 	ret = ccw_driver_register(&dasd_eckd_driver);
3491 	if (!ret)
3492 		wait_for_device_probe();
3493 
3494 	return ret;
3495 }
3496 
3497 static void __exit
3498 dasd_eckd_cleanup(void)
3499 {
3500 	ccw_driver_unregister(&dasd_eckd_driver);
3501 }
3502 
3503 module_init(dasd_eckd_init);
3504 module_exit(dasd_eckd_cleanup);
3505