xref: /openbmc/linux/drivers/s390/block/dasd_eckd.c (revision 5e2b17e7)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4  *		    Horst Hummel <Horst.Hummel@de.ibm.com>
5  *		    Carsten Otte <Cotte@de.ibm.com>
6  *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
7  * Bugreports.to..: <Linux390@de.ibm.com>
8  * Copyright IBM Corp. 1999, 2009
9  * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10  * Author.........: Nigel Hislop <hislop_nigel@emc.com>
11  */
12 
13 #define KMSG_COMPONENT "dasd-eckd"
14 
15 #include <linux/stddef.h>
16 #include <linux/kernel.h>
17 #include <linux/slab.h>
18 #include <linux/hdreg.h>	/* HDIO_GETGEO			    */
19 #include <linux/bio.h>
20 #include <linux/module.h>
21 #include <linux/compat.h>
22 #include <linux/init.h>
23 #include <linux/seq_file.h>
24 
25 #include <asm/css_chars.h>
26 #include <asm/debug.h>
27 #include <asm/idals.h>
28 #include <asm/ebcdic.h>
29 #include <asm/io.h>
30 #include <linux/uaccess.h>
31 #include <asm/cio.h>
32 #include <asm/ccwdev.h>
33 #include <asm/itcw.h>
34 #include <asm/schid.h>
35 #include <asm/chpid.h>
36 
37 #include "dasd_int.h"
38 #include "dasd_eckd.h"
39 
40 #ifdef PRINTK_HEADER
41 #undef PRINTK_HEADER
42 #endif				/* PRINTK_HEADER */
43 #define PRINTK_HEADER "dasd(eckd):"
44 
45 /*
46  * raw track access always map to 64k in memory
47  * so it maps to 16 blocks of 4k per track
48  */
49 #define DASD_RAW_BLOCK_PER_TRACK 16
50 #define DASD_RAW_BLOCKSIZE 4096
51 /* 64k are 128 x 512 byte sectors  */
52 #define DASD_RAW_SECTORS_PER_TRACK 128
53 
54 MODULE_LICENSE("GPL");
55 
56 static struct dasd_discipline dasd_eckd_discipline;
57 
58 /* The ccw bus type uses this table to find devices that it sends to
59  * dasd_eckd_probe */
60 static struct ccw_device_id dasd_eckd_ids[] = {
61 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
62 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
63 	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
64 	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
65 	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
66 	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
67 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
68 	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
69 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
70 	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
71 	{ /* end of list */ },
72 };
73 
74 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75 
76 static struct ccw_driver dasd_eckd_driver; /* see below */
77 
78 static void *rawpadpage;
79 
80 #define INIT_CQR_OK 0
81 #define INIT_CQR_UNFORMATTED 1
82 #define INIT_CQR_ERROR 2
83 
84 /* emergency request for reserve/release */
85 static struct {
86 	struct dasd_ccw_req cqr;
87 	struct ccw1 ccw;
88 	char data[32];
89 } *dasd_reserve_req;
90 static DEFINE_MUTEX(dasd_reserve_mutex);
91 
92 /* definitions for the path verification worker */
93 struct path_verification_work_data {
94 	struct work_struct worker;
95 	struct dasd_device *device;
96 	struct dasd_ccw_req cqr;
97 	struct ccw1 ccw;
98 	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
99 	int isglobal;
100 	__u8 tbvpm;
101 };
102 static struct path_verification_work_data *path_verification_worker;
103 static DEFINE_MUTEX(dasd_path_verification_mutex);
104 
105 struct check_attention_work_data {
106 	struct work_struct worker;
107 	struct dasd_device *device;
108 	__u8 lpum;
109 };
110 
111 static int dasd_eckd_ext_pool_id(struct dasd_device *);
112 static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
113 			struct dasd_device *, struct dasd_device *,
114 			unsigned int, int, unsigned int, unsigned int,
115 			unsigned int, unsigned int);
116 
117 /* initial attempt at a probe function. this can be simplified once
118  * the other detection code is gone */
119 static int
120 dasd_eckd_probe (struct ccw_device *cdev)
121 {
122 	int ret;
123 
124 	/* set ECKD specific ccw-device options */
125 	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
126 				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
127 	if (ret) {
128 		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
129 				"dasd_eckd_probe: could not set "
130 				"ccw-device options");
131 		return ret;
132 	}
133 	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
134 	return ret;
135 }
136 
/*
 * Set-online callback for the ccw bus type: delegates to the generic
 * DASD set_online code with the ECKD discipline.
 */
static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}
142 
143 static const int sizes_trk0[] = { 28, 148, 84 };
144 #define LABEL_SIZE 140
145 
146 /* head and record addresses of count_area read in analysis ccw */
147 static const int count_area_head[] = { 0, 0, 0, 0, 1 };
148 static const int count_area_rec[] = { 1, 2, 3, 4, 1 };
149 
/*
 * Divide d1 by d2 and round the quotient up to the next whole number.
 * d2 must be non-zero. Uses div/mod instead of (d1 + d2 - 1) / d2 so
 * the intermediate sum cannot wrap around for d1 close to UINT_MAX.
 */
static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	unsigned int q = d1 / d2;

	if (d1 % d2)
		q++;
	return q;
}
155 
156 static unsigned int
157 recs_per_track(struct dasd_eckd_characteristics * rdc,
158 	       unsigned int kl, unsigned int dl)
159 {
160 	int dn, kn;
161 
162 	switch (rdc->dev_type) {
163 	case 0x3380:
164 		if (kl)
165 			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
166 				       ceil_quot(dl + 12, 32));
167 		else
168 			return 1499 / (15 + ceil_quot(dl + 12, 32));
169 	case 0x3390:
170 		dn = ceil_quot(dl + 6, 232) + 1;
171 		if (kl) {
172 			kn = ceil_quot(kl + 6, 232) + 1;
173 			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
174 				       9 + ceil_quot(dl + 6 * dn, 34));
175 		} else
176 			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
177 	case 0x9345:
178 		dn = ceil_quot(dl + 6, 232) + 1;
179 		if (kl) {
180 			kn = ceil_quot(kl + 6, 232) + 1;
181 			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
182 				       ceil_quot(dl + 6 * dn, 34));
183 		} else
184 			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
185 	}
186 	return 0;
187 }
188 
189 static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
190 {
191 	geo->cyl = (__u16) cyl;
192 	geo->head = cyl >> 16;
193 	geo->head <<= 4;
194 	geo->head |= head;
195 }
196 
197 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
198 		     struct dasd_device *device)
199 {
200 	struct dasd_eckd_private *private = device->private;
201 	int rc;
202 
203 	rc = get_phys_clock(&data->ep_sys_time);
204 	/*
205 	 * Ignore return code if XRC is not supported or
206 	 * sync clock is switched off
207 	 */
208 	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
209 	    rc == -EOPNOTSUPP || rc == -EACCES)
210 		return 0;
211 
212 	/* switch on System Time Stamp - needed for XRC Support */
213 	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
214 	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
215 
216 	if (ccw) {
217 		ccw->count = sizeof(struct DE_eckd_data);
218 		ccw->flags |= CCW_FLAG_SLI;
219 	}
220 
221 	return rc;
222 }
223 
/*
 * Build a Define Extent CCW and its DE data area for the track range
 * [trk, totrk] and the channel command 'cmd'.
 *
 * ccw:     CCW to initialize; may be NULL when only the data area is
 *          needed (e.g. when embedded in a PFX command, see prefix_LRE)
 * data:    DE_eckd_data to fill; always cleared first
 * trk:     first track of the extent
 * totrk:   last track of the extent
 * cmd:     ECKD command code; selects permission mask, cache
 *          attributes and whether a timestamp is added
 * device:  base device the request is built for
 * blksize: block size, only stored for WRITE_TRACK_DATA
 *
 * Returns 0 or the error code from set_timestamp().
 */
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	/* set permission mask and cache attributes per command class */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		/* writes carry a system timestamp when XRC is in use */
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		/* formatting writes additionally need the auth bit */
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		/* unknown command: only mode/extent are set, no perm mask */
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	/*
	 * Regular Data Format Mode for the listed control unit types,
	 * except on the first two tracks when the compatible disk
	 * layout (cdl) is in use.
	 */
	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
330 
331 
/*
 * Build a Locate Record Extended CCW and its LRE data area.
 *
 * ccw:        CCW to initialize; may be NULL when only the data area
 *             is needed (e.g. when embedded in a PFX command)
 * data:       LRE_eckd_data to fill; always cleared first
 * trk:        track the operation starts on
 * rec_on_trk: record number on that track (0 = track start)
 * count:      number of records (record based I/O) or number of
 *             tracks (track based I/O) — see comment below
 * cmd:        ECKD command code; selects orientation/operation bits
 * device:     base device the request is built for
 * reclen:     record length in bytes
 * tlf:        transfer length factor, used for READ_TRACK_DATA
 *
 * BUG()s on an unknown command code.
 */
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		/* WRITE_FULL_TRACK carries a 2-byte extended parameter */
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		/*
		 * Estimate the angular sector of the record from the
		 * record length; constants presumably match the device
		 * geometry of 3390/3380 — mirrors locate_record().
		 */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself counts as one more record */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		/*
		 * Build a bitmask of the tracks to write: one bit per
		 * track, spread over the two extended parameter bytes.
		 */
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			    "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
473 
/*
 * Build a Prefix CCW. The PFX data embeds a define extent and — for
 * format 1 — a locate record extension, which are filled in by
 * calling define_extent() and locate_record_ext() on the embedded
 * data areas (with NULL ccw, so only the data is written).
 *
 * basedev/startdev may differ when the request is started on a PAV
 * alias device; the validity bits tell the control unit to verify
 * the base address in that case.
 *
 * format must be 0 (define extent only) or 1 (with LRE); any other
 * value hits BUG(). Returns the define_extent() result.
 */
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	/* both point into the pfxdata area filled below */
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	/* WRITE_FULL_TRACK carries 2 extra extended parameter bytes */
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid'   */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}
540 
/*
 * Build a format 0 Prefix CCW (define extent only, no locate record
 * extension). Convenience wrapper around prefix_LRE() with the LRE
 * related parameters zeroed.
 */
static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
548 
/*
 * Build a (non-extended) Locate Record CCW and its data area.
 * Largely parallel to locate_record_ext(): selects the orientation
 * and operation bits from the command code and fills in the seek and
 * search addresses for track 'trk' / record 'rec_on_trk'.
 *
 * Unlike locate_record_ext() an unknown command code is only logged,
 * not BUG()ed.
 */
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		  "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		  trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		/*
		 * Estimate the angular sector of the record from the
		 * record length; constants presumably match the device
		 * geometry of 3390/3380 — mirrors locate_record_ext().
		 */
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		/* record zero itself counts as one more record */
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
650 
651 /*
652  * Returns 1 if the block is one of the special blocks that needs
653  * to get read/written with the KD variant of the command.
654  * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
655  * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
656  * Luckily the KD variants differ only by one bit (0x08) from the
657  * normal variant. So don't wonder about code like:
658  * if (dasd_eckd_cdl_special(blk_per_trk, recid))
659  *         ccw->cmd_code |= 0x8;
660  */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	/* special blocks: records 0-2 of track 0 and all of track 1 */
	return recid < 3 ||
	       (recid >= blk_per_trk && recid < 2 * blk_per_trk);
}
672 
673 /*
674  * Returns the record size for the special blocks of the cdl format.
675  * Only returns something useful if dasd_eckd_cdl_special is true
676  * for the recid.
677  */
678 static inline int
679 dasd_eckd_cdl_reclen(int recid)
680 {
681 	if (recid < 3)
682 		return sizes_trk0[recid];
683 	return LABEL_SIZE;
684 }
/*
 * Derive the unique id of a device from its configuration data
 * records: vendor and serial come from the NED (converted from
 * EBCDIC to ASCII), the ssid from the GNEQ, the unit address type
 * from the SNEQ (UA_BASE_DEVICE when no SNEQ is present) and the
 * virtual device token (vduit) from the VD SNEQ, rendered as hex.
 * Caller is expected to hold the ccwdev lock (see
 * dasd_eckd_generate_uid).
 */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		/* 16 token bytes -> 32 hex characters in uid->vduit */
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
715 
716 /*
717  * Generate device unique id that specifies the physical device.
718  */
719 static int dasd_eckd_generate_uid(struct dasd_device *device)
720 {
721 	struct dasd_eckd_private *private = device->private;
722 	unsigned long flags;
723 
724 	if (!private)
725 		return -ENODEV;
726 	if (!private->ned || !private->gneq)
727 		return -ENODEV;
728 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
729 	create_uid(private);
730 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
731 	return 0;
732 }
733 
734 static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
735 {
736 	struct dasd_eckd_private *private = device->private;
737 	unsigned long flags;
738 
739 	if (private) {
740 		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
741 		*uid = private->uid;
742 		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
743 		return 0;
744 	}
745 	return -EINVAL;
746 }
747 
/*
 * Compare the device UID with the UID derived from the given
 * dasd_eckd_private structure (whose conf data records must already
 * be identified, see dasd_eckd_identify_conf_parts).
 * Note: create_uid() overwrites private->uid as a side effect.
 * Return 0 for match (memcmp semantics).
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}
762 
/*
 * Initialize a Read Configuration Data (RCD) request: seed the
 * buffer, build the single-CCW channel program and fill in the cqr
 * bookkeeping fields. The request is restricted to the channel
 * path(s) given by lpm and marked as a path verification request.
 */
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	/* single RCD CCW reading DASD_ECKD_RCD_DATA_SIZE bytes */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}
795 
796 /*
797  * Wakeup helper for read_conf
798  * if the cqr is not done and needs some error recovery
799  * the buffer has to be re-initialized with the EBCDIC "V1.0"
800  * to show support for virtual device SNEQ
801  */
802 static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
803 {
804 	struct ccw1 *ccw;
805 	__u8 *rcd_buffer;
806 
807 	if (cqr->status !=  DASD_CQR_DONE) {
808 		ccw = cqr->cpaddr;
809 		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
810 		memset(rcd_buffer, 0, sizeof(*rcd_buffer));
811 
812 		rcd_buffer[0] = 0xE5;
813 		rcd_buffer[1] = 0xF1;
814 		rcd_buffer[2] = 0x4B;
815 		rcd_buffer[3] = 0xF0;
816 	}
817 	dasd_wakeup_cb(cqr, data);
818 }
819 
820 static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
821 					   struct dasd_ccw_req *cqr,
822 					   __u8 *rcd_buffer,
823 					   __u8 lpm)
824 {
825 	struct ciw *ciw;
826 	int rc;
827 	/*
828 	 * sanity check: scan for RCD command in extended SenseID data
829 	 * some devices do not support RCD
830 	 */
831 	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
832 	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
833 		return -EOPNOTSUPP;
834 
835 	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
836 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
837 	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
838 	cqr->retries = 5;
839 	cqr->callback = read_conf_cb;
840 	rc = dasd_sleep_on_immediatly(cqr);
841 	return rc;
842 }
843 
/*
 * Read the configuration data of one channel path (lpm) via the RCD
 * channel command.
 *
 * On success *rcd_buffer points to a freshly allocated buffer of
 * DASD_ECKD_RCD_DATA_SIZE bytes (ownership passes to the caller) and
 * *rcd_buffer_size is set accordingly. On any error both are reset
 * to NULL/0 and a negative error code is returned; -EOPNOTSUPP means
 * the device does not announce RCD support in its SenseID data.
 */
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data ara */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}
895 
896 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
897 {
898 
899 	struct dasd_sneq *sneq;
900 	int i, count;
901 
902 	private->ned = NULL;
903 	private->sneq = NULL;
904 	private->vdsneq = NULL;
905 	private->gneq = NULL;
906 	count = private->conf_len / sizeof(struct dasd_sneq);
907 	sneq = (struct dasd_sneq *)private->conf_data;
908 	for (i = 0; i < count; ++i) {
909 		if (sneq->flags.identifier == 1 && sneq->format == 1)
910 			private->sneq = sneq;
911 		else if (sneq->flags.identifier == 1 && sneq->format == 4)
912 			private->vdsneq = (struct vd_sneq *)sneq;
913 		else if (sneq->flags.identifier == 2)
914 			private->gneq = (struct dasd_gneq *)sneq;
915 		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
916 			private->ned = (struct dasd_ned *)sneq;
917 		sneq++;
918 	}
919 	if (!private->ned || !private->gneq) {
920 		private->ned = NULL;
921 		private->sneq = NULL;
922 		private->vdsneq = NULL;
923 		private->gneq = NULL;
924 		return -EINVAL;
925 	}
926 	return 0;
927 
928 };
929 
930 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
931 {
932 	struct dasd_gneq *gneq;
933 	int i, count, found;
934 
935 	count = conf_len / sizeof(*gneq);
936 	gneq = (struct dasd_gneq *)conf_data;
937 	found = 0;
938 	for (i = 0; i < count; ++i) {
939 		if (gneq->flags.identifier == 2) {
940 			found = 1;
941 			break;
942 		}
943 		gneq++;
944 	}
945 	if (found)
946 		return ((char *)gneq)[18] & 0x07;
947 	else
948 		return 0;
949 }
950 
951 static void dasd_eckd_clear_conf_data(struct dasd_device *device)
952 {
953 	struct dasd_eckd_private *private = device->private;
954 	int i;
955 
956 	private->conf_data = NULL;
957 	private->conf_len = 0;
958 	for (i = 0; i < 8; i++) {
959 		kfree(device->path[i].conf_data);
960 		device->path[i].conf_data = NULL;
961 		device->path[i].cssid = 0;
962 		device->path[i].ssid = 0;
963 		device->path[i].chpid = 0;
964 	}
965 }
966 
967 
/*
 * Read the configuration data on every operational channel path and build
 * the device UID from it.
 *
 * The first valid configuration data set becomes the device-wide copy
 * (private->conf_data) and is used to generate the device UID; note that
 * the same buffer is also stored as that path's device->path[pos].conf_data,
 * so the two pointers alias one allocation.  Every further path's data is
 * parsed into a temporary dasd_eckd_private and its UID compared against
 * the device UID; paths leading to a different device are excluded from
 * the operational path mask and recorded in the "cable" path mask.
 *
 * Returns 0 on success, -EINVAL if at least one path leads to a different
 * device, or the negative error code of a failed read.
 */
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	ccw_device_get_schid(device->cdev, &sch_id);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm>>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				/* data unusable - drop it and try next path */
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			/*
			 * NOTE(review): the chp descriptor is looked up by
			 * path position - assumes descriptor index matches
			 * pathmask_to_pos(lpm); confirm against cio API.
			 */
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			/*
			 * build device UID that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			/* further paths: verify they lead to the same device */
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				/* UID mismatch - report both UIDs and fence the path */
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			pos = pathmask_to_pos(lpm);
			/* store per path conf_data */
			device->path[pos].conf_data = conf_data;
			device->path[pos].cssid = sch_id.cssid;
			device->path[pos].ssid = sch_id.ssid;
			chp_desc = ccw_device_get_chp_desc(device->cdev, pos);
			if (chp_desc)
				device->path[pos].chpid = chp_desc->chpid;
			kfree(chp_desc);
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}
		/* classify the path by its access mode from the conf data */
		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			/* first operational path brings the device online */
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	return path_err;
}
1113 
1114 static u32 get_fcx_max_data(struct dasd_device *device)
1115 {
1116 	struct dasd_eckd_private *private = device->private;
1117 	int fcx_in_css, fcx_in_gneq, fcx_in_features;
1118 	int tpm, mdc;
1119 
1120 	if (dasd_nofcx)
1121 		return 0;
1122 	/* is transport mode supported? */
1123 	fcx_in_css = css_general_characteristics.fcx;
1124 	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
1125 	fcx_in_features = private->features.feature[40] & 0x80;
1126 	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
1127 
1128 	if (!tpm)
1129 		return 0;
1130 
1131 	mdc = ccw_device_get_mdc(device->cdev, 0);
1132 	if (mdc < 0) {
1133 		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
1134 		return 0;
1135 	} else {
1136 		return (u32)mdc * FCX_MAX_DATA_FACTOR;
1137 	}
1138 }
1139 
1140 static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
1141 {
1142 	struct dasd_eckd_private *private = device->private;
1143 	int mdc;
1144 	u32 fcx_max_data;
1145 
1146 	if (private->fcx_max_data) {
1147 		mdc = ccw_device_get_mdc(device->cdev, lpm);
1148 		if ((mdc < 0)) {
1149 			dev_warn(&device->cdev->dev,
1150 				 "Detecting the maximum data size for zHPF "
1151 				 "requests failed (rc=%d) for a new path %x\n",
1152 				 mdc, lpm);
1153 			return mdc;
1154 		}
1155 		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
1156 		if (fcx_max_data < private->fcx_max_data) {
1157 			dev_warn(&device->cdev->dev,
1158 				 "The maximum data size for zHPF requests %u "
1159 				 "on a new path %x is below the active maximum "
1160 				 "%u\n", fcx_max_data, lpm,
1161 				 private->fcx_max_data);
1162 			return -EACCES;
1163 		}
1164 	}
1165 	return 0;
1166 }
1167 
/*
 * Re-read the configuration data over the first usable operational path
 * and rebuild the device UID from it.
 *
 * Used when a path/device UID comparison failed, e.g. after a z/VM
 * hyperswap may have exchanged the device behind the subchannel.
 *
 * Returns 0 on success, -ENODEV if no path delivered usable configuration
 * data, or the error code of the failed read.
 */
static int rebuild_device_uid(struct dasd_device *device,
			      struct path_verification_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		/*
		 * NOTE(review): overwrites the device conf_data in place -
		 * assumes private->conf_data holds at least
		 * DASD_ECKD_RCD_DATA_SIZE bytes; confirm against the
		 * allocation done in dasd_eckd_read_conf_lpm().
		 */
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
1206 
/*
 * Worker that verifies the paths in data->tbvpm ("to be verified" path
 * mask): it reads configuration data on each path, classifies the path,
 * checks the zHPF limit, and confirms the path still leads to the same
 * device (same UID) before merging it back into the device path masks.
 *
 * Local masks collected per loop iteration:
 *   opm     - paths found operational
 *   npm/ppm - non-preferred / preferred paths (by access mode)
 *   epm     - paths to be re-verified later (-EAGAIN)
 *   hpfpm   - paths rejected for an insufficient zHPF data size
 *   cablepm - paths rejected because they lead to a different device
 */
static void do_path_verification_work(struct work_struct *work)
{
	struct path_verification_work_data *data;
	struct dasd_device *device;
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	unsigned long flags;
	char print_uid[60];
	int rc;

	data = container_of(work, struct path_verification_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}
	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			/* classify by access mode found in the conf data */
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			/* path cannot sustain the active zHPF limit */
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left
		 * in other case the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	}
	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_path_verification_mutex);
	else
		kfree(data);
}
1375 
1376 static int dasd_eckd_verify_path(struct dasd_device *device, __u8 lpm)
1377 {
1378 	struct path_verification_work_data *data;
1379 
1380 	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
1381 	if (!data) {
1382 		if (mutex_trylock(&dasd_path_verification_mutex)) {
1383 			data = path_verification_worker;
1384 			data->isglobal = 1;
1385 		} else
1386 			return -ENOMEM;
1387 	} else {
1388 		memset(data, 0, sizeof(*data));
1389 		data->isglobal = 0;
1390 	}
1391 	INIT_WORK(&data->worker, do_path_verification_work);
1392 	dasd_get_device(device);
1393 	data->device = device;
1394 	data->tbvpm = lpm;
1395 	schedule_work(&data->worker);
1396 	return 0;
1397 }
1398 
1399 static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
1400 {
1401 	struct dasd_eckd_private *private = device->private;
1402 	unsigned long flags;
1403 
1404 	if (!private->fcx_max_data)
1405 		private->fcx_max_data = get_fcx_max_data(device);
1406 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
1407 	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
1408 	dasd_schedule_device_bh(device);
1409 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
1410 }
1411 
/*
 * Read the device feature codes via a PSF/RSSD channel program
 * (Read Subsystem Data, suborder "Read Feature Codes") and store the
 * result in private->features.
 *
 * On error the feature data stays zeroed (cleared at the top) and a
 * warning is logged; the (negative) rc of dasd_sleep_on is returned.
 */
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	/* first CCW: PSF, command-chained to the following RSSD */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		/* copy the feature data out before the request is freed */
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
1473 
/*
 * Read Volume Information - Volume Storage Query.
 *
 * Issues a PSF/RSSD channel program with the VSQ suborder and, on
 * success, stores the result in private->vsq (ESE flag, extent pool id,
 * space figures).  Returns 0 on success, 0 for alias devices (where the
 * command is not applicable), or the negative rc of the failed request.
 */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	/* first CCW: PSF, command-chained to the following RSSD */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	/* SLI: tolerate an incorrect-length indication from the device */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		dev_warn(&device->cdev->dev,
			 "Reading the volume storage information failed with rc=%d\n", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1542 
1543 static int dasd_eckd_is_ese(struct dasd_device *device)
1544 {
1545 	struct dasd_eckd_private *private = device->private;
1546 
1547 	return private->vsq.vol_info.ese;
1548 }
1549 
1550 static int dasd_eckd_ext_pool_id(struct dasd_device *device)
1551 {
1552 	struct dasd_eckd_private *private = device->private;
1553 
1554 	return private->vsq.extent_pool_id;
1555 }
1556 
1557 /*
1558  * This value represents the total amount of available space. As more space is
1559  * allocated by ESE volumes, this value will decrease.
1560  * The data for this value is therefore updated on any call.
1561  */
1562 static int dasd_eckd_space_configured(struct dasd_device *device)
1563 {
1564 	struct dasd_eckd_private *private = device->private;
1565 	int rc;
1566 
1567 	rc = dasd_eckd_read_vol_info(device);
1568 
1569 	return rc ? : private->vsq.space_configured;
1570 }
1571 
1572 /*
1573  * The value of space allocated by an ESE volume may have changed and is
1574  * therefore updated on any call.
1575  */
1576 static int dasd_eckd_space_allocated(struct dasd_device *device)
1577 {
1578 	struct dasd_eckd_private *private = device->private;
1579 	int rc;
1580 
1581 	rc = dasd_eckd_read_vol_info(device);
1582 
1583 	return rc ? : private->vsq.space_allocated;
1584 }
1585 
1586 static int dasd_eckd_logical_capacity(struct dasd_device *device)
1587 {
1588 	struct dasd_eckd_private *private = device->private;
1589 
1590 	return private->vsq.logical_capacity;
1591 }
1592 
1593 static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
1594 					struct dasd_rssd_lcq *lcq)
1595 {
1596 	struct dasd_eckd_private *private = device->private;
1597 	int pool_id = dasd_eckd_ext_pool_id(device);
1598 	struct dasd_ext_pool_sum eps;
1599 	int i;
1600 
1601 	for (i = 0; i < lcq->pool_count; i++) {
1602 		eps = lcq->ext_pool_sum[i];
1603 		if (eps.pool_id == pool_id) {
1604 			memcpy(&private->eps, &eps,
1605 			       sizeof(struct dasd_ext_pool_sum));
1606 		}
1607 	}
1608 }
1609 
/*
 * Read Extent Pool Information - Logical Configuration Query.
 *
 * Issues a PSF/RSSD channel program with the LCQ suborder and, on
 * success, caches the summary of this device's extent pool via
 * dasd_eckd_cpy_ext_pool_data().  Returns 0 on success, 0 for alias
 * devices (command not applicable), or the negative rc of the request.
 */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	/* first CCW: PSF, command-chained to the following RSSD */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	/* SLI: tolerate an incorrect-length indication from the device */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		dev_warn(&device->cdev->dev,
			 "Reading the logical configuration failed with rc=%d\n", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
1676 
1677 /*
1678  * Depending on the device type, the extent size is specified either as
1679  * cylinders per extent (CKD) or size per extent (FBA)
1680  * A 1GB size corresponds to 1113cyl, and 16MB to 21cyl.
1681  */
1682 static int dasd_eckd_ext_size(struct dasd_device *device)
1683 {
1684 	struct dasd_eckd_private *private = device->private;
1685 	struct dasd_ext_pool_sum eps = private->eps;
1686 
1687 	if (!eps.flags.extent_size_valid)
1688 		return 0;
1689 	if (eps.extent_size.size_1G)
1690 		return 1113;
1691 	if (eps.extent_size.size_16M)
1692 		return 21;
1693 
1694 	return 0;
1695 }
1696 
1697 static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
1698 {
1699 	struct dasd_eckd_private *private = device->private;
1700 
1701 	return private->eps.warn_thrshld;
1702 }
1703 
1704 static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
1705 {
1706 	struct dasd_eckd_private *private = device->private;
1707 
1708 	return private->eps.flags.capacity_at_warnlevel;
1709 }
1710 
1711 /*
1712  * Extent Pool out of space
1713  */
1714 static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
1715 {
1716 	struct dasd_eckd_private *private = device->private;
1717 
1718 	return private->eps.flags.pool_oos;
1719 }
1720 
1721 /*
1722  * Build CP for Perform Subsystem Function - SSC.
1723  */
1724 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
1725 						    int enable_pav)
1726 {
1727 	struct dasd_ccw_req *cqr;
1728 	struct dasd_psf_ssc_data *psf_ssc_data;
1729 	struct ccw1 *ccw;
1730 
1731 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
1732 				  sizeof(struct dasd_psf_ssc_data),
1733 				   device, NULL);
1734 
1735 	if (IS_ERR(cqr)) {
1736 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1737 			   "Could not allocate PSF-SSC request");
1738 		return cqr;
1739 	}
1740 	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
1741 	psf_ssc_data->order = PSF_ORDER_SSC;
1742 	psf_ssc_data->suborder = 0xc0;
1743 	if (enable_pav) {
1744 		psf_ssc_data->suborder |= 0x08;
1745 		psf_ssc_data->reserved[0] = 0x88;
1746 	}
1747 	ccw = cqr->cpaddr;
1748 	ccw->cmd_code = DASD_ECKD_CCW_PSF;
1749 	ccw->cda = (__u32)(addr_t)psf_ssc_data;
1750 	ccw->count = 66;
1751 
1752 	cqr->startdev = device;
1753 	cqr->memdev = device;
1754 	cqr->block = NULL;
1755 	cqr->retries = 256;
1756 	cqr->expires = 10*HZ;
1757 	cqr->buildclk = get_tod_clock();
1758 	cqr->status = DASD_CQR_FILLED;
1759 	return cqr;
1760 }
1761 
1762 /*
1763  * Perform Subsystem Function.
1764  * It is necessary to trigger CIO for channel revalidation since this
1765  * call might change behaviour of DASD devices.
1766  */
1767 static int
1768 dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
1769 		  unsigned long flags)
1770 {
1771 	struct dasd_ccw_req *cqr;
1772 	int rc;
1773 
1774 	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
1775 	if (IS_ERR(cqr))
1776 		return PTR_ERR(cqr);
1777 
1778 	/*
1779 	 * set flags e.g. turn on failfast, to prevent blocking
1780 	 * the calling function should handle failed requests
1781 	 */
1782 	cqr->flags |= flags;
1783 
1784 	rc = dasd_sleep_on(cqr);
1785 	if (!rc)
1786 		/* trigger CIO to reprobe devices */
1787 		css_schedule_reprobe();
1788 	else if (cqr->intrc == -EAGAIN)
1789 		rc = -EAGAIN;
1790 
1791 	dasd_sfree_request(cqr, cqr->memdev);
1792 	return rc;
1793 }
1794 
1795 /*
1796  * Valide storage server of current device.
1797  */
1798 static int dasd_eckd_validate_server(struct dasd_device *device,
1799 				     unsigned long flags)
1800 {
1801 	struct dasd_eckd_private *private = device->private;
1802 	int enable_pav, rc;
1803 
1804 	if (private->uid.type == UA_BASE_PAV_ALIAS ||
1805 	    private->uid.type == UA_HYPER_PAV_ALIAS)
1806 		return 0;
1807 	if (dasd_nopav || MACHINE_IS_VM)
1808 		enable_pav = 0;
1809 	else
1810 		enable_pav = 1;
1811 	rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1812 
1813 	/* may be requested feature is not available on server,
1814 	 * therefore just report error and go ahead */
1815 	DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1816 			"returned rc=%d", private->uid.ssid, rc);
1817 	return rc;
1818 }
1819 
1820 /*
1821  * worker to do a validate server in case of a lost pathgroup
1822  */
1823 static void dasd_eckd_do_validate_server(struct work_struct *work)
1824 {
1825 	struct dasd_device *device = container_of(work, struct dasd_device,
1826 						  kick_validate);
1827 	unsigned long flags = 0;
1828 
1829 	set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
1830 	if (dasd_eckd_validate_server(device, flags)
1831 	    == -EAGAIN) {
1832 		/* schedule worker again if failed */
1833 		schedule_work(&device->kick_validate);
1834 		return;
1835 	}
1836 
1837 	dasd_put_device(device);
1838 }
1839 
1840 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
1841 {
1842 	dasd_get_device(device);
1843 	/* exit if device not online or in offline processing */
1844 	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
1845 	   device->state < DASD_STATE_ONLINE) {
1846 		dasd_put_device(device);
1847 		return;
1848 	}
1849 	/* queue call to do_validate_server to the kernel event daemon. */
1850 	if (!schedule_work(&device->kick_validate))
1851 		dasd_put_device(device);
1852 }
1853 
1854 /*
1855  * Check device characteristics.
1856  * If the device is accessible using ECKD discipline, the device is enabled.
1857  */
1858 static int
1859 dasd_eckd_check_characteristics(struct dasd_device *device)
1860 {
1861 	struct dasd_eckd_private *private = device->private;
1862 	struct dasd_block *block;
1863 	struct dasd_uid temp_uid;
1864 	int rc, i;
1865 	int readonly;
1866 	unsigned long value;
1867 
1868 	/* setup work queue for validate server*/
1869 	INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
1870 	/* setup work queue for summary unit check */
1871 	INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
1872 
1873 	if (!ccw_device_is_pathgroup(device->cdev)) {
1874 		dev_warn(&device->cdev->dev,
1875 			 "A channel path group could not be established\n");
1876 		return -EIO;
1877 	}
1878 	if (!ccw_device_is_multipath(device->cdev)) {
1879 		dev_info(&device->cdev->dev,
1880 			 "The DASD is not operating in multipath mode\n");
1881 	}
1882 	if (!private) {
1883 		private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
1884 		if (!private) {
1885 			dev_warn(&device->cdev->dev,
1886 				 "Allocating memory for private DASD data "
1887 				 "failed\n");
1888 			return -ENOMEM;
1889 		}
1890 		device->private = private;
1891 	} else {
1892 		memset(private, 0, sizeof(*private));
1893 	}
1894 	/* Invalidate status of initial analysis. */
1895 	private->init_cqr_status = -1;
1896 	/* Set default cache operations. */
1897 	private->attrib.operation = DASD_NORMAL_CACHE;
1898 	private->attrib.nr_cyl = 0;
1899 
1900 	/* Read Configuration Data */
1901 	rc = dasd_eckd_read_conf(device);
1902 	if (rc)
1903 		goto out_err1;
1904 
1905 	/* set some default values */
1906 	device->default_expires = DASD_EXPIRES;
1907 	device->default_retries = DASD_RETRIES;
1908 	device->path_thrhld = DASD_ECKD_PATH_THRHLD;
1909 	device->path_interval = DASD_ECKD_PATH_INTERVAL;
1910 
1911 	if (private->gneq) {
1912 		value = 1;
1913 		for (i = 0; i < private->gneq->timeout.value; i++)
1914 			value = 10 * value;
1915 		value = value * private->gneq->timeout.number;
1916 		/* do not accept useless values */
1917 		if (value != 0 && value <= DASD_EXPIRES_MAX)
1918 			device->default_expires = value;
1919 	}
1920 
1921 	dasd_eckd_get_uid(device, &temp_uid);
1922 	if (temp_uid.type == UA_BASE_DEVICE) {
1923 		block = dasd_alloc_block();
1924 		if (IS_ERR(block)) {
1925 			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
1926 					"could not allocate dasd "
1927 					"block structure");
1928 			rc = PTR_ERR(block);
1929 			goto out_err1;
1930 		}
1931 		device->block = block;
1932 		block->base = device;
1933 	}
1934 
1935 	/* register lcu with alias handling, enable PAV */
1936 	rc = dasd_alias_make_device_known_to_lcu(device);
1937 	if (rc)
1938 		goto out_err2;
1939 
1940 	dasd_eckd_validate_server(device, 0);
1941 
1942 	/* device may report different configuration data after LCU setup */
1943 	rc = dasd_eckd_read_conf(device);
1944 	if (rc)
1945 		goto out_err3;
1946 
1947 	/* Read Feature Codes */
1948 	dasd_eckd_read_features(device);
1949 
1950 	/* Read Volume Information */
1951 	rc = dasd_eckd_read_vol_info(device);
1952 	if (rc)
1953 		goto out_err3;
1954 
1955 	/* Read Extent Pool Information */
1956 	rc = dasd_eckd_read_ext_pool_info(device);
1957 	if (rc)
1958 		goto out_err3;
1959 
1960 	/* Read Device Characteristics */
1961 	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
1962 					 &private->rdc_data, 64);
1963 	if (rc) {
1964 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
1965 				"Read device characteristic failed, rc=%d", rc);
1966 		goto out_err3;
1967 	}
1968 
1969 	if ((device->features & DASD_FEATURE_USERAW) &&
1970 	    !(private->rdc_data.facilities.RT_in_LR)) {
1971 		dev_err(&device->cdev->dev, "The storage server does not "
1972 			"support raw-track access\n");
1973 		rc = -EINVAL;
1974 		goto out_err3;
1975 	}
1976 
1977 	/* find the valid cylinder size */
1978 	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
1979 	    private->rdc_data.long_no_cyl)
1980 		private->real_cyl = private->rdc_data.long_no_cyl;
1981 	else
1982 		private->real_cyl = private->rdc_data.no_cyl;
1983 
1984 	private->fcx_max_data = get_fcx_max_data(device);
1985 
1986 	readonly = dasd_device_is_ro(device);
1987 	if (readonly)
1988 		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
1989 
1990 	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
1991 		 "with %d cylinders, %d heads, %d sectors%s\n",
1992 		 private->rdc_data.dev_type,
1993 		 private->rdc_data.dev_model,
1994 		 private->rdc_data.cu_type,
1995 		 private->rdc_data.cu_model.model,
1996 		 private->real_cyl,
1997 		 private->rdc_data.trk_per_cyl,
1998 		 private->rdc_data.sec_per_trk,
1999 		 readonly ? ", read-only device" : "");
2000 	return 0;
2001 
2002 out_err3:
2003 	dasd_alias_disconnect_device_from_lcu(device);
2004 out_err2:
2005 	dasd_free_block(device->block);
2006 	device->block = NULL;
2007 out_err1:
2008 	kfree(private->conf_data);
2009 	kfree(device->private);
2010 	device->private = NULL;
2011 	return rc;
2012 }
2013 
/*
 * Undo dasd_eckd_check_characteristics(): detach the device from its LCU
 * and release all configuration-data buffers.
 */
static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	/* nothing to do if the device never completed initialization */
	if (!private)
		return;

	dasd_alias_disconnect_device_from_lcu(device);
	/* these pointers reference the conf_data buffers freed below */
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		/*
		 * private->conf_data may alias one of the per-path buffers;
		 * if it was just freed, clear it so that the final
		 * kfree(private->conf_data) below cannot double-free it.
		 * (the comparison only uses the pointer value, the buffer
		 * itself is not accessed after the kfree above)
		 */
		if ((__u8 *)device->path[i].conf_data ==
		    private->conf_data) {
			private->conf_data = NULL;
			private->conf_len = 0;
		}
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
	}
	/* no-op if conf_data aliased a path buffer (NULLed above) */
	kfree(private->conf_data);
	private->conf_data = NULL;
}
2043 
/*
 * Build the channel program for the initial volume analysis: read the
 * count fields of the first four records on track 0 and the first record
 * on track 1 into private->count_area, so the disk layout (CDL/LDL) can
 * be evaluated later by dasd_eckd_end_analysis().
 *
 * Returns the prepared request or an ERR_PTR() on allocation failure.
 */
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	/* 1 DE + 2 LO + 5 read-count CCWs = 8 */
	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
				   NULL);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 2 tracks. */
	define_extent(ccw++, cqr->data, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	/* ccw[-1] is the previously built CCW; chain it to the next one */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 1. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 1, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 255;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
2101 
2102 /* differentiate between 'no record found' and any other error */
2103 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr)
2104 {
2105 	char *sense;
2106 	if (init_cqr->status == DASD_CQR_DONE)
2107 		return INIT_CQR_OK;
2108 	else if (init_cqr->status == DASD_CQR_NEED_ERP ||
2109 		 init_cqr->status == DASD_CQR_FAILED) {
2110 		sense = dasd_get_sense(&init_cqr->irb);
2111 		if (sense && (sense[1] & SNS1_NO_REC_FOUND))
2112 			return INIT_CQR_UNFORMATTED;
2113 		else
2114 			return INIT_CQR_ERROR;
2115 	} else
2116 		return INIT_CQR_ERROR;
2117 }
2118 
2119 /*
2120  * This is the callback function for the init_analysis cqr. It saves
2121  * the status of the initial analysis ccw before it frees it and kicks
2122  * the device to continue the startup sequence. This will call
2123  * dasd_eckd_do_analysis again (if the devices has not been marked
2124  * for deletion in the meantime).
2125  */
2126 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2127 					void *data)
2128 {
2129 	struct dasd_device *device = init_cqr->startdev;
2130 	struct dasd_eckd_private *private = device->private;
2131 
2132 	private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2133 	dasd_sfree_request(init_cqr, device);
2134 	dasd_kick_device(device);
2135 }
2136 
2137 static int dasd_eckd_start_analysis(struct dasd_block *block)
2138 {
2139 	struct dasd_ccw_req *init_cqr;
2140 
2141 	init_cqr = dasd_eckd_analysis_ccw(block->base);
2142 	if (IS_ERR(init_cqr))
2143 		return PTR_ERR(init_cqr);
2144 	init_cqr->callback = dasd_eckd_analysis_callback;
2145 	init_cqr->callback_data = NULL;
2146 	init_cqr->expires = 5*HZ;
2147 	/* first try without ERP, so we can later handle unformatted
2148 	 * devices as special case
2149 	 */
2150 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2151 	init_cqr->retries = 0;
2152 	dasd_add_request_head(init_cqr);
2153 	return -EAGAIN;
2154 }
2155 
/*
 * Evaluate the result of the initial analysis: determine whether the
 * volume is formatted, whether it uses the compatible (CDL) or linux
 * (LDL) disk layout, and derive block size and total block count.
 *
 * Returns 0 on success, -EMEDIUMTYPE for unformatted/unsupported
 * layouts, -EIO on I/O errors during analysis.
 */
static int dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device = block->base;
	struct dasd_eckd_private *private = device->private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;
	struct dasd_ccw_req *init_cqr;

	status = private->init_cqr_status;
	/* reset so a later dasd_eckd_do_analysis() starts a fresh run */
	private->init_cqr_status = -1;
	if (status == INIT_CQR_ERROR) {
		/* try again, this time with full ERP */
		init_cqr = dasd_eckd_analysis_ccw(device);
		dasd_sleep_on(init_cqr);
		status = dasd_eckd_analysis_evaluation(init_cqr);
		dasd_sfree_request(init_cqr, device);
	}

	/* raw-track access uses a fixed geometry, no layout detection */
	if (device->features & DASD_FEATURE_USERAW) {
		block->bp_block = DASD_RAW_BLOCKSIZE;
		blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
		block->s2b_shift = 3;
		goto raw;
	}

	if (status == INIT_CQR_UNFORMATTED) {
		dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	} else if (status == INIT_CQR_ERROR) {
		dev_err(&device->cdev->dev,
			"Detecting the DASD disk layout failed because "
			"of an I/O error\n");
		return -EIO;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
		    private->count_area[i].cyl != 0 ||
		    private->count_area[i].head != count_area_head[i] ||
		    private->count_area[i].record != count_area_rec[i]) {
			private->uses_cdl = 0;
			break;
		}
	}
	/* all three CDL records matched; use record 4 for the block size */
	if (i == 3)
		count_area = &private->count_area[3];

	if (private->uses_cdl == 0) {
		/* not CDL: check for a uniform linux disk layout instead */
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl) ||
			    private->count_area[i].cyl !=  0 ||
			    private->count_area[i].head != count_area_head[i] ||
			    private->count_area[i].record != count_area_rec[i])
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}

	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);

raw:
	block->blocks = ((unsigned long) private->real_cyl *
			  private->rdc_data.trk_per_cyl *
			  blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 (((unsigned long) private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}
2259 
2260 static int dasd_eckd_do_analysis(struct dasd_block *block)
2261 {
2262 	struct dasd_eckd_private *private = block->base->private;
2263 
2264 	if (private->init_cqr_status < 0)
2265 		return dasd_eckd_start_analysis(block);
2266 	else
2267 		return dasd_eckd_end_analysis(block);
2268 }
2269 
/*
 * Transition BASIC -> READY: register the device with the alias/LCU
 * management.  (Also drops the stray ';' that followed the function
 * body — an empty file-scope declaration that is not valid ISO C.)
 */
static int dasd_eckd_basic_to_ready(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
}
2274 
2275 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2276 {
2277 	if (cancel_work_sync(&device->reload_device))
2278 		dasd_put_device(device);
2279 	if (cancel_work_sync(&device->kick_validate))
2280 		dasd_put_device(device);
2281 
2282 	return 0;
2283 };
2284 
/*
 * Transition BASIC -> KNOWN: deregister the device from the alias/LCU
 * management.  (Also drops the stray ';' after the function body.)
 */
static int dasd_eckd_basic_to_known(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
}
2289 
2290 static int
2291 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2292 {
2293 	struct dasd_eckd_private *private = block->base->private;
2294 
2295 	if (dasd_check_blocksize(block->bp_block) == 0) {
2296 		geo->sectors = recs_per_track(&private->rdc_data,
2297 					      0, block->bp_block);
2298 	}
2299 	geo->cylinders = private->rdc_data.no_cyl;
2300 	geo->heads = private->rdc_data.trk_per_cyl;
2301 	return 0;
2302 }
2303 
2304 /*
2305  * Build the TCW request for the format check
2306  */
2307 static struct dasd_ccw_req *
2308 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2309 			  int enable_pav, struct eckd_count *fmt_buffer,
2310 			  int rpt)
2311 {
2312 	struct dasd_eckd_private *start_priv;
2313 	struct dasd_device *startdev = NULL;
2314 	struct tidaw *last_tidaw = NULL;
2315 	struct dasd_ccw_req *cqr;
2316 	struct itcw *itcw;
2317 	int itcw_size;
2318 	int count;
2319 	int rc;
2320 	int i;
2321 
2322 	if (enable_pav)
2323 		startdev = dasd_alias_get_start_dev(base);
2324 
2325 	if (!startdev)
2326 		startdev = base;
2327 
2328 	start_priv = startdev->private;
2329 
2330 	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2331 
2332 	/*
2333 	 * we're adding 'count' amount of tidaw to the itcw.
2334 	 * calculate the corresponding itcw_size
2335 	 */
2336 	itcw_size = itcw_calc_size(0, count, 0);
2337 
2338 	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
2339 	if (IS_ERR(cqr))
2340 		return cqr;
2341 
2342 	start_priv->count++;
2343 
2344 	itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0);
2345 	if (IS_ERR(itcw)) {
2346 		rc = -EINVAL;
2347 		goto out_err;
2348 	}
2349 
2350 	cqr->cpaddr = itcw_get_tcw(itcw);
2351 	rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit,
2352 			  DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count,
2353 			  sizeof(struct eckd_count),
2354 			  count * sizeof(struct eckd_count), 0, rpt);
2355 	if (rc)
2356 		goto out_err;
2357 
2358 	for (i = 0; i < count; i++) {
2359 		last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++,
2360 					    sizeof(struct eckd_count));
2361 		if (IS_ERR(last_tidaw)) {
2362 			rc = -EINVAL;
2363 			goto out_err;
2364 		}
2365 	}
2366 
2367 	last_tidaw->flags |= TIDAW_FLAGS_LAST;
2368 	itcw_finalize(itcw);
2369 
2370 	cqr->cpmode = 1;
2371 	cqr->startdev = startdev;
2372 	cqr->memdev = startdev;
2373 	cqr->basedev = base;
2374 	cqr->retries = startdev->default_retries;
2375 	cqr->expires = startdev->default_expires * HZ;
2376 	cqr->buildclk = get_tod_clock();
2377 	cqr->status = DASD_CQR_FILLED;
2378 	/* Set flags to suppress output for expected errors */
2379 	set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
2380 	set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
2381 
2382 	return cqr;
2383 
2384 out_err:
2385 	dasd_sfree_request(cqr, startdev);
2386 
2387 	return ERR_PTR(rc);
2388 }
2389 
2390 /*
2391  * Build the CCW request for the format check
2392  */
2393 static struct dasd_ccw_req *
2394 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
2395 		      int enable_pav, struct eckd_count *fmt_buffer, int rpt)
2396 {
2397 	struct dasd_eckd_private *start_priv;
2398 	struct dasd_eckd_private *base_priv;
2399 	struct dasd_device *startdev = NULL;
2400 	struct dasd_ccw_req *cqr;
2401 	struct ccw1 *ccw;
2402 	void *data;
2403 	int cplength, datasize;
2404 	int use_prefix;
2405 	int count;
2406 	int i;
2407 
2408 	if (enable_pav)
2409 		startdev = dasd_alias_get_start_dev(base);
2410 
2411 	if (!startdev)
2412 		startdev = base;
2413 
2414 	start_priv = startdev->private;
2415 	base_priv = base->private;
2416 
2417 	count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2418 
2419 	use_prefix = base_priv->features.feature[8] & 0x01;
2420 
2421 	if (use_prefix) {
2422 		cplength = 1;
2423 		datasize = sizeof(struct PFX_eckd_data);
2424 	} else {
2425 		cplength = 2;
2426 		datasize = sizeof(struct DE_eckd_data) +
2427 			sizeof(struct LO_eckd_data);
2428 	}
2429 	cplength += count;
2430 
2431 	cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
2432 	if (IS_ERR(cqr))
2433 		return cqr;
2434 
2435 	start_priv->count++;
2436 	data = cqr->data;
2437 	ccw = cqr->cpaddr;
2438 
2439 	if (use_prefix) {
2440 		prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit,
2441 			   DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0,
2442 			   count, 0, 0);
2443 	} else {
2444 		define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit,
2445 			      DASD_ECKD_CCW_READ_COUNT, startdev, 0);
2446 
2447 		data += sizeof(struct DE_eckd_data);
2448 		ccw[-1].flags |= CCW_FLAG_CC;
2449 
2450 		locate_record(ccw++, data, fdata->start_unit, 0, count,
2451 			      DASD_ECKD_CCW_READ_COUNT, base, 0);
2452 	}
2453 
2454 	for (i = 0; i < count; i++) {
2455 		ccw[-1].flags |= CCW_FLAG_CC;
2456 		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
2457 		ccw->flags = CCW_FLAG_SLI;
2458 		ccw->count = 8;
2459 		ccw->cda = (__u32)(addr_t) fmt_buffer;
2460 		ccw++;
2461 		fmt_buffer++;
2462 	}
2463 
2464 	cqr->startdev = startdev;
2465 	cqr->memdev = startdev;
2466 	cqr->basedev = base;
2467 	cqr->retries = DASD_RETRIES;
2468 	cqr->expires = startdev->default_expires * HZ;
2469 	cqr->buildclk = get_tod_clock();
2470 	cqr->status = DASD_CQR_FILLED;
2471 	/* Set flags to suppress output for expected errors */
2472 	set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
2473 
2474 	return cqr;
2475 }
2476 
/*
 * Build a format request for the track range described by fdata.
 * The channel program consists of an extent definition (Prefix or
 * DE/LO), optionally a write-record-zero per track, and one write-CKD
 * per record with the count fields prepared in cqr->data.
 * Returns the request or an ERR_PTR() on error.
 */
static struct dasd_ccw_req *
dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
		       struct format_data_t *fdata, int enable_pav)
{
	struct dasd_eckd_private *base_priv;
	struct dasd_eckd_private *start_priv;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ch_t address;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	int cplength, datasize;
	int i, j;
	int intensity = 0;
	int r0_perm;
	int nr_tracks;
	int use_prefix;

	/* with PAV enabled, the passed-in startdev is overridden */
	if (enable_pav)
		startdev = dasd_alias_get_start_dev(base);

	if (!startdev)
		startdev = base;

	start_priv = startdev->private;
	base_priv = base->private;

	rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize);

	nr_tracks = fdata->stop_unit - fdata->start_unit + 1;

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}

	/* feature byte 8 bit 0: control unit supports the Prefix command */
	use_prefix = base_priv->features.feature[8] & 0x01;

	/* size the channel program according to the requested operation */
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + (rpt*nr_tracks);
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 2 + rpt * nr_tracks;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count) +
				rpt * nr_tracks * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		if (use_prefix)
			datasize = sizeof(struct PFX_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		else
			datasize = sizeof(struct DE_eckd_data) +
				sizeof(struct LO_eckd_data) +
				sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&startdev->cdev->dev,
			 "An I/O control call used incorrect flags 0x%x\n",
			 fdata->intensity);
		return ERR_PTR(-EINVAL);
	}

	fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev);
	if (IS_ERR(fcp))
		return fcp;

	start_priv->count++;
	data = fcp->data;
	ccw = fcp->cpaddr;

	/* build the extent definition and locate record (cdl bit masked) */
	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct PFX_eckd_data *)data)
					->define_extent.ga_extended |= 0x04;
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
				      fdata->start_unit, fdata->stop_unit,
				      DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			/* grant subsystem permission to format R0 */
			if (r0_perm)
				((struct DE_eckd_data *) data)
					->ga_extended |= 0x04;
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt*nr_tracks,
			      DASD_ECKD_CCW_WRITE_CKD, base,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			       base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		/* +1 record per track for the record zero itself */
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt * nr_tracks + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, base,
			      base->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		if (use_prefix) {
			prefix(ccw++, (struct PFX_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, base, startdev);
			data += sizeof(struct PFX_eckd_data);
		} else {
			define_extent(ccw++, (struct DE_eckd_data *) data,
			       fdata->start_unit, fdata->stop_unit,
			       DASD_ECKD_CCW_WRITE_CKD, startdev, 0);
			data += sizeof(struct DE_eckd_data);
		}
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, base, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}

	/* build the per-track write CCWs with their count fields */
	for (j = 0; j < nr_tracks; j++) {
		/* calculate cylinder and head for the current track */
		set_ch_t(&address,
			 (fdata->start_unit + j) /
			 base_priv->rdc_data.trk_per_cyl,
			 (fdata->start_unit + j) %
			 base_priv->rdc_data.trk_per_cyl);
		if (intensity & 0x01) {	/* write record zero */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 0;
			ect->kl = 0;
			ect->dl = 8;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
		if ((intensity & ~0x08) & 0x04) {	/* erase track */
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = 1;
			ect->kl = 0;
			ect->dl = 0;
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
		} else {		/* write remaining records */
			for (i = 0; i < rpt; i++) {
				ect = (struct eckd_count *) data;
				data += sizeof(struct eckd_count);
				ect->cyl = address.cyl;
				ect->head = address.head;
				ect->record = i + 1;
				ect->kl = 0;
				ect->dl = fdata->blksize;
				/*
				 * Check for special tracks 0-1
				 * when formatting CDL
				 */
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 0) {
					if (i < 3) {
						ect->kl = 4;
						ect->dl = sizes_trk0[i] - 4;
					}
				}
				if ((intensity & 0x08) &&
				    address.cyl == 0 && address.head == 1) {
					ect->kl = 44;
					ect->dl = LABEL_SIZE - 44;
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				if (i != 0 || j == 0)
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD;
				else
					ccw->cmd_code =
						DASD_ECKD_CCW_WRITE_CKD_MT;
				ccw->flags = CCW_FLAG_SLI;
				ccw->count = 8;
				ccw->cda = (__u32)(addr_t) ect;
				ccw++;
			}
		}
	}

	fcp->startdev = startdev;
	fcp->memdev = startdev;
	fcp->basedev = base;
	fcp->retries = 256;
	fcp->expires = startdev->default_expires * HZ;
	fcp->buildclk = get_tod_clock();
	fcp->status = DASD_CQR_FILLED;

	return fcp;
}
2735 
2736 /*
2737  * Wrapper function to build a CCW request depending on input data
2738  */
2739 static struct dasd_ccw_req *
2740 dasd_eckd_format_build_ccw_req(struct dasd_device *base,
2741 			       struct format_data_t *fdata, int enable_pav,
2742 			       int tpm, struct eckd_count *fmt_buffer, int rpt)
2743 {
2744 	struct dasd_ccw_req *ccw_req;
2745 
2746 	if (!fmt_buffer) {
2747 		ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav);
2748 	} else {
2749 		if (tpm)
2750 			ccw_req = dasd_eckd_build_check_tcw(base, fdata,
2751 							    enable_pav,
2752 							    fmt_buffer, rpt);
2753 		else
2754 			ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2755 							fmt_buffer, rpt);
2756 	}
2757 
2758 	return ccw_req;
2759 }
2760 
2761 /*
2762  * Sanity checks on format_data
2763  */
2764 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2765 					  struct format_data_t *fdata)
2766 {
2767 	struct dasd_eckd_private *private = base->private;
2768 
2769 	if (fdata->start_unit >=
2770 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2771 		dev_warn(&base->cdev->dev,
2772 			 "Start track number %u used in formatting is too big\n",
2773 			 fdata->start_unit);
2774 		return -EINVAL;
2775 	}
2776 	if (fdata->stop_unit >=
2777 	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2778 		dev_warn(&base->cdev->dev,
2779 			 "Stop track number %u used in formatting is too big\n",
2780 			 fdata->stop_unit);
2781 		return -EINVAL;
2782 	}
2783 	if (fdata->start_unit > fdata->stop_unit) {
2784 		dev_warn(&base->cdev->dev,
2785 			 "Start track %u used in formatting exceeds end track\n",
2786 			 fdata->start_unit);
2787 		return -EINVAL;
2788 	}
2789 	if (dasd_check_blocksize(fdata->blksize) != 0) {
2790 		dev_warn(&base->cdev->dev,
2791 			 "The DASD cannot be formatted with block size %u\n",
2792 			 fdata->blksize);
2793 		return -EINVAL;
2794 	}
2795 	return 0;
2796 }
2797 
2798 /*
2799  * This function will process format_data originally coming from an IOCTL
2800  */
2801 static int dasd_eckd_format_process_data(struct dasd_device *base,
2802 					 struct format_data_t *fdata,
2803 					 int enable_pav, int tpm,
2804 					 struct eckd_count *fmt_buffer, int rpt,
2805 					 struct irb *irb)
2806 {
2807 	struct dasd_eckd_private *private = base->private;
2808 	struct dasd_ccw_req *cqr, *n;
2809 	struct list_head format_queue;
2810 	struct dasd_device *device;
2811 	char *sense = NULL;
2812 	int old_start, old_stop, format_step;
2813 	int step, retry;
2814 	int rc;
2815 
2816 	rc = dasd_eckd_format_sanity_checks(base, fdata);
2817 	if (rc)
2818 		return rc;
2819 
2820 	INIT_LIST_HEAD(&format_queue);
2821 
2822 	old_start = fdata->start_unit;
2823 	old_stop = fdata->stop_unit;
2824 
2825 	if (!tpm && fmt_buffer != NULL) {
2826 		/* Command Mode / Format Check */
2827 		format_step = 1;
2828 	} else if (tpm && fmt_buffer != NULL) {
2829 		/* Transport Mode / Format Check */
2830 		format_step = DASD_CQR_MAX_CCW / rpt;
2831 	} else {
2832 		/* Normal Formatting */
2833 		format_step = DASD_CQR_MAX_CCW /
2834 			recs_per_track(&private->rdc_data, 0, fdata->blksize);
2835 	}
2836 
2837 	do {
2838 		retry = 0;
2839 		while (fdata->start_unit <= old_stop) {
2840 			step = fdata->stop_unit - fdata->start_unit + 1;
2841 			if (step > format_step) {
2842 				fdata->stop_unit =
2843 					fdata->start_unit + format_step - 1;
2844 			}
2845 
2846 			cqr = dasd_eckd_format_build_ccw_req(base, fdata,
2847 							     enable_pav, tpm,
2848 							     fmt_buffer, rpt);
2849 			if (IS_ERR(cqr)) {
2850 				rc = PTR_ERR(cqr);
2851 				if (rc == -ENOMEM) {
2852 					if (list_empty(&format_queue))
2853 						goto out;
2854 					/*
2855 					 * not enough memory available, start
2856 					 * requests retry after first requests
2857 					 * were finished
2858 					 */
2859 					retry = 1;
2860 					break;
2861 				}
2862 				goto out_err;
2863 			}
2864 			list_add_tail(&cqr->blocklist, &format_queue);
2865 
2866 			if (fmt_buffer) {
2867 				step = fdata->stop_unit - fdata->start_unit + 1;
2868 				fmt_buffer += rpt * step;
2869 			}
2870 			fdata->start_unit = fdata->stop_unit + 1;
2871 			fdata->stop_unit = old_stop;
2872 		}
2873 
2874 		rc = dasd_sleep_on_queue(&format_queue);
2875 
2876 out_err:
2877 		list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2878 			device = cqr->startdev;
2879 			private = device->private;
2880 
2881 			if (cqr->status == DASD_CQR_FAILED) {
2882 				/*
2883 				 * Only get sense data if called by format
2884 				 * check
2885 				 */
2886 				if (fmt_buffer && irb) {
2887 					sense = dasd_get_sense(&cqr->irb);
2888 					memcpy(irb, &cqr->irb, sizeof(*irb));
2889 				}
2890 				rc = -EIO;
2891 			}
2892 			list_del_init(&cqr->blocklist);
2893 			dasd_ffree_request(cqr, device);
2894 			private->count--;
2895 		}
2896 
2897 		if (rc && rc != -EIO)
2898 			goto out;
2899 		if (rc == -EIO) {
2900 			/*
2901 			 * In case fewer than the expected records are on the
2902 			 * track, we will most likely get a 'No Record Found'
2903 			 * error (in command mode) or a 'File Protected' error
2904 			 * (in transport mode). Those particular cases shouldn't
2905 			 * pass the -EIO to the IOCTL, therefore reset the rc
2906 			 * and continue.
2907 			 */
2908 			if (sense &&
2909 			    (sense[1] & SNS1_NO_REC_FOUND ||
2910 			     sense[1] & SNS1_FILE_PROTECTED))
2911 				retry = 1;
2912 			else
2913 				goto out;
2914 		}
2915 
2916 	} while (retry);
2917 
2918 out:
2919 	fdata->start_unit = old_start;
2920 	fdata->stop_unit = old_stop;
2921 
2922 	return rc;
2923 }
2924 
/*
 * Format the device (ioctl entry point): a plain format run without a
 * check buffer, transport mode, or result irb.
 */
static int dasd_eckd_format_device(struct dasd_device *base,
				   struct format_data_t *fdata, int enable_pav)
{
	return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL,
					     0, NULL);
}
2931 
2932 /*
2933  * Callback function to free ESE format requests.
2934  */
2935 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
2936 {
2937 	struct dasd_device *device = cqr->startdev;
2938 	struct dasd_eckd_private *private = device->private;
2939 
2940 	private->count--;
2941 	dasd_ffree_request(cqr, device);
2942 }
2943 
/*
 * Build a format request for the track range touched by the given
 * request on an ESE (thin-provisioned) volume, so the unformatted
 * tracks can be formatted on demand before the original request is
 * retried.  Returns the format request or an ERR_PTR().
 */
static struct dasd_ccw_req *
dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
{
	struct dasd_eckd_private *private;
	struct format_data_t fdata;
	unsigned int recs_per_trk;
	struct dasd_ccw_req *fcqr;
	struct dasd_device *base;
	struct dasd_block *block;
	unsigned int blksize;
	struct request *req;
	sector_t first_trk;
	sector_t last_trk;
	int rc;

	req = cqr->callback_data;
	base = cqr->block->base;
	private = base->private;
	block = base->block;
	blksize = block->bp_block;
	recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);

	/* convert the request's sector range to a track range */
	first_trk = blk_rq_pos(req) >> block->s2b_shift;
	sector_div(first_trk, recs_per_trk);
	last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	sector_div(last_trk, recs_per_trk);

	fdata.start_unit = first_trk;
	fdata.stop_unit = last_trk;
	fdata.blksize = blksize;
	/* keep the volume's existing layout (CDL vs LDL) */
	fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;

	rc = dasd_eckd_format_sanity_checks(base, &fdata);
	if (rc)
		return ERR_PTR(-EINVAL);

	/*
	 * We're building the request with PAV disabled as we're reusing
	 * the former startdev.
	 */
	fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
	if (IS_ERR(fcqr))
		return fcqr;

	fcqr->callback = dasd_eckd_ese_format_cb;

	return fcqr;
}
2993 
2994 /*
2995  * When data is read from an unformatted area of an ESE volume, this function
2996  * returns zeroed data and thereby mimics a read of zero data.
2997  */
2998 static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
2999 {
3000 	unsigned int blksize, off;
3001 	struct dasd_device *base;
3002 	struct req_iterator iter;
3003 	struct request *req;
3004 	struct bio_vec bv;
3005 	char *dst;
3006 
3007 	req = (struct request *) cqr->callback_data;
3008 	base = cqr->block->base;
3009 	blksize = base->block->bp_block;
3010 
3011 	rq_for_each_segment(bv, req, iter) {
3012 		dst = page_address(bv.bv_page) + bv.bv_offset;
3013 		for (off = 0; off < bv.bv_len; off += blksize) {
3014 			if (dst && rq_data_dir(req) == READ) {
3015 				dst += off;
3016 				memset(dst, 0, blksize);
3017 			}
3018 		}
3019 	}
3020 }
3021 
3022 /*
3023  * Helper function to count consecutive records of a single track.
3024  */
3025 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3026 				   int max)
3027 {
3028 	int head;
3029 	int i;
3030 
3031 	head = fmt_buffer[start].head;
3032 
3033 	/*
3034 	 * There are 3 conditions where we stop counting:
3035 	 * - if data reoccurs (same head and record may reoccur), which may
3036 	 *   happen due to the way DASD_ECKD_CCW_READ_COUNT works
3037 	 * - when the head changes, because we're iterating over several tracks
3038 	 *   then (DASD_ECKD_CCW_READ_COUNT_MT)
3039 	 * - when we've reached the end of sensible data in the buffer (the
3040 	 *   record will be 0 then)
3041 	 */
3042 	for (i = start; i < max; i++) {
3043 		if (i > start) {
3044 			if ((fmt_buffer[i].head == head &&
3045 			    fmt_buffer[i].record == 1) ||
3046 			    fmt_buffer[i].head != head ||
3047 			    fmt_buffer[i].record == 0)
3048 				break;
3049 		}
3050 	}
3051 
3052 	return i - start;
3053 }
3054 
3055 /*
3056  * Evaluate a given range of tracks. Data like number of records, blocksize,
3057  * record ids, and key length are compared with expected data.
3058  *
3059  * If a mismatch occurs, the corresponding error bit is set, as well as
3060  * additional information, depending on the error.
3061  */
3062 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer,
3063 					     struct format_check_t *cdata,
3064 					     int rpt_max, int rpt_exp,
3065 					     int trk_per_cyl, int tpm)
3066 {
3067 	struct ch_t geo;
3068 	int max_entries;
3069 	int count = 0;
3070 	int trkcount;
3071 	int blksize;
3072 	int pos = 0;
3073 	int i, j;
3074 	int kl;
3075 
3076 	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3077 	max_entries = trkcount * rpt_max;
3078 
3079 	for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) {
3080 		/* Calculate the correct next starting position in the buffer */
3081 		if (tpm) {
3082 			while (fmt_buffer[pos].record == 0 &&
3083 			       fmt_buffer[pos].dl == 0) {
3084 				if (pos++ > max_entries)
3085 					break;
3086 			}
3087 		} else {
3088 			if (i != cdata->expect.start_unit)
3089 				pos += rpt_max - count;
3090 		}
3091 
3092 		/* Calculate the expected geo values for the current track */
3093 		set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl);
3094 
3095 		/* Count and check number of records */
3096 		count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max);
3097 
3098 		if (count < rpt_exp) {
3099 			cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS;
3100 			break;
3101 		}
3102 		if (count > rpt_exp) {
3103 			cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS;
3104 			break;
3105 		}
3106 
3107 		for (j = 0; j < count; j++, pos++) {
3108 			blksize = cdata->expect.blksize;
3109 			kl = 0;
3110 
3111 			/*
3112 			 * Set special values when checking CDL formatted
3113 			 * devices.
3114 			 */
3115 			if ((cdata->expect.intensity & 0x08) &&
3116 			    geo.cyl == 0 && geo.head == 0) {
3117 				if (j < 3) {
3118 					blksize = sizes_trk0[j] - 4;
3119 					kl = 4;
3120 				}
3121 			}
3122 			if ((cdata->expect.intensity & 0x08) &&
3123 			    geo.cyl == 0 && geo.head == 1) {
3124 				blksize = LABEL_SIZE - 44;
3125 				kl = 44;
3126 			}
3127 
3128 			/* Check blocksize */
3129 			if (fmt_buffer[pos].dl != blksize) {
3130 				cdata->result = DASD_FMT_ERR_BLKSIZE;
3131 				goto out;
3132 			}
3133 			/* Check if key length is 0 */
3134 			if (fmt_buffer[pos].kl != kl) {
3135 				cdata->result = DASD_FMT_ERR_KEY_LENGTH;
3136 				goto out;
3137 			}
3138 			/* Check if record_id is correct */
3139 			if (fmt_buffer[pos].cyl != geo.cyl ||
3140 			    fmt_buffer[pos].head != geo.head ||
3141 			    fmt_buffer[pos].record != (j + 1)) {
3142 				cdata->result = DASD_FMT_ERR_RECORD_ID;
3143 				goto out;
3144 			}
3145 		}
3146 	}
3147 
3148 out:
3149 	/*
3150 	 * In case of no errors, we need to decrease by one
3151 	 * to get the correct positions.
3152 	 */
3153 	if (!cdata->result) {
3154 		i--;
3155 		pos--;
3156 	}
3157 
3158 	cdata->unit = i;
3159 	cdata->num_records = count;
3160 	cdata->rec = fmt_buffer[pos].record;
3161 	cdata->blksize = fmt_buffer[pos].dl;
3162 	cdata->key_length = fmt_buffer[pos].kl;
3163 }
3164 
3165 /*
3166  * Check the format of a range of tracks of a DASD.
3167  */
3168 static int dasd_eckd_check_device_format(struct dasd_device *base,
3169 					 struct format_check_t *cdata,
3170 					 int enable_pav)
3171 {
3172 	struct dasd_eckd_private *private = base->private;
3173 	struct eckd_count *fmt_buffer;
3174 	struct irb irb;
3175 	int rpt_max, rpt_exp;
3176 	int fmt_buffer_size;
3177 	int trk_per_cyl;
3178 	int trkcount;
3179 	int tpm = 0;
3180 	int rc;
3181 
3182 	trk_per_cyl = private->rdc_data.trk_per_cyl;
3183 
3184 	/* Get maximum and expected amount of records per track */
3185 	rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3186 	rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3187 
3188 	trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3189 	fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3190 
3191 	fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3192 	if (!fmt_buffer)
3193 		return -ENOMEM;
3194 
3195 	/*
3196 	 * A certain FICON feature subset is needed to operate in transport
3197 	 * mode. Additionally, the support for transport mode is implicitly
3198 	 * checked by comparing the buffer size with fcx_max_data. As long as
3199 	 * the buffer size is smaller we can operate in transport mode and
3200 	 * process multiple tracks. If not, only one track at once is being
3201 	 * processed using command mode.
3202 	 */
3203 	if ((private->features.feature[40] & 0x04) &&
3204 	    fmt_buffer_size <= private->fcx_max_data)
3205 		tpm = 1;
3206 
3207 	rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3208 					   tpm, fmt_buffer, rpt_max, &irb);
3209 	if (rc && rc != -EIO)
3210 		goto out;
3211 	if (rc == -EIO) {
3212 		/*
3213 		 * If our first attempt with transport mode enabled comes back
3214 		 * with an incorrect length error, we're going to retry the
3215 		 * check with command mode.
3216 		 */
3217 		if (tpm && scsw_cstat(&irb.scsw) == 0x40) {
3218 			tpm = 0;
3219 			rc = dasd_eckd_format_process_data(base, &cdata->expect,
3220 							   enable_pav, tpm,
3221 							   fmt_buffer, rpt_max,
3222 							   &irb);
3223 			if (rc)
3224 				goto out;
3225 		} else {
3226 			goto out;
3227 		}
3228 	}
3229 
3230 	dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp,
3231 					 trk_per_cyl, tpm);
3232 
3233 out:
3234 	kfree(fmt_buffer);
3235 
3236 	return rc;
3237 }
3238 
3239 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
3240 {
3241 	if (cqr->retries < 0) {
3242 		cqr->status = DASD_CQR_FAILED;
3243 		return;
3244 	}
3245 	cqr->status = DASD_CQR_FILLED;
3246 	if (cqr->block && (cqr->startdev != cqr->block->base)) {
3247 		dasd_eckd_reset_ccw_to_base_io(cqr);
3248 		cqr->startdev = cqr->block->base;
3249 		cqr->lpm = dasd_path_get_opm(cqr->block->base);
3250 	}
3251 };
3252 
3253 static dasd_erp_fn_t
3254 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
3255 {
3256 	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
3257 	struct ccw_device *cdev = device->cdev;
3258 
3259 	switch (cdev->id.cu_type) {
3260 	case 0x3990:
3261 	case 0x2105:
3262 	case 0x2107:
3263 	case 0x1750:
3264 		return dasd_3990_erp_action;
3265 	case 0x9343:
3266 	case 0x3880:
3267 	default:
3268 		return dasd_default_erp_action;
3269 	}
3270 }
3271 
/*
 * Select the post-recovery action; ECKD always uses the default one.
 */
static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}
3277 
/*
 * Inspect an interrupt for conditions that indicate a device change:
 * state change pending, summary unit check, service information
 * messages (SIM) and loss of device reservation. @cqr may be NULL for
 * unsolicited interrupts.
 */
static void dasd_eckd_check_for_device_change(struct dasd_device *device,
					      struct dasd_ccw_req *cqr,
					      struct irb *irb)
{
	char mask;
	char *sense = NULL;
	struct dasd_eckd_private *private = device->private;

	/* first of all check for state change pending interrupt */
	/*
	 * NOTE(review): mask has its top bit set; the equality check below
	 * relies on 'char' being unsigned (the default on s390) — confirm
	 * if this code is ever built for another architecture.
	 */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		/*
		 * for alias only, not in offline processing
		 * and only if not suspended
		 */
		if (!device->block && private->lcu &&
		    device->state == DASD_STATE_ONLINE &&
		    !test_bit(DASD_FLAG_OFFLINE, &device->flags) &&
		    !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
			/* schedule worker to reload device */
			dasd_reload_device(device);
		}
		dasd_generic_handle_state_change(device);
		return;
	}

	sense = dasd_get_sense(irb);
	if (!sense)
		return;

	/* summary unit check */
	if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) {
		/* DASD_FLAG_SUC serializes the handling: only notify once */
		if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: device already notified");
			return;
		}
		/*
		 * NOTE(review): sense was already fetched and checked
		 * non-NULL above; this re-fetch and NULL check look
		 * redundant (dasd_get_sense is called on the same irb).
		 */
		sense = dasd_get_sense(irb);
		if (!sense) {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "eckd suc: no reason code available");
			clear_bit(DASD_FLAG_SUC, &device->flags);
			return;

		}
		private->suc_reason = sense[8];
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x",
			      "eckd handle summary unit check: reason",
			      private->suc_reason);
		/* hold a reference for the worker; dropped if not scheduled */
		dasd_get_device(device);
		if (!schedule_work(&device->suc_work))
			dasd_put_device(device);

		return;
	}

	/* service information message SIM */
	if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		return;
	}

	/* loss of device reservation is handled via base devices only
	 * as alias devices may be used with several bases
	 */
	if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
	    (sense[7] == 0x3F) &&
	    (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
		if (device->features & DASD_FEATURE_FAILONSLCK)
			set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
		dev_err(&device->cdev->dev,
			"The device reservation was lost\n");
	}
}
3356 
/*
 * Build a command mode ccw program for a read/write request using one
 * locate record and one read/write ccw per block (single-block
 * transfers). Special handling is applied for CDL formatted tracks
 * (cylinder 0), where the first records have non-standard sizes.
 *
 * Returns the prepared request, ERR_PTR(-EAGAIN) if the clock is not in
 * sync and the caller should retry later, or another ERR_PTR code on
 * failure.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv.bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		/* number of blocks contributed by this segment */
		count += bv.bv_len >> (block->s2b_shift + 9);
		if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
			cidaw += bv.bv_len >> (block->s2b_shift + 9);
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, basedev, 0) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record+read/write/ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		if (dasd_page_cache) {
			/*
			 * Route the payload through a page from
			 * dasd_page_cache; writes are copied into it and
			 * the ccw data address is redirected to the copy.
			 */
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv.bv_offset, dst, bv.bv_len);
			if (copy)
				dst = copy + bv.bv_offset;
		}
		for (off = 0; off < bv.bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)){
					/* CDL records use a modified opcode */
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					/* pad short CDL reads with 0xe5 */
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev)) {
		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
	}

	return cqr;
}
3548 
/*
 * Build a command mode ccw program for a read/write request using
 * track based I/O: one read/write-track-data ccw per track, with an
 * idal describing the memory of that track. Requires the prefix
 * command (prefix_LRE).
 *
 * Returns the prepared request, ERR_PTR(-EAGAIN) if the clock is not in
 * sync and the caller should retry later, ERR_PTR(-ERANGE) if the
 * request memory layout cannot be expressed with idaws, or another
 * ERR_PTR code on failure.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	unsigned long *idaws;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *idaw_dst;
	unsigned int cidaw, cplength, datasize;
	unsigned int tlf;
	sector_t recid;
	unsigned char cmd;
	struct dasd_device *basedev;
	unsigned int trkcount, count, count_to_trk_end;
	unsigned int idaw_len, seg_len, part_len, len_to_track_end;
	unsigned char new_track, end_idaw;
	sector_t trkid;
	unsigned int recoffs;

	basedev = block->base;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
	else
		return ERR_PTR(-EINVAL);

	/* Track based I/O needs IDAWs for each page, and not just for
	 * 64 bit addresses. We need additional idals for pages
	 * that get filled from two tracks, so we use the number
	 * of records as upper limit.
	 */
	cidaw = last_rec - first_rec + 1;
	trkcount = last_trk - first_trk + 1;

	/* 1x prefix + one read/write ccw per track */
	cplength = 1 + trkcount;

	datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
				   startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* transfer length factor: how many bytes to read from the last track */
	if (first_trk == last_trk)
		tlf = last_offs - first_offs + 1;
	else
		tlf = last_offs + 1;
	tlf *= blksize;

	if (prefix_LRE(ccw++, cqr->data, first_trk,
		       last_trk, cmd, basedev, startdev,
		       1 /* format */, first_offs + 1,
		       trkcount, blksize,
		       tlf) == -EAGAIN) {
		/* Clock not in sync and XRC is enabled.
		 * Try again later.
		 */
		dasd_sfree_request(cqr, startdev);
		return ERR_PTR(-EAGAIN);
	}

	/*
	 * The translation of request into ccw programs must meet the
	 * following conditions:
	 * - all idaws but the first and the last must address full pages
	 *   (or 2K blocks on 31-bit)
	 * - the scope of a ccw and it's idal ends with the track boundaries
	 */
	idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
	recid = first_rec;
	new_track = 1;
	end_idaw = 0;
	len_to_track_end = 0;
	idaw_dst = NULL;
	idaw_len = 0;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		while (seg_len) {
			if (new_track) {
				/* start a new read/write-track ccw */
				trkid = recid;
				recoffs = sector_div(trkid, blk_per_trk);
				count_to_trk_end = blk_per_trk - recoffs;
				count = min((last_rec - recid + 1),
					    (sector_t)count_to_trk_end);
				len_to_track_end = count * blksize;
				ccw[-1].flags |= CCW_FLAG_CC;
				ccw->cmd_code = cmd;
				ccw->count = len_to_track_end;
				ccw->cda = (__u32)(addr_t)idaws;
				ccw->flags = CCW_FLAG_IDA;
				ccw++;
				recid += count;
				new_track = 0;
				/* first idaw for a ccw may start anywhere */
				if (!idaw_dst)
					idaw_dst = dst;
			}
			/* If we start a new idaw, we must make sure that it
			 * starts on an IDA_BLOCK_SIZE boundary.
			 * If we continue an idaw, we must make sure that the
			 * current segment begins where the so far accumulated
			 * idaw ends
			 */
			if (!idaw_dst) {
				if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
					dasd_sfree_request(cqr, startdev);
					return ERR_PTR(-ERANGE);
				} else
					idaw_dst = dst;
			}
			if ((idaw_dst + idaw_len) != dst) {
				/* segments are not contiguous in memory */
				dasd_sfree_request(cqr, startdev);
				return ERR_PTR(-ERANGE);
			}
			part_len = min(seg_len, len_to_track_end);
			seg_len -= part_len;
			dst += part_len;
			idaw_len += part_len;
			len_to_track_end -= part_len;
			/* collected memory area ends on an IDA_BLOCK border,
			 * -> create an idaw
			 * idal_create_words will handle cases where idaw_len
			 * is larger then IDA_BLOCK_SIZE
			 */
			if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
				end_idaw = 1;
			/* We also need to end the idaw at track end */
			if (!len_to_track_end) {
				new_track = 1;
				end_idaw = 1;
			}
			if (end_idaw) {
				idaws = idal_create_words(idaws, idaw_dst,
							  idaw_len);
				idaw_dst = NULL;
				idaw_len = 0;
				end_idaw = 0;
			}
		}
	}

	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Set flags to suppress output for expected errors */
	if (dasd_eckd_is_ese(basedev))
		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

	return cqr;
}
3724 
/*
 * Add the prefix (PFX with embedded locate record) dcw to an itcw for a
 * transport mode track based I/O request. Fills in extent, locate
 * record and PAV validity data according to the ccw command @cmd.
 *
 * Returns 0 on success, -EAGAIN if the timestamp could not be set
 * (clock not in sync), or the error from itcw_add_dcw().
 */
static int prepare_itcw(struct itcw *itcw,
			unsigned int trk, unsigned int totrk, int cmd,
			struct dasd_device *basedev,
			struct dasd_device *startdev,
			unsigned int rec_on_trk, int count,
			unsigned int blksize,
			unsigned int total_data_size,
			unsigned int tlf,
			unsigned int blk_per_trk)
{
	struct PFX_eckd_data pfxdata;
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	struct dcw *dcw;

	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	u8 pfx_cmd;

	int rc = 0;
	int sector = 0;
	int dn, d;


	/* setup prefix data */
	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata.define_extent;
	lredata = &pfxdata.locate_record;

	memset(&pfxdata, 0, sizeof(pfxdata));
	pfxdata.format = 1; /* PFX with LRE */
	pfxdata.base_address = basepriv->ned->unit_addr;
	pfxdata.base_lss = basepriv->ned->ID;
	pfxdata.validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata.validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata.validity.verify_base = 1;
		pfxdata.validity.hyper_pav = 1;
	}

	/* fill extent and locate record data depending on the command */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x0C;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = set_timestamp(NULL, dedata, basedev);
		dedata->ga_extended |= 0x42;
		lredata->operation.orientation = 0x0;
		lredata->operation.operation = 0x3F;
		lredata->extended_operation = 0x23;
		lredata->auxiliary.check_bytes = 0x2;
		/*
		 * If XRC is supported the System Time Stamp is set. The
		 * validity of the time stamp must be reflected in the prefix
		 * data as well.
		 */
		if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
			pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */
		pfx_cmd = DASD_ECKD_CCW_PFX;
		break;
	case DASD_ECKD_CCW_READ_COUNT_MT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		dedata->ga_extended |= 0x42;
		dedata->blk_size = blksize;
		lredata->operation.orientation = 0x2;
		lredata->operation.operation = 0x16;
		lredata->auxiliary.check_bytes = 0x01;
		pfx_cmd = DASD_ECKD_CCW_PFX_READ;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "prepare itcw, unknown opcode 0x%x", cmd);
		BUG();
		break;
	}
	if (rc)
		return rc;

	dedata->attributes.mode = 0x3;	/* ECKD */

	/* convert track numbers to cylinder/head addresses */
	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	dedata->ep_format = 0x20; /* records per track is valid */
	dedata->ep_rec_per_track = blk_per_trk;

	if (rec_on_trk) {
		/*
		 * NOTE(review): device type specific sector calculation;
		 * the constants presumably come from the 3390/3380 track
		 * layout — confirm against the controller reference.
		 */
		switch (basepriv->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(blksize + 6, 232);
			d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(blksize + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}

	if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) {
		lredata->auxiliary.length_valid = 0;
		lredata->auxiliary.length_scope = 0;
		lredata->sector = 0xff;
	} else {
		lredata->auxiliary.length_valid = 1;
		lredata->auxiliary.length_scope = 1;
		lredata->sector = sector;
	}
	lredata->auxiliary.imbedded_ccw_valid = 1;
	lredata->length = tlf;
	lredata->imbedded_ccw = cmd;
	lredata->count = count;
	set_ch_t(&lredata->seek_addr, begcyl, beghead);
	lredata->search_arg.cyl = lredata->seek_addr.cyl;
	lredata->search_arg.head = lredata->seek_addr.head;
	lredata->search_arg.record = rec_on_trk;

	dcw = itcw_add_dcw(itcw, pfx_cmd, 0,
		     &pfxdata, sizeof(pfxdata), total_data_size);
	return PTR_ERR_OR_ZERO(dcw);
}
3880 
3881 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3882 					       struct dasd_device *startdev,
3883 					       struct dasd_block *block,
3884 					       struct request *req,
3885 					       sector_t first_rec,
3886 					       sector_t last_rec,
3887 					       sector_t first_trk,
3888 					       sector_t last_trk,
3889 					       unsigned int first_offs,
3890 					       unsigned int last_offs,
3891 					       unsigned int blk_per_trk,
3892 					       unsigned int blksize)
3893 {
3894 	struct dasd_ccw_req *cqr;
3895 	struct req_iterator iter;
3896 	struct bio_vec bv;
3897 	char *dst;
3898 	unsigned int trkcount, ctidaw;
3899 	unsigned char cmd;
3900 	struct dasd_device *basedev;
3901 	unsigned int tlf;
3902 	struct itcw *itcw;
3903 	struct tidaw *last_tidaw = NULL;
3904 	int itcw_op;
3905 	size_t itcw_size;
3906 	u8 tidaw_flags;
3907 	unsigned int seg_len, part_len, len_to_track_end;
3908 	unsigned char new_track;
3909 	sector_t recid, trkid;
3910 	unsigned int offs;
3911 	unsigned int count, count_to_trk_end;
3912 	int ret;
3913 
3914 	basedev = block->base;
3915 	if (rq_data_dir(req) == READ) {
3916 		cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
3917 		itcw_op = ITCW_OP_READ;
3918 	} else if (rq_data_dir(req) == WRITE) {
3919 		cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
3920 		itcw_op = ITCW_OP_WRITE;
3921 	} else
3922 		return ERR_PTR(-EINVAL);
3923 
3924 	/* trackbased I/O needs address all memory via TIDAWs,
3925 	 * not just for 64 bit addresses. This allows us to map
3926 	 * each segment directly to one tidaw.
3927 	 * In the case of write requests, additional tidaws may
3928 	 * be needed when a segment crosses a track boundary.
3929 	 */
3930 	trkcount = last_trk - first_trk + 1;
3931 	ctidaw = 0;
3932 	rq_for_each_segment(bv, req, iter) {
3933 		++ctidaw;
3934 	}
3935 	if (rq_data_dir(req) == WRITE)
3936 		ctidaw += (last_trk - first_trk);
3937 
3938 	/* Allocate the ccw request. */
3939 	itcw_size = itcw_calc_size(0, ctidaw, 0);
3940 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
3941 				   blk_mq_rq_to_pdu(req));
3942 	if (IS_ERR(cqr))
3943 		return cqr;
3944 
3945 	/* transfer length factor: how many bytes to read from the last track */
3946 	if (first_trk == last_trk)
3947 		tlf = last_offs - first_offs + 1;
3948 	else
3949 		tlf = last_offs + 1;
3950 	tlf *= blksize;
3951 
3952 	itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
3953 	if (IS_ERR(itcw)) {
3954 		ret = -EINVAL;
3955 		goto out_error;
3956 	}
3957 	cqr->cpaddr = itcw_get_tcw(itcw);
3958 	if (prepare_itcw(itcw, first_trk, last_trk,
3959 			 cmd, basedev, startdev,
3960 			 first_offs + 1,
3961 			 trkcount, blksize,
3962 			 (last_rec - first_rec + 1) * blksize,
3963 			 tlf, blk_per_trk) == -EAGAIN) {
3964 		/* Clock not in sync and XRC is enabled.
3965 		 * Try again later.
3966 		 */
3967 		ret = -EAGAIN;
3968 		goto out_error;
3969 	}
3970 	len_to_track_end = 0;
3971 	/*
3972 	 * A tidaw can address 4k of memory, but must not cross page boundaries
3973 	 * We can let the block layer handle this by setting
3974 	 * blk_queue_segment_boundary to page boundaries and
3975 	 * blk_max_segment_size to page size when setting up the request queue.
3976 	 * For write requests, a TIDAW must not cross track boundaries, because
3977 	 * we have to set the CBC flag on the last tidaw for each track.
3978 	 */
3979 	if (rq_data_dir(req) == WRITE) {
3980 		new_track = 1;
3981 		recid = first_rec;
3982 		rq_for_each_segment(bv, req, iter) {
3983 			dst = page_address(bv.bv_page) + bv.bv_offset;
3984 			seg_len = bv.bv_len;
3985 			while (seg_len) {
3986 				if (new_track) {
3987 					trkid = recid;
3988 					offs = sector_div(trkid, blk_per_trk);
3989 					count_to_trk_end = blk_per_trk - offs;
3990 					count = min((last_rec - recid + 1),
3991 						    (sector_t)count_to_trk_end);
3992 					len_to_track_end = count * blksize;
3993 					recid += count;
3994 					new_track = 0;
3995 				}
3996 				part_len = min(seg_len, len_to_track_end);
3997 				seg_len -= part_len;
3998 				len_to_track_end -= part_len;
3999 				/* We need to end the tidaw at track end */
4000 				if (!len_to_track_end) {
4001 					new_track = 1;
4002 					tidaw_flags = TIDAW_FLAGS_INSERT_CBC;
4003 				} else
4004 					tidaw_flags = 0;
4005 				last_tidaw = itcw_add_tidaw(itcw, tidaw_flags,
4006 							    dst, part_len);
4007 				if (IS_ERR(last_tidaw)) {
4008 					ret = -EINVAL;
4009 					goto out_error;
4010 				}
4011 				dst += part_len;
4012 			}
4013 		}
4014 	} else {
4015 		rq_for_each_segment(bv, req, iter) {
4016 			dst = page_address(bv.bv_page) + bv.bv_offset;
4017 			last_tidaw = itcw_add_tidaw(itcw, 0x00,
4018 						    dst, bv.bv_len);
4019 			if (IS_ERR(last_tidaw)) {
4020 				ret = -EINVAL;
4021 				goto out_error;
4022 			}
4023 		}
4024 	}
4025 	last_tidaw->flags |= TIDAW_FLAGS_LAST;
4026 	last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC;
4027 	itcw_finalize(itcw);
4028 
4029 	if (blk_noretry_request(req) ||
4030 	    block->base->features & DASD_FEATURE_FAILFAST)
4031 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4032 	cqr->cpmode = 1;
4033 	cqr->startdev = startdev;
4034 	cqr->memdev = startdev;
4035 	cqr->block = block;
4036 	cqr->expires = startdev->default_expires * HZ;	/* default 5 minutes */
4037 	cqr->lpm = dasd_path_get_ppm(startdev);
4038 	cqr->retries = startdev->default_retries;
4039 	cqr->buildclk = get_tod_clock();
4040 	cqr->status = DASD_CQR_FILLED;
4041 
4042 	/* Set flags to suppress output for expected errors */
4043 	if (dasd_eckd_is_ese(basedev)) {
4044 		set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
4045 		set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags);
4046 		set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4047 	}
4048 
4049 	return cqr;
4050 out_error:
4051 	dasd_sfree_request(cqr, startdev);
4052 	return ERR_PTR(ret);
4053 }
4054 
/*
 * Build a channel program for a block layer request.
 *
 * Selects the most capable builder the device supports:
 *  - transport mode (TPM) track access, if the data fits into fcx_max_data
 *    and the request is single-track or the device supports fcx multitrack,
 *  - command mode track access, if read/write track data is supported,
 *  - command mode single-record access as the universal fallback.
 *
 * Returns the prepared cqr or an ERR_PTR on failure.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	int cmdrtd, cmdwtd;
	int use_prefix;
	int fcx_multitrack;
	struct dasd_eckd_private *private;
	struct dasd_device *basedev;
	sector_t first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned int blk_per_trk, blksize;
	int cdlspecial;
	unsigned int data_size;
	struct dasd_ccw_req *cqr;

	basedev = block->base;
	private = basedev->private;

	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	if (blk_per_trk == 0)
		return ERR_PTR(-EINVAL);
	/* Calculate record id of first and last block. */
	first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	/* CDL special handling applies to records on the first two tracks */
	cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);

	/* fcx multitrack support bit — feature byte layout per device data */
	fcx_multitrack = private->features.feature[40] & 0x20;
	data_size = blk_rq_bytes(req);
	/* request length must be a whole number of blocks */
	if (data_size % blksize)
		return ERR_PTR(-EINVAL);
	/* tpm write requests add CBC data on each track boundary */
	if (rq_data_dir(req) == WRITE)
		data_size += (last_trk - first_trk) * 4;

	/* is read track data and write track data in command mode supported? */
	cmdrtd = private->features.feature[9] & 0x20;
	cmdwtd = private->features.feature[12] & 0x40;
	use_prefix = private->features.feature[8] & 0x01;

	cqr = NULL;
	if (cdlspecial || dasd_page_cache) {
		/* do nothing, just fall through to the cmd mode single case */
	} else if ((data_size <= private->fcx_max_data)
		   && (fcx_multitrack || (first_trk == last_trk))) {
		cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
		/* -EAGAIN/-ENOMEM are propagated; other errors trigger the
		 * command mode single-record fallback below */
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	} else if (use_prefix &&
		   (((rq_data_dir(req) == READ) && cmdrtd) ||
		    ((rq_data_dir(req) == WRITE) && cmdwtd))) {
		cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
						   first_rec, last_rec,
						   first_trk, last_trk,
						   first_offs, last_offs,
						   blk_per_trk, blksize);
		if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
		    (PTR_ERR(cqr) != -ENOMEM))
			cqr = NULL;
	}
	if (!cqr)
		cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
						    first_rec, last_rec,
						    first_trk, last_trk,
						    first_offs, last_offs,
						    blk_per_trk, blksize);
	return cqr;
}
4134 
/*
 * Build a channel program for raw (full) track access.
 *
 * Reads or writes whole tracks (DASD_RAW_SECTORS_PER_TRACK sectors, mapped
 * to 64k of memory each).  Misaligned read requests are padded with dummy
 * pages (rawpadpage); misaligned writes are rejected with -EINVAL.
 *
 * Returns the prepared cqr or an ERR_PTR on failure.
 */
static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
						   struct dasd_block *block,
						   struct request *req)
{
	sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
	unsigned int seg_len, len_to_track_end;
	unsigned int cidaw, cplength, datasize;
	sector_t first_trk, last_trk, sectors;
	struct dasd_eckd_private *base_priv;
	struct dasd_device *basedev;
	struct req_iterator iter;
	struct dasd_ccw_req *cqr;
	unsigned int first_offs;
	unsigned int trkcount;
	unsigned long *idaws;
	unsigned int size;
	unsigned char cmd;
	struct bio_vec bv;
	struct ccw1 *ccw;
	int use_prefix;
	void *data;
	char *dst;

	/*
	 * raw track access needs to be a multiple of 64k and on 64k boundary
	 * For read requests we can fix an incorrect alignment by padding
	 * the request with dummy pages.
	 */
	start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
	end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
		DASD_RAW_SECTORS_PER_TRACK;
	end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
		DASD_RAW_SECTORS_PER_TRACK;
	basedev = block->base;
	if ((start_padding_sectors || end_padding_sectors) &&
	    (rq_data_dir(req) == WRITE)) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "raw write not track aligned (%llu,%llu) req %p",
			      start_padding_sectors, end_padding_sectors, req);
		return ERR_PTR(-EINVAL);
	}

	first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK;
	last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) /
		DASD_RAW_SECTORS_PER_TRACK;
	trkcount = last_trk - first_trk + 1;
	first_offs = 0;

	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_TRACK;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK;
	else
		return ERR_PTR(-EINVAL);

	/*
	 * Raw track based I/O needs IDAWs for each page,
	 * and not just for 64 bit addresses.
	 */
	cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK;

	/*
	 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes
	 * of extended parameter. This is needed for write full track.
	 */
	base_priv = basedev->private;
	use_prefix = base_priv->features.feature[8] & 0x01;
	if (use_prefix) {
		/* one PFX CCW plus one data-transfer CCW per track */
		cplength = 1 + trkcount;
		size = sizeof(struct PFX_eckd_data) + 2;
	} else {
		/* Define Extent + Locate Record Extended, then one CCW/track */
		cplength = 2 + trkcount;
		size = sizeof(struct DE_eckd_data) +
			sizeof(struct LRE_eckd_data) + 2;
	}
	size = ALIGN(size, 8);

	/* control data followed by the IDAW list */
	datasize = size + cidaw * sizeof(unsigned long);

	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
				   datasize, startdev, blk_mq_rq_to_pdu(req));
	if (IS_ERR(cqr))
		return cqr;

	ccw = cqr->cpaddr;
	data = cqr->data;

	if (use_prefix) {
		prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev,
			   startdev, 1, first_offs + 1, trkcount, 0, 0);
	} else {
		define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0);
		ccw[-1].flags |= CCW_FLAG_CC;

		data += sizeof(struct DE_eckd_data);
		locate_record_ext(ccw++, data, first_trk, first_offs + 1,
				  trkcount, cmd, basedev, 0, 0);
	}

	idaws = (unsigned long *)(cqr->data + size);
	len_to_track_end = 0;
	if (start_padding_sectors) {
		/* pad the head of the first track with dummy pages (read) */
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = cmd;
		/* maximum 3390 track size */
		ccw->count = 57326;
		/* 64k map to one track */
		len_to_track_end = 65536 - start_padding_sectors * 512;
		ccw->cda = (__u32)(addr_t)idaws;
		ccw->flags |= CCW_FLAG_IDA;
		ccw->flags |= CCW_FLAG_SLI;
		ccw++;
		/* 8 sectors of 512 bytes map to one 4k dummy page */
		for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
			idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		seg_len = bv.bv_len;
		if (cmd == DASD_ECKD_CCW_READ_TRACK)
			memset(dst, 0, seg_len);
		/* start a new CCW whenever a track boundary is reached */
		if (!len_to_track_end) {
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = cmd;
			/* maximum 3390 track size */
			ccw->count = 57326;
			/* 64k map to one track */
			len_to_track_end = 65536;
			ccw->cda = (__u32)(addr_t)idaws;
			ccw->flags |= CCW_FLAG_IDA;
			ccw->flags |= CCW_FLAG_SLI;
			ccw++;
		}
		len_to_track_end -= seg_len;
		idaws = idal_create_words(idaws, dst, seg_len);
	}
	/* pad the tail of the last track with dummy pages (read) */
	for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
		idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
	if (blk_noretry_request(req) ||
	    block->base->features & DASD_FEATURE_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = startdev->default_expires * HZ;
	cqr->lpm = dasd_path_get_ppm(startdev);
	cqr->retries = startdev->default_retries;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	return cqr;
}
4287 
4288 
/*
 * Free a channel program built by dasd_eckd_build_cp.
 *
 * If dasd_page_cache is in use, the command mode single builder copied I/O
 * data through bounce buffers; walk the channel program in lockstep with the
 * request's bio segments, copy read data back into the request pages and
 * return the bounce pages to the cache.  Finally free the cqr itself.
 *
 * Returns 1 if the request completed successfully (DASD_CQR_DONE),
 * 0 otherwise.
 */
static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv.bv_page) + bv.bv_offset;
		for (off = 0; off < bv.bv_len; off += blksize) {
			/* Skip locate record. */
			/* (CDL area records each carry their own LO CCW) */
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				/* resolve data address, following an IDAL */
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				/* dst != cda means a bounce buffer was used */
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv.bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				/* only the segment's first block is checked */
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}
4340 
4341 /*
4342  * Modify ccw/tcw in cqr so it can be started on a base device.
4343  *
4344  * Note that this is not enough to restart the cqr!
4345  * Either reset cqr->startdev as well (summary unit check handling)
4346  * or restart via separate cqr (as in ERP handling).
4347  */
4348 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
4349 {
4350 	struct ccw1 *ccw;
4351 	struct PFX_eckd_data *pfxdata;
4352 	struct tcw *tcw;
4353 	struct tccb *tccb;
4354 	struct dcw *dcw;
4355 
4356 	if (cqr->cpmode == 1) {
4357 		tcw = cqr->cpaddr;
4358 		tccb = tcw_get_tccb(tcw);
4359 		dcw = (struct dcw *)&tccb->tca[0];
4360 		pfxdata = (struct PFX_eckd_data *)&dcw->cd[0];
4361 		pfxdata->validity.verify_base = 0;
4362 		pfxdata->validity.hyper_pav = 0;
4363 	} else {
4364 		ccw = cqr->cpaddr;
4365 		pfxdata = cqr->data;
4366 		if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
4367 			pfxdata->validity.verify_base = 0;
4368 			pfxdata->validity.hyper_pav = 0;
4369 		}
4370 	}
4371 }
4372 
4373 #define DASD_ECKD_CHANQ_MAX_SIZE 4
4374 
4375 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
4376 						     struct dasd_block *block,
4377 						     struct request *req)
4378 {
4379 	struct dasd_eckd_private *private;
4380 	struct dasd_device *startdev;
4381 	unsigned long flags;
4382 	struct dasd_ccw_req *cqr;
4383 
4384 	startdev = dasd_alias_get_start_dev(base);
4385 	if (!startdev)
4386 		startdev = base;
4387 	private = startdev->private;
4388 	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
4389 		return ERR_PTR(-EBUSY);
4390 
4391 	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
4392 	private->count++;
4393 	if ((base->features & DASD_FEATURE_USERAW))
4394 		cqr = dasd_eckd_build_cp_raw(startdev, block, req);
4395 	else
4396 		cqr = dasd_eckd_build_cp(startdev, block, req);
4397 	if (IS_ERR(cqr))
4398 		private->count--;
4399 	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
4400 	return cqr;
4401 }
4402 
4403 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
4404 				   struct request *req)
4405 {
4406 	struct dasd_eckd_private *private;
4407 	unsigned long flags;
4408 
4409 	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
4410 	private = cqr->memdev->private;
4411 	private->count--;
4412 	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
4413 	return dasd_eckd_free_cp(cqr, req);
4414 }
4415 
4416 static int
4417 dasd_eckd_fill_info(struct dasd_device * device,
4418 		    struct dasd_information2_t * info)
4419 {
4420 	struct dasd_eckd_private *private = device->private;
4421 
4422 	info->label_block = 2;
4423 	info->FBA_layout = private->uses_cdl ? 0 : 1;
4424 	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4425 	info->characteristics_size = sizeof(private->rdc_data);
4426 	memcpy(info->characteristics, &private->rdc_data,
4427 	       sizeof(private->rdc_data));
4428 	info->confdata_size = min((unsigned long)private->conf_len,
4429 				  sizeof(info->configuration_data));
4430 	memcpy(info->configuration_data, private->conf_data,
4431 	       info->confdata_size);
4432 	return 0;
4433 }
4434 
4435 /*
4436  * SECTION: ioctl functions for eckd devices.
4437  */
4438 
4439 /*
4440  * Release device ioctl.
4441  * Buils a channel programm to releases a prior reserved
4442  * (see dasd_eckd_reserve) device.
4443  */
4444 static int
4445 dasd_eckd_release(struct dasd_device *device)
4446 {
4447 	struct dasd_ccw_req *cqr;
4448 	int rc;
4449 	struct ccw1 *ccw;
4450 	int useglobal;
4451 
4452 	if (!capable(CAP_SYS_ADMIN))
4453 		return -EACCES;
4454 
4455 	useglobal = 0;
4456 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4457 	if (IS_ERR(cqr)) {
4458 		mutex_lock(&dasd_reserve_mutex);
4459 		useglobal = 1;
4460 		cqr = &dasd_reserve_req->cqr;
4461 		memset(cqr, 0, sizeof(*cqr));
4462 		memset(&dasd_reserve_req->ccw, 0,
4463 		       sizeof(dasd_reserve_req->ccw));
4464 		cqr->cpaddr = &dasd_reserve_req->ccw;
4465 		cqr->data = &dasd_reserve_req->data;
4466 		cqr->magic = DASD_ECKD_MAGIC;
4467 	}
4468 	ccw = cqr->cpaddr;
4469 	ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4470 	ccw->flags |= CCW_FLAG_SLI;
4471 	ccw->count = 32;
4472 	ccw->cda = (__u32)(addr_t) cqr->data;
4473 	cqr->startdev = device;
4474 	cqr->memdev = device;
4475 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4476 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4477 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
4478 	cqr->expires = 2 * HZ;
4479 	cqr->buildclk = get_tod_clock();
4480 	cqr->status = DASD_CQR_FILLED;
4481 
4482 	rc = dasd_sleep_on_immediatly(cqr);
4483 	if (!rc)
4484 		clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4485 
4486 	if (useglobal)
4487 		mutex_unlock(&dasd_reserve_mutex);
4488 	else
4489 		dasd_sfree_request(cqr, cqr->memdev);
4490 	return rc;
4491 }
4492 
4493 /*
4494  * Reserve device ioctl.
4495  * Options are set to 'synchronous wait for interrupt' and
4496  * 'timeout the request'. This leads to a terminate IO if
4497  * the interrupt is outstanding for a certain time.
4498  */
4499 static int
4500 dasd_eckd_reserve(struct dasd_device *device)
4501 {
4502 	struct dasd_ccw_req *cqr;
4503 	int rc;
4504 	struct ccw1 *ccw;
4505 	int useglobal;
4506 
4507 	if (!capable(CAP_SYS_ADMIN))
4508 		return -EACCES;
4509 
4510 	useglobal = 0;
4511 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4512 	if (IS_ERR(cqr)) {
4513 		mutex_lock(&dasd_reserve_mutex);
4514 		useglobal = 1;
4515 		cqr = &dasd_reserve_req->cqr;
4516 		memset(cqr, 0, sizeof(*cqr));
4517 		memset(&dasd_reserve_req->ccw, 0,
4518 		       sizeof(dasd_reserve_req->ccw));
4519 		cqr->cpaddr = &dasd_reserve_req->ccw;
4520 		cqr->data = &dasd_reserve_req->data;
4521 		cqr->magic = DASD_ECKD_MAGIC;
4522 	}
4523 	ccw = cqr->cpaddr;
4524 	ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
4525 	ccw->flags |= CCW_FLAG_SLI;
4526 	ccw->count = 32;
4527 	ccw->cda = (__u32)(addr_t) cqr->data;
4528 	cqr->startdev = device;
4529 	cqr->memdev = device;
4530 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4531 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4532 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
4533 	cqr->expires = 2 * HZ;
4534 	cqr->buildclk = get_tod_clock();
4535 	cqr->status = DASD_CQR_FILLED;
4536 
4537 	rc = dasd_sleep_on_immediatly(cqr);
4538 	if (!rc)
4539 		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4540 
4541 	if (useglobal)
4542 		mutex_unlock(&dasd_reserve_mutex);
4543 	else
4544 		dasd_sfree_request(cqr, cqr->memdev);
4545 	return rc;
4546 }
4547 
4548 /*
4549  * Steal lock ioctl - unconditional reserve device.
4550  * Buils a channel programm to break a device's reservation.
4551  * (unconditional reserve)
4552  */
4553 static int
4554 dasd_eckd_steal_lock(struct dasd_device *device)
4555 {
4556 	struct dasd_ccw_req *cqr;
4557 	int rc;
4558 	struct ccw1 *ccw;
4559 	int useglobal;
4560 
4561 	if (!capable(CAP_SYS_ADMIN))
4562 		return -EACCES;
4563 
4564 	useglobal = 0;
4565 	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4566 	if (IS_ERR(cqr)) {
4567 		mutex_lock(&dasd_reserve_mutex);
4568 		useglobal = 1;
4569 		cqr = &dasd_reserve_req->cqr;
4570 		memset(cqr, 0, sizeof(*cqr));
4571 		memset(&dasd_reserve_req->ccw, 0,
4572 		       sizeof(dasd_reserve_req->ccw));
4573 		cqr->cpaddr = &dasd_reserve_req->ccw;
4574 		cqr->data = &dasd_reserve_req->data;
4575 		cqr->magic = DASD_ECKD_MAGIC;
4576 	}
4577 	ccw = cqr->cpaddr;
4578 	ccw->cmd_code = DASD_ECKD_CCW_SLCK;
4579 	ccw->flags |= CCW_FLAG_SLI;
4580 	ccw->count = 32;
4581 	ccw->cda = (__u32)(addr_t) cqr->data;
4582 	cqr->startdev = device;
4583 	cqr->memdev = device;
4584 	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
4585 	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4586 	cqr->retries = 2;	/* set retry counter to enable basic ERP */
4587 	cqr->expires = 2 * HZ;
4588 	cqr->buildclk = get_tod_clock();
4589 	cqr->status = DASD_CQR_FILLED;
4590 
4591 	rc = dasd_sleep_on_immediatly(cqr);
4592 	if (!rc)
4593 		set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
4594 
4595 	if (useglobal)
4596 		mutex_unlock(&dasd_reserve_mutex);
4597 	else
4598 		dasd_sfree_request(cqr, cqr->memdev);
4599 	return rc;
4600 }
4601 
/*
 * SNID - Sense Path Group ID
 * This ioctl may be used in situations where I/O is stalled due to
 * a reserve, so if the normal dasd_smalloc_request fails, we use the
 * preallocated dasd_reserve_req.
 *
 * The user-supplied path mask selects the paths to sense; the sensed
 * dasd_snid_data is copied back to user space on success.
 */
static int dasd_eckd_snid(struct dasd_device *device,
			  void __user *argp)
{
	struct dasd_ccw_req *cqr;
	int rc;
	struct ccw1 *ccw;
	int useglobal;
	struct dasd_snid_ioctl_data usrparm;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		return -EFAULT;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
				   sizeof(struct dasd_snid_data), device,
				   NULL);
	if (IS_ERR(cqr)) {
		/* fall back to the static request under dasd_reserve_mutex */
		mutex_lock(&dasd_reserve_mutex);
		useglobal = 1;
		cqr = &dasd_reserve_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(&dasd_reserve_req->ccw, 0,
		       sizeof(dasd_reserve_req->ccw));
		cqr->cpaddr = &dasd_reserve_req->ccw;
		cqr->data = &dasd_reserve_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNID;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->count = 12;
	ccw->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	/* allow the request to run even while the device is locked */
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* restrict the request to the user-selected paths */
	cqr->lpm = usrparm.path_mask;

	rc = dasd_sleep_on_immediatly(cqr);
	/* verify that I/O processing didn't modify the path mask */
	if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask))
		rc = -EIO;
	if (!rc) {
		usrparm.data = *((struct dasd_snid_data *)cqr->data);
		if (copy_to_user(argp, &usrparm, sizeof(usrparm)))
			rc = -EFAULT;
	}

	if (useglobal)
		mutex_unlock(&dasd_reserve_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
4670 
/*
 * Read performance statistics
 *
 * Issues a PSF (Prepare for Read Subsystem Data) CCW chained to an RSSD
 * (Read Subsystem Data) CCW and copies the resulting performance
 * statistics to user space.
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* one data area holding the PSF parameters followed by the result */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */  + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;	/* chain to the RSSD CCW */
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
4733 
4734 /*
4735  * Get attributes (cache operations)
4736  * Returnes the cache attributes used in Define Extend (DE).
4737  */
4738 static int
4739 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
4740 {
4741 	struct dasd_eckd_private *private = device->private;
4742 	struct attrib_data_t attrib = private->attrib;
4743 	int rc;
4744 
4745         if (!capable(CAP_SYS_ADMIN))
4746                 return -EACCES;
4747 	if (!argp)
4748                 return -EINVAL;
4749 
4750 	rc = 0;
4751 	if (copy_to_user(argp, (long *) &attrib,
4752 			 sizeof(struct attrib_data_t)))
4753 		rc = -EFAULT;
4754 
4755 	return rc;
4756 }
4757 
4758 /*
4759  * Set attributes (cache operations)
4760  * Stores the attributes for cache operation to be used in Define Extend (DE).
4761  */
4762 static int
4763 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
4764 {
4765 	struct dasd_eckd_private *private = device->private;
4766 	struct attrib_data_t attrib;
4767 
4768 	if (!capable(CAP_SYS_ADMIN))
4769 		return -EACCES;
4770 	if (!argp)
4771 		return -EINVAL;
4772 
4773 	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
4774 		return -EFAULT;
4775 	private->attrib = attrib;
4776 
4777 	dev_info(&device->cdev->dev,
4778 		 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
4779 		 private->attrib.operation, private->attrib.nr_cyl);
4780 	return 0;
4781 }
4782 
/*
 * Issue syscall I/O to EMC Symmetrix array.
 * CCWs are PSF and RSSD
 *
 * Copies the PSF data from user space, runs a PSF+RSSD CCW pair and
 * copies the RSSD result back.  The outcome (including the first two
 * PSF bytes) is logged to the debug feature on every exit path.
 */
static int dasd_symm_io(struct dasd_device *device, void __user *argp)
{
	struct dasd_symmio_parms usrparm;
	char *psf_data, *rssd_result;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	char psf0, psf1;
	int rc;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EACCES;
	psf0 = psf1 = 0;

	/* Copy parms from caller */
	rc = -EFAULT;
	if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
		goto out;
	if (is_compat_task()) {
		/* Make sure pointers are sane even on 31 bit. */
		rc = -EINVAL;
		if ((usrparm.psf_data >> 32) != 0)
			goto out;
		if ((usrparm.rssd_result >> 32) != 0)
			goto out;
		usrparm.psf_data &= 0x7fffffffULL;
		usrparm.rssd_result &= 0x7fffffffULL;
	}
	/* at least 2 bytes are accessed and should be allocated */
	if (usrparm.psf_data_len < 2) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "Symmetrix ioctl invalid data length %d",
			      usrparm.psf_data_len);
		rc = -EINVAL;
		goto out;
	}
	/* alloc I/O data area (GFP_DMA: addressable by the channel subsystem) */
	psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
	rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
	if (!psf_data || !rssd_result) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* get syscall header from user space */
	rc = -EFAULT;
	if (copy_from_user(psf_data,
			   (void __user *)(unsigned long) usrparm.psf_data,
			   usrparm.psf_data_len))
		goto out_free;
	/* remember the first two PSF bytes for the trace message below */
	psf0 = psf_data[0];
	psf1 = psf_data[1];

	/* setup CCWs for PSF + RSSD */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			"Could not allocate initialization request");
		rc = PTR_ERR(cqr);
		goto out_free;
	}

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 3;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;

	/* Build the ccws */
	ccw = cqr->cpaddr;

	/* PSF ccw */
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = usrparm.psf_data_len;
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) psf_data;

	ccw++;

	/* RSSD ccw  */
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = usrparm.rssd_result_len;
	ccw->flags = CCW_FLAG_SLI ;
	ccw->cda = (__u32)(addr_t) rssd_result;

	rc = dasd_sleep_on(cqr);
	if (rc)
		goto out_sfree;

	rc = -EFAULT;
	if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
			   rssd_result, usrparm.rssd_result_len))
		goto out_sfree;
	rc = 0;

out_sfree:
	dasd_sfree_request(cqr, cqr->memdev);
out_free:
	kfree(rssd_result);
	kfree(psf_data);
out:
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "Symmetrix ioctl (0x%02x 0x%02x): rc=%d",
		      (int) psf0, (int) psf1, rc);
	return rc;
}
4893 
4894 static int
4895 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
4896 {
4897 	struct dasd_device *device = block->base;
4898 
4899 	switch (cmd) {
4900 	case BIODASDGATTR:
4901 		return dasd_eckd_get_attrib(device, argp);
4902 	case BIODASDSATTR:
4903 		return dasd_eckd_set_attrib(device, argp);
4904 	case BIODASDPSRD:
4905 		return dasd_eckd_performance(device, argp);
4906 	case BIODASDRLSE:
4907 		return dasd_eckd_release(device);
4908 	case BIODASDRSRV:
4909 		return dasd_eckd_reserve(device);
4910 	case BIODASDSLCK:
4911 		return dasd_eckd_steal_lock(device);
4912 	case BIODASDSNID:
4913 		return dasd_eckd_snid(device, argp);
4914 	case BIODASDSYMMIO:
4915 		return dasd_symm_io(device, argp);
4916 	default:
4917 		return -ENOTTY;
4918 	}
4919 }
4920 
4921 /*
4922  * Dump the range of CCWs into 'page' buffer
4923  * and return number of printed chars.
4924  */
4925 static int
4926 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
4927 {
4928 	int len, count;
4929 	char *datap;
4930 
4931 	len = 0;
4932 	while (from <= to) {
4933 		len += sprintf(page + len, PRINTK_HEADER
4934 			       " CCW %p: %08X %08X DAT:",
4935 			       from, ((int *) from)[0], ((int *) from)[1]);
4936 
4937 		/* get pointer to data (consider IDALs) */
4938 		if (from->flags & CCW_FLAG_IDA)
4939 			datap = (char *) *((addr_t *) (addr_t) from->cda);
4940 		else
4941 			datap = (char *) ((addr_t) from->cda);
4942 
4943 		/* dump data (max 32 bytes) */
4944 		for (count = 0; count < from->count && count < 32; count++) {
4945 			if (count % 8 == 0) len += sprintf(page + len, " ");
4946 			if (count % 4 == 0) len += sprintf(page + len, " ");
4947 			len += sprintf(page + len, "%02x", datap[count]);
4948 		}
4949 		len += sprintf(page + len, "\n");
4950 		from++;
4951 	}
4952 	return len;
4953 }
4954 
4955 static void
4956 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
4957 			 char *reason)
4958 {
4959 	u64 *sense;
4960 	u64 *stat;
4961 
4962 	sense = (u64 *) dasd_get_sense(irb);
4963 	stat = (u64 *) &irb->scsw;
4964 	if (sense) {
4965 		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : "
4966 			      "%016llx %016llx %016llx %016llx",
4967 			      reason, *stat, *((u32 *) (stat + 1)),
4968 			      sense[0], sense[1], sense[2], sense[3]);
4969 	} else {
4970 		DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s",
4971 			      reason, *stat, *((u32 *) (stat + 1)),
4972 			      "NO VALID SENSE");
4973 	}
4974 }
4975 
4976 /*
4977  * Print sense data and related channel program.
4978  * Parts are printed because printk buffer is only 1024 bytes.
4979  */
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	/* GFP_ATOMIC: may be called from interrupt context */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "No memory to dump sense data\n");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	/* sense data is only valid if the concurrent-sense bit is set */
	if (irb->esw.esw0.erw.cons) {
		/* print all 32 sense bytes, 8 per line */
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */
		first = req->cpaddr;
		/* last: first CCW without chaining flags, i.e. end of chain */
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk(KERN_ERR "%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero  */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa; /* failing CCW */
		if (from <  fail - 2) {
			from = fail - 2;     /* there is a gap - print header */
			len += sprintf(page, PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;     /* there is a gap - print header */
			len += sprintf(page + len, PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk(KERN_ERR "%s", page);
	}
	free_page((unsigned long) page);
}
5078 
5079 
5080 /*
5081  * Print sense data from a tcw.
5082  */
static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	int len, sl, sct, residual;
	struct tsb *tsb;
	u8 *sense, *rcq;

	/* GFP_ATOMIC: may be called from interrupt context */
	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DBF_DEV_EVENT(DBF_WARNING, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      dev_name(&device->cdev->dev));
	len += sprintf(page + len, PRINTK_HEADER
		       " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
		       "CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
		       req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
		       scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
		       scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
		       irb->scsw.tm.fcxs,
		       (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
		       req ? req->intrc : 0);
	len += sprintf(page + len, PRINTK_HEADER
		       " device %s: Failing TCW: %p\n",
		       dev_name(&device->cdev->dev),
		       (void *) (addr_t) irb->scsw.tm.tcw);

	/* fetch the transport status block if the TCW address is valid
	 * and the fcx-status indicates a stored TSB */
	tsb = NULL;
	sense = NULL;
	if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
		tsb = tcw_get_tsb(
			(struct tcw *)(unsigned long)irb->scsw.tm.tcw);

	if (tsb) {
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->length %d\n", tsb->length);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->flags %x\n", tsb->flags);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->dcw_offset %d\n", tsb->dcw_offset);
		len += sprintf(page + len, PRINTK_HEADER
			       " tsb->count %d\n", tsb->count);
		residual = tsb->count - 28;
		len += sprintf(page + len, PRINTK_HEADER
			       " residual %d\n", residual);

		/* low three flag bits select the tsa union member */
		switch (tsb->flags & 0x07) {
		case 1:	/* tsa_iostat */
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.dev_time %d\n",
				       tsb->tsa.iostat.dev_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.def_time %d\n",
				       tsb->tsa.iostat.def_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.queue_time %d\n",
				       tsb->tsa.iostat.queue_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.dev_busy_time %d\n",
				       tsb->tsa.iostat.dev_busy_time);
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.iostat.dev_act_time %d\n",
				       tsb->tsa.iostat.dev_act_time);
			sense = tsb->tsa.iostat.sense;
			break;
		case 2: /* ts_ddpc */
			len += sprintf(page + len, PRINTK_HEADER
			       " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
			/* print the 16 rcq bytes, 8 per line */
			for (sl = 0; sl < 2; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " tsb->tsa.ddpc.rcq %2d-%2d: ",
					       (8 * sl), ((8 * sl) + 7));
				rcq = tsb->tsa.ddpc.rcq;
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       rcq[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}
			sense = tsb->tsa.ddpc.sense;
			break;
		case 3: /* tsa_intrg */
			len += sprintf(page + len, PRINTK_HEADER
				      " tsb->tsa.intrg.: not supported yet\n");
			break;
		}

		if (sense) {
			/* print all 32 sense bytes, 8 per line */
			for (sl = 0; sl < 4; sl++) {
				len += sprintf(page + len, PRINTK_HEADER
					       " Sense(hex) %2d-%2d:",
					       (8 * sl), ((8 * sl) + 7));
				for (sct = 0; sct < 8; sct++) {
					len += sprintf(page + len, " %02x",
						       sense[8 * sl + sct]);
				}
				len += sprintf(page + len, "\n");
			}

			if (sense[27] & DASD_SENSE_BIT_0) {
				/* 24 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 24 Byte: %x MSG %x, "
					"%s MSGb to SYSOP\n",
					sense[7] >> 4, sense[7] & 0x0f,
					sense[1] & 0x10 ? "" : "no");
			} else {
				/* 32 Byte Sense Data */
				sprintf(page + len, PRINTK_HEADER
					" 32 Byte: Format: %x "
					"Exception class %x\n",
					sense[6] & 0x0f, sense[22] >> 4);
			}
		} else {
			sprintf(page + len, PRINTK_HEADER
				" SORRY - NO VALID SENSE AVAILABLE\n");
		}
	} else {
		sprintf(page + len, PRINTK_HEADER
			" SORRY - NO TSB DATA AVAILABLE\n");
	}
	printk(KERN_ERR "%s", page);
	free_page((unsigned long) page);
}
5212 
5213 static void dasd_eckd_dump_sense(struct dasd_device *device,
5214 				 struct dasd_ccw_req *req, struct irb *irb)
5215 {
5216 	u8 *sense = dasd_get_sense(irb);
5217 
5218 	if (scsw_is_tm(&irb->scsw)) {
5219 		/*
5220 		 * In some cases the 'File Protected' or 'Incorrect Length'
5221 		 * error might be expected and log messages shouldn't be written
5222 		 * then. Check if the according suppress bit is set.
5223 		 */
5224 		if (sense && (sense[1] & SNS1_FILE_PROTECTED) &&
5225 		    test_bit(DASD_CQR_SUPPRESS_FP, &req->flags))
5226 			return;
5227 		if (scsw_cstat(&irb->scsw) == 0x40 &&
5228 		    test_bit(DASD_CQR_SUPPRESS_IL, &req->flags))
5229 			return;
5230 
5231 		dasd_eckd_dump_sense_tcw(device, req, irb);
5232 	} else {
5233 		/*
5234 		 * In some cases the 'Command Reject' or 'No Record Found'
5235 		 * error might be expected and log messages shouldn't be
5236 		 * written then. Check if the according suppress bit is set.
5237 		 */
5238 		if (sense && sense[0] & SNS0_CMD_REJECT &&
5239 		    test_bit(DASD_CQR_SUPPRESS_CR, &req->flags))
5240 			return;
5241 
5242 		if (sense && sense[1] & SNS1_NO_REC_FOUND &&
5243 		    test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags))
5244 			return;
5245 
5246 		dasd_eckd_dump_sense_ccw(device, req, irb);
5247 	}
5248 }
5249 
/*
 * Power-management freeze callback: detach the device from PAV alias
 * handling before suspend.  Always returns 0.
 */
static int dasd_eckd_pm_freeze(struct dasd_device *device)
{
	/*
	 * the device should be disconnected from our LCU structure
	 * on restore we will reconnect it and reread LCU specific
	 * information like PAV support that might have changed
	 */
	dasd_alias_remove_device(device);
	dasd_alias_disconnect_device_from_lcu(device);

	return 0;
}
5262 
/*
 * Power-management restore callback: rebuild the device state after
 * resume.  Rereads configuration data, verifies the UID is unchanged,
 * reconnects the device to its LCU, revalidates the server, and
 * refreshes feature codes, volume/extent-pool info and the device
 * characteristics.  Returns 0 on success, -1 on any failure.
 */
static int dasd_eckd_restore_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_eckd_characteristics temp_rdc_data;
	int rc;
	struct dasd_uid temp_uid;
	unsigned long flags;
	unsigned long cqr_flags = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read configuration data failed, rc=%d", rc);
		goto out_err;
	}

	/* remember the pre-suspend UID so a change can be reported */
	dasd_eckd_get_uid(device, &temp_uid);
	/* Generate device unique id */
	rc = dasd_eckd_generate_uid(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0)
		dev_err(&device->cdev->dev, "The UID of the DASD has "
			"changed\n");
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (rc)
		goto out_err;

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	rc = dasd_alias_make_device_known_to_lcu(device);
	if (rc)
		goto out_err;

	/* failfast: do not hang resume on an unreachable server */
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr_flags);
	dasd_eckd_validate_server(device, cqr_flags);

	/* RE-Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
			"Read configuration data failed, rc=%d", rc);
		goto out_err2;
	}

	/* Read Feature Codes */
	dasd_eckd_read_features(device);

	/* Read Volume Information */
	rc = dasd_eckd_read_vol_info(device);
	if (rc)
		goto out_err2;

	/* Read Extent Pool Information */
	rc = dasd_eckd_read_ext_pool_info(device);
	if (rc)
		goto out_err2;

	/* Read Device Characteristics */
	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
					 &temp_rdc_data, 64);
	if (rc) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Read device characteristic failed, rc=%d", rc);
		goto out_err2;
	}
	/* commit the new characteristics under the ccw device lock */
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	memcpy(&private->rdc_data, &temp_rdc_data, sizeof(temp_rdc_data));
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	/* add device to alias management */
	dasd_alias_add_device(device);

	return 0;

out_err2:
	dasd_alias_disconnect_device_from_lcu(device);
out_err:
	return -1;
}
5342 
5343 static int dasd_eckd_reload_device(struct dasd_device *device)
5344 {
5345 	struct dasd_eckd_private *private = device->private;
5346 	int rc, old_base;
5347 	char print_uid[60];
5348 	struct dasd_uid uid;
5349 	unsigned long flags;
5350 
5351 	/*
5352 	 * remove device from alias handling to prevent new requests
5353 	 * from being scheduled on the wrong alias device
5354 	 */
5355 	dasd_alias_remove_device(device);
5356 
5357 	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
5358 	old_base = private->uid.base_unit_addr;
5359 	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
5360 
5361 	/* Read Configuration Data */
5362 	rc = dasd_eckd_read_conf(device);
5363 	if (rc)
5364 		goto out_err;
5365 
5366 	rc = dasd_eckd_generate_uid(device);
5367 	if (rc)
5368 		goto out_err;
5369 	/*
5370 	 * update unit address configuration and
5371 	 * add device to alias management
5372 	 */
5373 	dasd_alias_update_add_device(device);
5374 
5375 	dasd_eckd_get_uid(device, &uid);
5376 
5377 	if (old_base != uid.base_unit_addr) {
5378 		if (strlen(uid.vduit) > 0)
5379 			snprintf(print_uid, sizeof(print_uid),
5380 				 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
5381 				 uid.ssid, uid.base_unit_addr, uid.vduit);
5382 		else
5383 			snprintf(print_uid, sizeof(print_uid),
5384 				 "%s.%s.%04x.%02x", uid.vendor, uid.serial,
5385 				 uid.ssid, uid.base_unit_addr);
5386 
5387 		dev_info(&device->cdev->dev,
5388 			 "An Alias device was reassigned to a new base device "
5389 			 "with UID: %s\n", print_uid);
5390 	}
5391 	return 0;
5392 
5393 out_err:
5394 	return -1;
5395 }
5396 
/*
 * Read the subsystem message buffer via PSF/RSSD on the path given by
 * lpum.  On z/VM the requested path may not be usable, so on failure
 * the request is retried once with an open path mask.  The result is
 * copied into *messages.  Returns 0 on success or a negative errno.
 */
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* one CCW for PSF, one for RSSD, data areas back to back */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	/* first CCW: Perform Subsystem Function, chained to the RSSD CCW */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	/* second CCW: Read Subsystem Data into the message buffer */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n"
				, rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5478 
/*
 * Query which hosts have access to this volume via the PSF/RSSD
 * "query host access" suborder and copy the result into *data.
 * Not available for HyperPAV alias devices and storage servers that
 * lack the feature.  Returns 0 on success, -EOPNOTSUPP when the query
 * is unsupported or fails, or a negative errno on allocation failure.
 */
static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */	+ 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}
	/* separate DMA-capable buffer for the (large) query result */
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	/* first CCW: Perform Subsystem Function, chained to the RSSD CCW */
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}
5560 /*
5561  * return number of grouped devices
5562  */
5563 static int dasd_eckd_host_access_count(struct dasd_device *device)
5564 {
5565 	struct dasd_psf_query_host_access *access;
5566 	struct dasd_ckd_path_group_entry *entry;
5567 	struct dasd_ckd_host_information *info;
5568 	int count = 0;
5569 	int rc, i;
5570 
5571 	access = kzalloc(sizeof(*access), GFP_NOIO);
5572 	if (!access) {
5573 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5574 				"Could not allocate access buffer");
5575 		return -ENOMEM;
5576 	}
5577 	rc = dasd_eckd_query_host_access(device, access);
5578 	if (rc) {
5579 		kfree(access);
5580 		return rc;
5581 	}
5582 
5583 	info = (struct dasd_ckd_host_information *)
5584 		access->host_access_information;
5585 	for (i = 0; i < info->entry_count; i++) {
5586 		entry = (struct dasd_ckd_path_group_entry *)
5587 			(info->entry + i * info->entry_size);
5588 		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
5589 			count++;
5590 	}
5591 
5592 	kfree(access);
5593 	return count;
5594 }
5595 
5596 /*
5597  * write host access information to a sequential file
5598  */
5599 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
5600 {
5601 	struct dasd_psf_query_host_access *access;
5602 	struct dasd_ckd_path_group_entry *entry;
5603 	struct dasd_ckd_host_information *info;
5604 	char sysplex[9] = "";
5605 	int rc, i;
5606 
5607 	access = kzalloc(sizeof(*access), GFP_NOIO);
5608 	if (!access) {
5609 		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
5610 				"Could not allocate access buffer");
5611 		return -ENOMEM;
5612 	}
5613 	rc = dasd_eckd_query_host_access(device, access);
5614 	if (rc) {
5615 		kfree(access);
5616 		return rc;
5617 	}
5618 
5619 	info = (struct dasd_ckd_host_information *)
5620 		access->host_access_information;
5621 	for (i = 0; i < info->entry_count; i++) {
5622 		entry = (struct dasd_ckd_path_group_entry *)
5623 			(info->entry + i * info->entry_size);
5624 		/* PGID */
5625 		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
5626 		/* FLAGS */
5627 		seq_printf(m, "status_flags %02x\n", entry->status_flags);
5628 		/* SYSPLEX NAME */
5629 		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
5630 		EBCASC(sysplex, sizeof(sysplex));
5631 		seq_printf(m, "sysplex_name %8s\n", sysplex);
5632 		/* SUPPORTED CYLINDER */
5633 		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
5634 		/* TIMESTAMP */
5635 		seq_printf(m, "timestamp %lu\n", (unsigned long)
5636 			   entry->timestamp);
5637 	}
5638 	kfree(access);
5639 
5640 	return 0;
5641 }
5642 
5643 /*
5644  * Perform Subsystem Function - CUIR response
5645  */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	/* path position derived from the logical path mask */
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* single PSF CCW carrying the CUIR response payload */
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			   "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	/* fill the response with the message id and the addressed path */
	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
5693 
5694 /*
5695  * return configuration data that is referenced by record selector
5696  * if a record selector is specified or per default return the
5697  * conf_data pointer for the path specified by lpum
5698  */
5699 static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
5700 						     __u8 lpum,
5701 						     struct dasd_cuir_message *cuir)
5702 {
5703 	struct dasd_conf_data *conf_data;
5704 	int path, pos;
5705 
5706 	if (cuir->record_selector == 0)
5707 		goto out;
5708 	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
5709 		conf_data = device->path[pos].conf_data;
5710 		if (conf_data->gneq.record_selector ==
5711 		    cuir->record_selector)
5712 			return conf_data;
5713 	}
5714 out:
5715 	return device->path[pathmask_to_pos(lpum)].conf_data;
5716 }
5717 
5718 /*
5719  * This function determines the scope of a reconfiguration request by
5720  * analysing the path and device selection data provided in the CUIR request.
5721  * Returns a path mask containing CUIR affected paths for the give device.
5722  *
5723  * If the CUIR request does not contain the required information return the
5724  * path mask of the path the attention message for the CUIR request was reveived
5725  * on.
5726  */
5727 static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
5728 				struct dasd_cuir_message *cuir)
5729 {
5730 	struct dasd_conf_data *ref_conf_data;
5731 	unsigned long bitmask = 0, mask = 0;
5732 	struct dasd_conf_data *conf_data;
5733 	unsigned int pos, path;
5734 	char *ref_gneq, *gneq;
5735 	char *ref_ned, *ned;
5736 	int tbcpm = 0;
5737 
5738 	/* if CUIR request does not specify the scope use the path
5739 	   the attention message was presented on */
5740 	if (!cuir->ned_map ||
5741 	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
5742 		return lpum;
5743 
5744 	/* get reference conf data */
5745 	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
5746 	/* reference ned is determined by ned_map field */
5747 	pos = 8 - ffs(cuir->ned_map);
5748 	ref_ned = (char *)&ref_conf_data->neds[pos];
5749 	ref_gneq = (char *)&ref_conf_data->gneq;
5750 	/* transfer 24 bit neq_map to mask */
5751 	mask = cuir->neq_map[2];
5752 	mask |= cuir->neq_map[1] << 8;
5753 	mask |= cuir->neq_map[0] << 16;
5754 
5755 	for (path = 0; path < 8; path++) {
5756 		/* initialise data per path */
5757 		bitmask = mask;
5758 		conf_data = device->path[path].conf_data;
5759 		pos = 8 - ffs(cuir->ned_map);
5760 		ned = (char *) &conf_data->neds[pos];
5761 		/* compare reference ned and per path ned */
5762 		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
5763 			continue;
5764 		gneq = (char *)&conf_data->gneq;
5765 		/* compare reference gneq and per_path gneq under
5766 		   24 bit mask where mask bit 0 equals byte 7 of
5767 		   the gneq and mask bit 24 equals byte 31 */
5768 		while (bitmask) {
5769 			pos = ffs(bitmask) - 1;
5770 			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
5771 			    != 0)
5772 				break;
5773 			clear_bit(pos, &bitmask);
5774 		}
5775 		if (bitmask)
5776 			continue;
5777 		/* device and path match the reference values
5778 		   add path to CUIR scope */
5779 		tbcpm |= 0x80 >> path;
5780 	}
5781 	return tbcpm;
5782 }
5783 
5784 static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
5785 				       unsigned long paths, int action)
5786 {
5787 	int pos;
5788 
5789 	while (paths) {
5790 		/* get position of bit in mask */
5791 		pos = 8 - ffs(paths);
5792 		/* get channel path descriptor from this position */
5793 		if (action == CUIR_QUIESCE)
5794 			pr_warn("Service on the storage server caused path %x.%02x to go offline",
5795 				device->path[pos].cssid,
5796 				device->path[pos].chpid);
5797 		else if (action == CUIR_RESUME)
5798 			pr_info("Path %x.%02x is back online after service on the storage server",
5799 				device->path[pos].cssid,
5800 				device->path[pos].chpid);
5801 		clear_bit(7 - pos, &paths);
5802 	}
5803 }
5804 
5805 static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
5806 				      struct dasd_cuir_message *cuir)
5807 {
5808 	unsigned long tbcpm;
5809 
5810 	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
5811 	/* nothing to do if path is not in use */
5812 	if (!(dasd_path_get_opm(device) & tbcpm))
5813 		return 0;
5814 	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
5815 		/* no path would be left if the CUIR action is taken
5816 		   return error */
5817 		return -EINVAL;
5818 	}
5819 	/* remove device from operational path mask */
5820 	dasd_path_remove_opm(device, tbcpm);
5821 	dasd_path_add_cuirpm(device, tbcpm);
5822 	return tbcpm;
5823 }
5824 
5825 /*
5826  * walk through all devices and build a path mask to quiesce them
5827  * return an error if the last path to a device would be removed
5828  *
5829  * if only part of the devices are quiesced and an error
5830  * occurs no onlining necessary, the storage server will
5831  * notify the already set offline devices again
5832  */
5833 static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
5834 				  struct dasd_cuir_message *cuir)
5835 {
5836 	struct dasd_eckd_private *private = device->private;
5837 	struct alias_pav_group *pavgroup, *tempgroup;
5838 	struct dasd_device *dev, *n;
5839 	unsigned long paths = 0;
5840 	unsigned long flags;
5841 	int tbcpm;
5842 
5843 	/* active devices */
5844 	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
5845 				 alias_list) {
5846 		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5847 		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5848 		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
5849 		if (tbcpm < 0)
5850 			goto out_err;
5851 		paths |= tbcpm;
5852 	}
5853 	/* inactive devices */
5854 	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
5855 				 alias_list) {
5856 		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5857 		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5858 		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
5859 		if (tbcpm < 0)
5860 			goto out_err;
5861 		paths |= tbcpm;
5862 	}
5863 	/* devices in PAV groups */
5864 	list_for_each_entry_safe(pavgroup, tempgroup,
5865 				 &private->lcu->grouplist, group) {
5866 		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
5867 					 alias_list) {
5868 			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5869 			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5870 			spin_unlock_irqrestore(
5871 				get_ccwdev_lock(dev->cdev), flags);
5872 			if (tbcpm < 0)
5873 				goto out_err;
5874 			paths |= tbcpm;
5875 		}
5876 		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
5877 					 alias_list) {
5878 			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
5879 			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
5880 			spin_unlock_irqrestore(
5881 				get_ccwdev_lock(dev->cdev), flags);
5882 			if (tbcpm < 0)
5883 				goto out_err;
5884 			paths |= tbcpm;
5885 		}
5886 	}
5887 	/* notify user about all paths affected by CUIR action */
5888 	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
5889 	return 0;
5890 out_err:
5891 	return tbcpm;
5892 }
5893 
5894 static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
5895 				 struct dasd_cuir_message *cuir)
5896 {
5897 	struct dasd_eckd_private *private = device->private;
5898 	struct alias_pav_group *pavgroup, *tempgroup;
5899 	struct dasd_device *dev, *n;
5900 	unsigned long paths = 0;
5901 	int tbcpm;
5902 
5903 	/*
5904 	 * the path may have been added through a generic path event before
5905 	 * only trigger path verification if the path is not already in use
5906 	 */
5907 	list_for_each_entry_safe(dev, n,
5908 				 &private->lcu->active_devices,
5909 				 alias_list) {
5910 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5911 		paths |= tbcpm;
5912 		if (!(dasd_path_get_opm(dev) & tbcpm)) {
5913 			dasd_path_add_tbvpm(dev, tbcpm);
5914 			dasd_schedule_device_bh(dev);
5915 		}
5916 	}
5917 	list_for_each_entry_safe(dev, n,
5918 				 &private->lcu->inactive_devices,
5919 				 alias_list) {
5920 		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5921 		paths |= tbcpm;
5922 		if (!(dasd_path_get_opm(dev) & tbcpm)) {
5923 			dasd_path_add_tbvpm(dev, tbcpm);
5924 			dasd_schedule_device_bh(dev);
5925 		}
5926 	}
5927 	/* devices in PAV groups */
5928 	list_for_each_entry_safe(pavgroup, tempgroup,
5929 				 &private->lcu->grouplist,
5930 				 group) {
5931 		list_for_each_entry_safe(dev, n,
5932 					 &pavgroup->baselist,
5933 					 alias_list) {
5934 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5935 			paths |= tbcpm;
5936 			if (!(dasd_path_get_opm(dev) & tbcpm)) {
5937 				dasd_path_add_tbvpm(dev, tbcpm);
5938 				dasd_schedule_device_bh(dev);
5939 			}
5940 		}
5941 		list_for_each_entry_safe(dev, n,
5942 					 &pavgroup->aliaslist,
5943 					 alias_list) {
5944 			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
5945 			paths |= tbcpm;
5946 			if (!(dasd_path_get_opm(dev) & tbcpm)) {
5947 				dasd_path_add_tbvpm(dev, tbcpm);
5948 				dasd_schedule_device_bh(dev);
5949 			}
5950 		}
5951 	}
5952 	/* notify user about all paths affected by CUIR action */
5953 	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
5954 	return 0;
5955 }
5956 
5957 static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
5958 				 __u8 lpum)
5959 {
5960 	struct dasd_cuir_message *cuir = messages;
5961 	int response;
5962 
5963 	DBF_DEV_EVENT(DBF_WARNING, device,
5964 		      "CUIR request: %016llx %016llx %016llx %08x",
5965 		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
5966 		      ((u32 *)cuir)[3]);
5967 
5968 	if (cuir->code == CUIR_QUIESCE) {
5969 		/* quiesce */
5970 		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
5971 			response = PSF_CUIR_LAST_PATH;
5972 		else
5973 			response = PSF_CUIR_COMPLETED;
5974 	} else if (cuir->code == CUIR_RESUME) {
5975 		/* resume */
5976 		dasd_eckd_cuir_resume(device, lpum, cuir);
5977 		response = PSF_CUIR_COMPLETED;
5978 	} else
5979 		response = PSF_CUIR_NOT_SUPPORTED;
5980 
5981 	dasd_eckd_psf_cuir_response(device, response,
5982 				    cuir->message_id, lpum);
5983 	DBF_DEV_EVENT(DBF_WARNING, device,
5984 		      "CUIR response: %d on message ID %08x", response,
5985 		      cuir->message_id);
5986 	/* to make sure there is no attention left schedule work again */
5987 	device->discipline->check_attention(device, lpum);
5988 }
5989 
5990 static void dasd_eckd_check_attention_work(struct work_struct *work)
5991 {
5992 	struct check_attention_work_data *data;
5993 	struct dasd_rssd_messages *messages;
5994 	struct dasd_device *device;
5995 	int rc;
5996 
5997 	data = container_of(work, struct check_attention_work_data, worker);
5998 	device = data->device;
5999 	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
6000 	if (!messages) {
6001 		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
6002 			      "Could not allocate attention message buffer");
6003 		goto out;
6004 	}
6005 	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
6006 	if (rc)
6007 		goto out;
6008 	if (messages->length == ATTENTION_LENGTH_CUIR &&
6009 	    messages->format == ATTENTION_FORMAT_CUIR)
6010 		dasd_eckd_handle_cuir(device, messages, data->lpum);
6011 out:
6012 	dasd_put_device(device);
6013 	kfree(messages);
6014 	kfree(data);
6015 }
6016 
6017 static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
6018 {
6019 	struct check_attention_work_data *data;
6020 
6021 	data = kzalloc(sizeof(*data), GFP_ATOMIC);
6022 	if (!data)
6023 		return -ENOMEM;
6024 	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
6025 	dasd_get_device(device);
6026 	data->device = device;
6027 	data->lpum = lpum;
6028 	schedule_work(&data->worker);
6029 	return 0;
6030 }
6031 
6032 static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
6033 {
6034 	if (~lpum & dasd_path_get_opm(device)) {
6035 		dasd_path_add_nohpfpm(device, lpum);
6036 		dasd_path_remove_opm(device, lpum);
6037 		dev_err(&device->cdev->dev,
6038 			"Channel path %02X lost HPF functionality and is disabled\n",
6039 			lpum);
6040 		return 1;
6041 	}
6042 	return 0;
6043 }
6044 
6045 static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
6046 {
6047 	struct dasd_eckd_private *private = device->private;
6048 
6049 	dev_err(&device->cdev->dev,
6050 		"High Performance FICON disabled\n");
6051 	private->fcx_max_data = 0;
6052 }
6053 
6054 static int dasd_eckd_hpf_enabled(struct dasd_device *device)
6055 {
6056 	struct dasd_eckd_private *private = device->private;
6057 
6058 	return private->fcx_max_data ? 1 : 0;
6059 }
6060 
/*
 * react to an HPF-related I/O error: depending on the sense qualifier,
 * disable HPF either for the failing path only or for the whole device,
 * then stop new I/O and requeue outstanding requests
 */
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		/* the device itself no longer supports HPF */
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		/* single path lost HPF; if it could be disabled we are done */
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		/* last path: fall back to disabling HPF device-wide */
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				  dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent that any new I/O is started on the device and schedule a
	 * requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
6088 
/* ccw bus driver for ECKD devices; matches the dasd_eckd_ids table */
static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.freeze      = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
6108 
6109 /*
6110  * max_blocks is dependent on the amount of storage that is available
6111  * in the static io buffer for each device. Currently each device has
6112  * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
6113  * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
6114  * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
6115  * addition we have one define extent ccw + 16 bytes of data and one
6116  * locate record ccw + 16 bytes of data. That makes:
6117  * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
6118  * We want to fit two into the available memory so that we can immediately
6119  * start the next request if one finishes off. That makes 249.5 blocks
6120  * for one request. Give a little safety and the result is 240.
6121  */
6122 static struct dasd_discipline dasd_eckd_discipline = {
6123 	.owner = THIS_MODULE,
6124 	.name = "ECKD",
6125 	.ebcname = "ECKD",
6126 	.max_blocks = 190,
6127 	.check_device = dasd_eckd_check_characteristics,
6128 	.uncheck_device = dasd_eckd_uncheck_device,
6129 	.do_analysis = dasd_eckd_do_analysis,
6130 	.verify_path = dasd_eckd_verify_path,
6131 	.basic_to_ready = dasd_eckd_basic_to_ready,
6132 	.online_to_ready = dasd_eckd_online_to_ready,
6133 	.basic_to_known = dasd_eckd_basic_to_known,
6134 	.fill_geometry = dasd_eckd_fill_geometry,
6135 	.start_IO = dasd_start_IO,
6136 	.term_IO = dasd_term_IO,
6137 	.handle_terminated_request = dasd_eckd_handle_terminated_request,
6138 	.format_device = dasd_eckd_format_device,
6139 	.check_device_format = dasd_eckd_check_device_format,
6140 	.erp_action = dasd_eckd_erp_action,
6141 	.erp_postaction = dasd_eckd_erp_postaction,
6142 	.check_for_device_change = dasd_eckd_check_for_device_change,
6143 	.build_cp = dasd_eckd_build_alias_cp,
6144 	.free_cp = dasd_eckd_free_alias_cp,
6145 	.dump_sense = dasd_eckd_dump_sense,
6146 	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
6147 	.fill_info = dasd_eckd_fill_info,
6148 	.ioctl = dasd_eckd_ioctl,
6149 	.freeze = dasd_eckd_pm_freeze,
6150 	.restore = dasd_eckd_restore_device,
6151 	.reload = dasd_eckd_reload_device,
6152 	.get_uid = dasd_eckd_get_uid,
6153 	.kick_validate = dasd_eckd_kick_validate_server,
6154 	.check_attention = dasd_eckd_check_attention,
6155 	.host_access_count = dasd_eckd_host_access_count,
6156 	.hosts_print = dasd_hosts_print,
6157 	.handle_hpf_error = dasd_eckd_handle_hpf_error,
6158 	.disable_hpf = dasd_eckd_disable_hpf_device,
6159 	.hpf_enabled = dasd_eckd_hpf_enabled,
6160 	.reset_path = dasd_eckd_reset_path,
6161 	.is_ese = dasd_eckd_is_ese,
6162 	.space_allocated = dasd_eckd_space_allocated,
6163 	.space_configured = dasd_eckd_space_configured,
6164 	.logical_capacity = dasd_eckd_logical_capacity,
6165 	.ext_pool_id = dasd_eckd_ext_pool_id,
6166 	.ext_size = dasd_eckd_ext_size,
6167 	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
6168 	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
6169 	.ext_pool_oos = dasd_eckd_ext_pool_oos,
6170 	.ese_format = dasd_eckd_ese_format,
6171 	.ese_read = dasd_eckd_ese_read,
6172 };
6173 
6174 static int __init
6175 dasd_eckd_init(void)
6176 {
6177 	int ret;
6178 
6179 	ASCEBC(dasd_eckd_discipline.ebcname, 4);
6180 	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
6181 				   GFP_KERNEL | GFP_DMA);
6182 	if (!dasd_reserve_req)
6183 		return -ENOMEM;
6184 	path_verification_worker = kmalloc(sizeof(*path_verification_worker),
6185 				   GFP_KERNEL | GFP_DMA);
6186 	if (!path_verification_worker) {
6187 		kfree(dasd_reserve_req);
6188 		return -ENOMEM;
6189 	}
6190 	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
6191 	if (!rawpadpage) {
6192 		kfree(path_verification_worker);
6193 		kfree(dasd_reserve_req);
6194 		return -ENOMEM;
6195 	}
6196 	ret = ccw_driver_register(&dasd_eckd_driver);
6197 	if (!ret)
6198 		wait_for_device_probe();
6199 	else {
6200 		kfree(path_verification_worker);
6201 		kfree(dasd_reserve_req);
6202 		free_page((unsigned long)rawpadpage);
6203 	}
6204 	return ret;
6205 }
6206 
/*
 * module exit: unregister the ccw driver first so no new requests can
 * arrive, then release the buffers allocated in dasd_eckd_init
 */
static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(path_verification_worker);
	kfree(dasd_reserve_req);
	free_page((unsigned long)rawpadpage);
}
6215 
/* module entry/exit points */
module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
6218