// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * raw track access always maps to 64k in memory
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);

struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
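/*
 * Note on the statically preallocated requests above (dasd_reserve_req,
 * dasd_vol_info_req, pe_handler_worker): each is paired with a mutex and
 * serves as a last-resort fallback, so the corresponding operation can
 * still proceed when memory for a dynamically allocated request is not
 * available; the mutex serializes users of the shared static buffer.
 */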
static DEFINE_MUTEX(dasd_pe_handler_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
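/*
 * Worked example for recs_per_track() (illustrative): on a 3390 with
 * 4 KiB data blocks and no key field (kl = 0, dl = 4096):
 *   dn   = ceil_quot(4096 + 6, 232) + 1 = 18 + 1 = 19
 *   recs = 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34))
 *        = 1729 / (19 + ceil_quot(4210, 34)) = 1729 / (19 + 124) = 12
 * i.e. twelve 4 KiB records fit on one 3390 track.
 */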
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

/*
 * calculate failing track from sense data, depending on
 * whether it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}

static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
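/*
 * Background note (not from the original source): an ECKD channel
 * program typically starts with a Define Extent CCW (built above) that
 * bounds the area the chain may touch, followed by a Locate Record
 * (Extended) CCW that positions to the first record, and then the
 * actual read/write CCWs. On newer control units both parts can be
 * combined into a single Prefix (PFX) CCW, see prefix_LRE() below.
 * The extent addresses use the packed cchh format built by set_ch_t():
 * e.g. cyl 0x12345, head 2 becomes cyl field 0x2345 with the high
 * cylinder nibble 0x1 stored in the upper half of the head byte (0x12).
 */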
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/*
	 * note: the meaning of count depends on the operation;
	 * for record based I/O it's the number of records, but for
	 * track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
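/*
 * Illustration (not from the original source) of the rotational
 * position estimate computed in locate_record_ext() above, for a 3390
 * with 4 KiB records (reclen = 4096) and rec_on_trk = 2:
 *   dn = ceil_quot(4096 + 6, 232) = 18
 *   d  = 9 + ceil_quot(4096 + 6 * (18 + 1), 34) = 9 + 124 = 133
 *   sector = (49 + (2 - 1) * (10 + 133)) / 8 = 192 / 8 = 24
 * The value serves as a rotational-position (sector) starting point;
 * exact positioning is done via the search arguments.
 */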
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
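/*
 * Note (not from the original source): locate_record() below is the
 * classic command-mode variant; it builds a plain Locate Record CCW,
 * as opposed to the Locate Record Extended data embedded in the Prefix
 * path above, using largely the same operation codes.
 */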
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
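/*
 * Worked example for dasd_eckd_cdl_special() below (illustrative):
 * with 4 KiB blocks on a 3390, blk_per_trk is 12, so records 0..2
 * (track 0) and 12..23 (track 1) are handled with the KD command
 * variants, while records 3..11 and 24 and above use the normal
 * variants.
 */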
/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}

/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}
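/*
 * For illustration (hypothetical values): a UID built by create_uid()
 * is printed elsewhere in this file as "%s.%s.%04x.%02x[.%s]", so
 * vendor "IBM", serial "000000000TST1", ssid 0x1234 and unit address
 * 0x01 would yield "IBM.000000000TST1.1234.01", with the vduit string
 * appended for virtual device SNEQ configurations.
 */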
/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}

static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

/*
 * Wakeup helper for read_conf
 * if the cqr is not done and needs some error recovery
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}

static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}
static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}

static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;

	ccw_device_get_schid(device->cdev, &sch_id);
	/*
	 * path handling and read_conf allocate data
	 * free it before replacing the pointer
	 */
	kfree(device->path[chp].conf_data);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
}

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
	}
}

static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	u8 esm_valid;
	u8 esm[8];
	int chp;
	int rc;

	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
	if (rc) {
		for (chp = 0; chp < 8; chp++)
			device->path[chp].fc_security = 0;
		return;
	}

	for (chp = 0; chp < 8; chp++) {
		if (esm_valid & (0x80 >> chp))
			device->path[chp].fc_security = esm[chp];
		else
			device->path[chp].fc_security = 0;
	}
}
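/*
 * Note (not from the original source): the path masks (lpm/opm) used
 * below are 8-bit masks with the most significant bit denoting the
 * first channel path, i.e. 0x80 is path position 0 and 0x01 is path
 * position 7 (cf. pathmask_to_pos()). dasd_eckd_read_conf() walks all
 * operational paths, takes the first valid configuration data as the
 * device UID and checks every other path against it.
 */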
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID so that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	dasd_eckd_read_fc_security(device);

	return path_err;
}
static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}

static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else /* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	unsigned long flags;
	char print_uid[60];
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)
				) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for later comparison, since
		 * rebuild_device_uid may change the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left;
		 * otherwise the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful;
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}
static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}

static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}

static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}

/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}

/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}

static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}

static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}

static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}

static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}

/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}
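/*
 * Rough capacity check for the extent sizes used below (not from the
 * original source; assuming the usual 3390 geometry of 15 tracks per
 * cylinder and 56,664 bytes of raw track capacity): 1113 cyl is about
 * 946 MB, which matches the nominal 1 GB extent size, and 21 cyl is
 * about 17.8 MB for the nominal 16 MB extent size.
 */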
/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA).
 * A 1 GB size corresponds to 1113 cyl, and 16 MB to 21 cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}

static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}

static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}

/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}

/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav,
		  unsigned long flags)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	/*
	 * set flags, e.g. turn on failfast to prevent blocking;
	 * the calling function should handle failed requests
	 */
	cqr->flags |= flags;

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	else if (cqr->intrc == -EAGAIN)
		rc = -EAGAIN;

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
turn on failfast, to prevent blocking 1948 * the calling function should handle failed requests 1949 */ 1950 cqr->flags |= flags; 1951 1952 rc = dasd_sleep_on(cqr); 1953 if (!rc) 1954 /* trigger CIO to reprobe devices */ 1955 css_schedule_reprobe(); 1956 else if (cqr->intrc == -EAGAIN) 1957 rc = -EAGAIN; 1958 1959 dasd_sfree_request(cqr, cqr->memdev); 1960 return rc; 1961 } 1962 1963 /* 1964 * Validate the storage server of the current device. 1965 */ 1966 static int dasd_eckd_validate_server(struct dasd_device *device, 1967 unsigned long flags) 1968 { 1969 struct dasd_eckd_private *private = device->private; 1970 int enable_pav, rc; 1971 1972 if (private->uid.type == UA_BASE_PAV_ALIAS || 1973 private->uid.type == UA_HYPER_PAV_ALIAS) 1974 return 0; 1975 if (dasd_nopav || MACHINE_IS_VM) 1976 enable_pav = 0; 1977 else 1978 enable_pav = 1; 1979 rc = dasd_eckd_psf_ssc(device, enable_pav, flags); 1980 1981 /* maybe the requested feature is not available on the server, 1982 * therefore just report the error and go ahead */ 1983 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " 1984 "returned rc=%d", private->uid.ssid, rc); 1985 return rc; 1986 } 1987 1988 /* 1989 * worker to validate the server in case of a lost pathgroup 1990 */ 1991 static void dasd_eckd_do_validate_server(struct work_struct *work) 1992 { 1993 struct dasd_device *device = container_of(work, struct dasd_device, 1994 kick_validate); 1995 unsigned long flags = 0; 1996 1997 set_bit(DASD_CQR_FLAGS_FAILFAST, &flags); 1998 if (dasd_eckd_validate_server(device, flags) 1999 == -EAGAIN) { 2000 /* schedule worker again if failed */ 2001 schedule_work(&device->kick_validate); 2002 return; 2003 } 2004 2005 dasd_put_device(device); 2006 } 2007 2008 static void dasd_eckd_kick_validate_server(struct dasd_device *device) 2009 { 2010 dasd_get_device(device); 2011 /* exit if device not online or in offline processing */ 2012 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 2013 device->state < DASD_STATE_ONLINE) { 2014 dasd_put_device(device); 2015 return; 2016 } 2017 /* queue call to do_validate_server to the kernel event daemon. */ 2018 if (!schedule_work(&device->kick_validate)) 2019 dasd_put_device(device); 2020 } 2021 2022 /* 2023 * Check device characteristics. 2024 * If the device is accessible using ECKD discipline, the device is enabled. 2025 */ 2026 static int 2027 dasd_eckd_check_characteristics(struct dasd_device *device) 2028 { 2029 struct dasd_eckd_private *private = device->private; 2030 struct dasd_block *block; 2031 struct dasd_uid temp_uid; 2032 int rc, i; 2033 int readonly; 2034 unsigned long value; 2035 2036 /* setup work queue for validate server */ 2037 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server); 2038 /* setup work queue for summary unit check */ 2039 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check); 2040 2041 if (!ccw_device_is_pathgroup(device->cdev)) { 2042 dev_warn(&device->cdev->dev, 2043 "A channel path group could not be established\n"); 2044 return -EIO; 2045 } 2046 if (!ccw_device_is_multipath(device->cdev)) { 2047 dev_info(&device->cdev->dev, 2048 "The DASD is not operating in multipath mode\n"); 2049 } 2050 if (!private) { 2051 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); 2052 if (!private) { 2053 dev_warn(&device->cdev->dev, 2054 "Allocating memory for private DASD data " 2055 "failed\n"); 2056 return -ENOMEM; 2057 } 2058 device->private = private; 2059 } else { 2060 memset(private, 0, sizeof(*private)); 2061 } 2062 /* Invalidate status of initial analysis.
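A value of -1 means no analysis has been run yet; dasd_eckd_do_analysis() starts a fresh analysis request when it sees this state.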
*/ 2063 private->init_cqr_status = -1; 2064 /* Set default cache operations. */ 2065 private->attrib.operation = DASD_NORMAL_CACHE; 2066 private->attrib.nr_cyl = 0; 2067 2068 /* Read Configuration Data */ 2069 rc = dasd_eckd_read_conf(device); 2070 if (rc) 2071 goto out_err1; 2072 2073 /* set some default values */ 2074 device->default_expires = DASD_EXPIRES; 2075 device->default_retries = DASD_RETRIES; 2076 device->path_thrhld = DASD_ECKD_PATH_THRHLD; 2077 device->path_interval = DASD_ECKD_PATH_INTERVAL; 2078 2079 if (private->gneq) { 2080 value = 1; 2081 for (i = 0; i < private->gneq->timeout.value; i++) 2082 value = 10 * value; 2083 value = value * private->gneq->timeout.number; 2084 /* do not accept useless values */ 2085 if (value != 0 && value <= DASD_EXPIRES_MAX) 2086 device->default_expires = value; 2087 } 2088 2089 dasd_eckd_get_uid(device, &temp_uid); 2090 if (temp_uid.type == UA_BASE_DEVICE) { 2091 block = dasd_alloc_block(); 2092 if (IS_ERR(block)) { 2093 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 2094 "could not allocate dasd " 2095 "block structure"); 2096 rc = PTR_ERR(block); 2097 goto out_err1; 2098 } 2099 device->block = block; 2100 block->base = device; 2101 } 2102 2103 /* register lcu with alias handling, enable PAV */ 2104 rc = dasd_alias_make_device_known_to_lcu(device); 2105 if (rc) 2106 goto out_err2; 2107 2108 dasd_eckd_validate_server(device, 0); 2109 2110 /* device may report different configuration data after LCU setup */ 2111 rc = dasd_eckd_read_conf(device); 2112 if (rc) 2113 goto out_err3; 2114 2115 dasd_path_create_kobjects(device); 2116 2117 /* Read Feature Codes */ 2118 dasd_eckd_read_features(device); 2119 2120 /* Read Volume Information */ 2121 dasd_eckd_read_vol_info(device); 2122 2123 /* Read Extent Pool Information */ 2124 dasd_eckd_read_ext_pool_info(device); 2125 2126 /* Read Device Characteristics */ 2127 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 2128 &private->rdc_data, 64); 2129 if (rc) { 2130 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 2131 "Read device characteristic failed, rc=%d", rc); 2132 goto out_err3; 2133 } 2134 2135 if ((device->features & DASD_FEATURE_USERAW) && 2136 !(private->rdc_data.facilities.RT_in_LR)) { 2137 dev_err(&device->cdev->dev, "The storage server does not " 2138 "support raw-track access\n"); 2139 rc = -EINVAL; 2140 goto out_err3; 2141 } 2142 2143 /* find the valid cylinder size */ 2144 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 2145 private->rdc_data.long_no_cyl) 2146 private->real_cyl = private->rdc_data.long_no_cyl; 2147 else 2148 private->real_cyl = private->rdc_data.no_cyl; 2149 2150 private->fcx_max_data = get_fcx_max_data(device); 2151 2152 readonly = dasd_device_is_ro(device); 2153 if (readonly) 2154 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 2155 2156 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 2157 "with %d cylinders, %d heads, %d sectors%s\n", 2158 private->rdc_data.dev_type, 2159 private->rdc_data.dev_model, 2160 private->rdc_data.cu_type, 2161 private->rdc_data.cu_model.model, 2162 private->real_cyl, 2163 private->rdc_data.trk_per_cyl, 2164 private->rdc_data.sec_per_trk, 2165 readonly ? 
", read-only device" : ""); 2166 return 0; 2167 2168 out_err3: 2169 dasd_alias_disconnect_device_from_lcu(device); 2170 out_err2: 2171 dasd_free_block(device->block); 2172 device->block = NULL; 2173 out_err1: 2174 dasd_eckd_clear_conf_data(device); 2175 dasd_path_remove_kobjects(device); 2176 kfree(device->private); 2177 device->private = NULL; 2178 return rc; 2179 } 2180 2181 static void dasd_eckd_uncheck_device(struct dasd_device *device) 2182 { 2183 struct dasd_eckd_private *private = device->private; 2184 2185 if (!private) 2186 return; 2187 2188 dasd_alias_disconnect_device_from_lcu(device); 2189 private->ned = NULL; 2190 private->sneq = NULL; 2191 private->vdsneq = NULL; 2192 private->gneq = NULL; 2193 dasd_eckd_clear_conf_data(device); 2194 dasd_path_remove_kobjects(device); 2195 } 2196 2197 static struct dasd_ccw_req * 2198 dasd_eckd_analysis_ccw(struct dasd_device *device) 2199 { 2200 struct dasd_eckd_private *private = device->private; 2201 struct eckd_count *count_data; 2202 struct LO_eckd_data *LO_data; 2203 struct dasd_ccw_req *cqr; 2204 struct ccw1 *ccw; 2205 int cplength, datasize; 2206 int i; 2207 2208 cplength = 8; 2209 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 2210 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, 2211 NULL); 2212 if (IS_ERR(cqr)) 2213 return cqr; 2214 ccw = cqr->cpaddr; 2215 /* Define extent for the first 2 tracks. */ 2216 define_extent(ccw++, cqr->data, 0, 1, 2217 DASD_ECKD_CCW_READ_COUNT, device, 0); 2218 LO_data = cqr->data + sizeof(struct DE_eckd_data); 2219 /* Locate record for the first 4 records on track 0. */ 2220 ccw[-1].flags |= CCW_FLAG_CC; 2221 locate_record(ccw++, LO_data++, 0, 0, 4, 2222 DASD_ECKD_CCW_READ_COUNT, device, 0); 2223 2224 count_data = private->count_area; 2225 for (i = 0; i < 4; i++) { 2226 ccw[-1].flags |= CCW_FLAG_CC; 2227 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2228 ccw->flags = 0; 2229 ccw->count = 8; 2230 ccw->cda = (__u32)(addr_t) count_data; 2231 ccw++; 2232 count_data++; 2233 } 2234 2235 /* Locate record for the first record on track 1. */ 2236 ccw[-1].flags |= CCW_FLAG_CC; 2237 locate_record(ccw++, LO_data++, 1, 0, 1, 2238 DASD_ECKD_CCW_READ_COUNT, device, 0); 2239 /* Read count ccw. */ 2240 ccw[-1].flags |= CCW_FLAG_CC; 2241 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2242 ccw->flags = 0; 2243 ccw->count = 8; 2244 ccw->cda = (__u32)(addr_t) count_data; 2245 2246 cqr->block = NULL; 2247 cqr->startdev = device; 2248 cqr->memdev = device; 2249 cqr->retries = 255; 2250 cqr->buildclk = get_tod_clock(); 2251 cqr->status = DASD_CQR_FILLED; 2252 /* Set flags to suppress output for expected errors */ 2253 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2254 2255 return cqr; 2256 } 2257 2258 /* differentiate between 'no record found' and any other error */ 2259 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr) 2260 { 2261 char *sense; 2262 if (init_cqr->status == DASD_CQR_DONE) 2263 return INIT_CQR_OK; 2264 else if (init_cqr->status == DASD_CQR_NEED_ERP || 2265 init_cqr->status == DASD_CQR_FAILED) { 2266 sense = dasd_get_sense(&init_cqr->irb); 2267 if (sense && (sense[1] & SNS1_NO_REC_FOUND)) 2268 return INIT_CQR_UNFORMATTED; 2269 else 2270 return INIT_CQR_ERROR; 2271 } else 2272 return INIT_CQR_ERROR; 2273 } 2274 2275 /* 2276 * This is the callback function for the init_analysis cqr. It saves 2277 * the status of the initial analysis ccw before it frees it and kicks 2278 * the device to continue the startup sequence. 
This will call 2279 * dasd_eckd_do_analysis again (if the device has not been marked 2280 * for deletion in the meantime). 2281 */ 2282 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, 2283 void *data) 2284 { 2285 struct dasd_device *device = init_cqr->startdev; 2286 struct dasd_eckd_private *private = device->private; 2287 2288 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr); 2289 dasd_sfree_request(init_cqr, device); 2290 dasd_kick_device(device); 2291 } 2292 2293 static int dasd_eckd_start_analysis(struct dasd_block *block) 2294 { 2295 struct dasd_ccw_req *init_cqr; 2296 2297 init_cqr = dasd_eckd_analysis_ccw(block->base); 2298 if (IS_ERR(init_cqr)) 2299 return PTR_ERR(init_cqr); 2300 init_cqr->callback = dasd_eckd_analysis_callback; 2301 init_cqr->callback_data = NULL; 2302 init_cqr->expires = 5*HZ; 2303 /* first try without ERP, so we can later handle unformatted 2304 * devices as a special case 2305 */ 2306 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags); 2307 init_cqr->retries = 0; 2308 dasd_add_request_head(init_cqr); 2309 return -EAGAIN; 2310 } 2311 2312 static int dasd_eckd_end_analysis(struct dasd_block *block) 2313 { 2314 struct dasd_device *device = block->base; 2315 struct dasd_eckd_private *private = device->private; 2316 struct eckd_count *count_area; 2317 unsigned int sb, blk_per_trk; 2318 int status, i; 2319 struct dasd_ccw_req *init_cqr; 2320 2321 status = private->init_cqr_status; 2322 private->init_cqr_status = -1; 2323 if (status == INIT_CQR_ERROR) { 2324 /* try again, this time with full ERP */ 2325 init_cqr = dasd_eckd_analysis_ccw(device); 2326 dasd_sleep_on(init_cqr); 2327 status = dasd_eckd_analysis_evaluation(init_cqr); 2328 dasd_sfree_request(init_cqr, device); 2329 } 2330 2331 if (device->features & DASD_FEATURE_USERAW) { 2332 block->bp_block = DASD_RAW_BLOCKSIZE; 2333 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK; 2334 block->s2b_shift = 3; 2335 goto raw; 2336 } 2337 2338 if (status == INIT_CQR_UNFORMATTED) { 2339 dev_warn(&device->cdev->dev, "The DASD is not formatted\n"); 2340 return -EMEDIUMTYPE; 2341 } else if (status == INIT_CQR_ERROR) { 2342 dev_err(&device->cdev->dev, 2343 "Detecting the DASD disk layout failed because " 2344 "of an I/O error\n"); 2345 return -EIO; 2346 } 2347 2348 private->uses_cdl = 1; 2349 /* Check Track 0 for Compatible Disk Layout */ 2350 count_area = NULL; 2351 for (i = 0; i < 3; i++) { 2352 if (private->count_area[i].kl != 4 || 2353 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 || 2354 private->count_area[i].cyl != 0 || 2355 private->count_area[i].head != count_area_head[i] || 2356 private->count_area[i].record != count_area_rec[i]) { 2357 private->uses_cdl = 0; 2358 break; 2359 } 2360 } 2361 if (i == 3) 2362 count_area = &private->count_area[3]; 2363 2364 if (private->uses_cdl == 0) { 2365 for (i = 0; i < 5; i++) { 2366 if ((private->count_area[i].kl != 0) || 2367 (private->count_area[i].dl != 2368 private->count_area[0].dl) || 2369 private->count_area[i].cyl != 0 || 2370 private->count_area[i].head != count_area_head[i] || 2371 private->count_area[i].record != count_area_rec[i]) 2372 break; 2373 } 2374 if (i == 5) 2375 count_area = &private->count_area[0]; 2376 } else { 2377 if (private->count_area[3].record == 1) 2378 dev_warn(&device->cdev->dev, 2379 "Track 0 has no records following the VTOC\n"); 2380 } 2381 2382 if (count_area != NULL && count_area->kl == 0) { 2383 /* we found nothing violating our disk layout */ 2384 if (dasd_check_blocksize(count_area->dl) == 0) 2385
block->bp_block = count_area->dl; 2386 } 2387 if (block->bp_block == 0) { 2388 dev_warn(&device->cdev->dev, 2389 "The disk layout of the DASD is not supported\n"); 2390 return -EMEDIUMTYPE; 2391 } 2392 block->s2b_shift = 0; /* bits to shift 512 to get a block */ 2393 for (sb = 512; sb < block->bp_block; sb = sb << 1) 2394 block->s2b_shift++; 2395 2396 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block); 2397 2398 raw: 2399 block->blocks = ((unsigned long) private->real_cyl * 2400 private->rdc_data.trk_per_cyl * 2401 blk_per_trk); 2402 2403 dev_info(&device->cdev->dev, 2404 "DASD with %u KB/block, %lu KB total size, %u KB/track, " 2405 "%s\n", (block->bp_block >> 10), 2406 (((unsigned long) private->real_cyl * 2407 private->rdc_data.trk_per_cyl * 2408 blk_per_trk * (block->bp_block >> 9)) >> 1), 2409 ((blk_per_trk * block->bp_block) >> 10), 2410 private->uses_cdl ? 2411 "compatible disk layout" : "linux disk layout"); 2412 2413 return 0; 2414 } 2415 2416 static int dasd_eckd_do_analysis(struct dasd_block *block) 2417 { 2418 struct dasd_eckd_private *private = block->base->private; 2419 2420 if (private->init_cqr_status < 0) 2421 return dasd_eckd_start_analysis(block); 2422 else 2423 return dasd_eckd_end_analysis(block); 2424 } 2425 2426 static int dasd_eckd_basic_to_ready(struct dasd_device *device) 2427 { 2428 return dasd_alias_add_device(device); 2429 }; 2430 2431 static int dasd_eckd_online_to_ready(struct dasd_device *device) 2432 { 2433 if (cancel_work_sync(&device->reload_device)) 2434 dasd_put_device(device); 2435 if (cancel_work_sync(&device->kick_validate)) 2436 dasd_put_device(device); 2437 2438 return 0; 2439 }; 2440 2441 static int dasd_eckd_basic_to_known(struct dasd_device *device) 2442 { 2443 return dasd_alias_remove_device(device); 2444 }; 2445 2446 static int 2447 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) 2448 { 2449 struct dasd_eckd_private *private = block->base->private; 2450 2451 if (dasd_check_blocksize(block->bp_block) == 0) { 2452 geo->sectors = recs_per_track(&private->rdc_data, 2453 0, block->bp_block); 2454 } 2455 geo->cylinders = private->rdc_data.no_cyl; 2456 geo->heads = private->rdc_data.trk_per_cyl; 2457 return 0; 2458 } 2459 2460 /* 2461 * Build the TCW request for the format check 2462 */ 2463 static struct dasd_ccw_req * 2464 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata, 2465 int enable_pav, struct eckd_count *fmt_buffer, 2466 int rpt) 2467 { 2468 struct dasd_eckd_private *start_priv; 2469 struct dasd_device *startdev = NULL; 2470 struct tidaw *last_tidaw = NULL; 2471 struct dasd_ccw_req *cqr; 2472 struct itcw *itcw; 2473 int itcw_size; 2474 int count; 2475 int rc; 2476 int i; 2477 2478 if (enable_pav) 2479 startdev = dasd_alias_get_start_dev(base); 2480 2481 if (!startdev) 2482 startdev = base; 2483 2484 start_priv = startdev->private; 2485 2486 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2487 2488 /* 2489 * we're adding 'count' amount of tidaw to the itcw. 
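* each TIDAW describes one eckd_count entry of fmt_buffer, i.e. one entry per record that is read back;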
2490 * calculate the corresponding itcw_size 2491 */ 2492 itcw_size = itcw_calc_size(0, count, 0); 2493 2494 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 2495 if (IS_ERR(cqr)) 2496 return cqr; 2497 2498 start_priv->count++; 2499 2500 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0); 2501 if (IS_ERR(itcw)) { 2502 rc = -EINVAL; 2503 goto out_err; 2504 } 2505 2506 cqr->cpaddr = itcw_get_tcw(itcw); 2507 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit, 2508 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count, 2509 sizeof(struct eckd_count), 2510 count * sizeof(struct eckd_count), 0, rpt); 2511 if (rc) 2512 goto out_err; 2513 2514 for (i = 0; i < count; i++) { 2515 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++, 2516 sizeof(struct eckd_count)); 2517 if (IS_ERR(last_tidaw)) { 2518 rc = -EINVAL; 2519 goto out_err; 2520 } 2521 } 2522 2523 last_tidaw->flags |= TIDAW_FLAGS_LAST; 2524 itcw_finalize(itcw); 2525 2526 cqr->cpmode = 1; 2527 cqr->startdev = startdev; 2528 cqr->memdev = startdev; 2529 cqr->basedev = base; 2530 cqr->retries = startdev->default_retries; 2531 cqr->expires = startdev->default_expires * HZ; 2532 cqr->buildclk = get_tod_clock(); 2533 cqr->status = DASD_CQR_FILLED; 2534 /* Set flags to suppress output for expected errors */ 2535 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 2536 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 2537 2538 return cqr; 2539 2540 out_err: 2541 dasd_sfree_request(cqr, startdev); 2542 2543 return ERR_PTR(rc); 2544 } 2545 2546 /* 2547 * Build the CCW request for the format check 2548 */ 2549 static struct dasd_ccw_req * 2550 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, 2551 int enable_pav, struct eckd_count *fmt_buffer, int rpt) 2552 { 2553 struct dasd_eckd_private *start_priv; 2554 struct dasd_eckd_private *base_priv; 2555 struct dasd_device *startdev = NULL; 2556 struct dasd_ccw_req *cqr; 2557 struct ccw1 *ccw; 2558 void *data; 2559 int cplength, datasize; 2560 int use_prefix; 2561 int count; 2562 int i; 2563 2564 if (enable_pav) 2565 startdev = dasd_alias_get_start_dev(base); 2566 2567 if (!startdev) 2568 startdev = base; 2569 2570 start_priv = startdev->private; 2571 base_priv = base->private; 2572 2573 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2574 2575 use_prefix = base_priv->features.feature[8] & 0x01; 2576 2577 if (use_prefix) { 2578 cplength = 1; 2579 datasize = sizeof(struct PFX_eckd_data); 2580 } else { 2581 cplength = 2; 2582 datasize = sizeof(struct DE_eckd_data) + 2583 sizeof(struct LO_eckd_data); 2584 } 2585 cplength += count; 2586 2587 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2588 if (IS_ERR(cqr)) 2589 return cqr; 2590 2591 start_priv->count++; 2592 data = cqr->data; 2593 ccw = cqr->cpaddr; 2594 2595 if (use_prefix) { 2596 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit, 2597 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0, 2598 count, 0, 0); 2599 } else { 2600 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit, 2601 DASD_ECKD_CCW_READ_COUNT, startdev, 0); 2602 2603 data += sizeof(struct DE_eckd_data); 2604 ccw[-1].flags |= CCW_FLAG_CC; 2605 2606 locate_record(ccw++, data, fdata->start_unit, 0, count, 2607 DASD_ECKD_CCW_READ_COUNT, base, 0); 2608 } 2609 2610 for (i = 0; i < count; i++) { 2611 ccw[-1].flags |= CCW_FLAG_CC; 2612 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2613 ccw->flags = CCW_FLAG_SLI; 2614 ccw->count = 8; 2615 ccw->cda = (__u32)(addr_t) fmt_buffer; 2616 ccw++; 2617 
fmt_buffer++; 2618 } 2619 2620 cqr->startdev = startdev; 2621 cqr->memdev = startdev; 2622 cqr->basedev = base; 2623 cqr->retries = DASD_RETRIES; 2624 cqr->expires = startdev->default_expires * HZ; 2625 cqr->buildclk = get_tod_clock(); 2626 cqr->status = DASD_CQR_FILLED; 2627 /* Set flags to suppress output for expected errors */ 2628 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2629 2630 return cqr; 2631 } 2632 2633 static struct dasd_ccw_req * 2634 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, 2635 struct format_data_t *fdata, int enable_pav) 2636 { 2637 struct dasd_eckd_private *base_priv; 2638 struct dasd_eckd_private *start_priv; 2639 struct dasd_ccw_req *fcp; 2640 struct eckd_count *ect; 2641 struct ch_t address; 2642 struct ccw1 *ccw; 2643 void *data; 2644 int rpt; 2645 int cplength, datasize; 2646 int i, j; 2647 int intensity = 0; 2648 int r0_perm; 2649 int nr_tracks; 2650 int use_prefix; 2651 2652 if (enable_pav) 2653 startdev = dasd_alias_get_start_dev(base); 2654 2655 if (!startdev) 2656 startdev = base; 2657 2658 start_priv = startdev->private; 2659 base_priv = base->private; 2660 2661 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); 2662 2663 nr_tracks = fdata->stop_unit - fdata->start_unit + 1; 2664 2665 /* 2666 * fdata->intensity is a bit string that tells us what to do: 2667 * Bit 0: write record zero 2668 * Bit 1: write home address, currently not supported 2669 * Bit 2: invalidate tracks 2670 * Bit 3: use OS/390 compatible disk layout (cdl) 2671 * Bit 4: do not allow storage subsystem to modify record zero 2672 * Only some bit combinations do make sense. 2673 */ 2674 if (fdata->intensity & 0x10) { 2675 r0_perm = 0; 2676 intensity = fdata->intensity & ~0x10; 2677 } else { 2678 r0_perm = 1; 2679 intensity = fdata->intensity; 2680 } 2681 2682 use_prefix = base_priv->features.feature[8] & 0x01; 2683 2684 switch (intensity) { 2685 case 0x00: /* Normal format */ 2686 case 0x08: /* Normal format, use cdl. */ 2687 cplength = 2 + (rpt*nr_tracks); 2688 if (use_prefix) 2689 datasize = sizeof(struct PFX_eckd_data) + 2690 sizeof(struct LO_eckd_data) + 2691 rpt * nr_tracks * sizeof(struct eckd_count); 2692 else 2693 datasize = sizeof(struct DE_eckd_data) + 2694 sizeof(struct LO_eckd_data) + 2695 rpt * nr_tracks * sizeof(struct eckd_count); 2696 break; 2697 case 0x01: /* Write record zero and format track. */ 2698 case 0x09: /* Write record zero and format track, use cdl. */ 2699 cplength = 2 + rpt * nr_tracks; 2700 if (use_prefix) 2701 datasize = sizeof(struct PFX_eckd_data) + 2702 sizeof(struct LO_eckd_data) + 2703 sizeof(struct eckd_count) + 2704 rpt * nr_tracks * sizeof(struct eckd_count); 2705 else 2706 datasize = sizeof(struct DE_eckd_data) + 2707 sizeof(struct LO_eckd_data) + 2708 sizeof(struct eckd_count) + 2709 rpt * nr_tracks * sizeof(struct eckd_count); 2710 break; 2711 case 0x04: /* Invalidate track. */ 2712 case 0x0c: /* Invalidate track, use cdl. 
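(define extent or prefix, locate record and one write CCW, hence cplength 3)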
*/ 2713 cplength = 3; 2714 if (use_prefix) 2715 datasize = sizeof(struct PFX_eckd_data) + 2716 sizeof(struct LO_eckd_data) + 2717 sizeof(struct eckd_count); 2718 else 2719 datasize = sizeof(struct DE_eckd_data) + 2720 sizeof(struct LO_eckd_data) + 2721 sizeof(struct eckd_count); 2722 break; 2723 default: 2724 dev_warn(&startdev->cdev->dev, 2725 "An I/O control call used incorrect flags 0x%x\n", 2726 fdata->intensity); 2727 return ERR_PTR(-EINVAL); 2728 } 2729 2730 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2731 if (IS_ERR(fcp)) 2732 return fcp; 2733 2734 start_priv->count++; 2735 data = fcp->data; 2736 ccw = fcp->cpaddr; 2737 2738 switch (intensity & ~0x08) { 2739 case 0x00: /* Normal format. */ 2740 if (use_prefix) { 2741 prefix(ccw++, (struct PFX_eckd_data *) data, 2742 fdata->start_unit, fdata->stop_unit, 2743 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2744 /* grant subsystem permission to format R0 */ 2745 if (r0_perm) 2746 ((struct PFX_eckd_data *)data) 2747 ->define_extent.ga_extended |= 0x04; 2748 data += sizeof(struct PFX_eckd_data); 2749 } else { 2750 define_extent(ccw++, (struct DE_eckd_data *) data, 2751 fdata->start_unit, fdata->stop_unit, 2752 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2753 /* grant subsystem permission to format R0 */ 2754 if (r0_perm) 2755 ((struct DE_eckd_data *) data) 2756 ->ga_extended |= 0x04; 2757 data += sizeof(struct DE_eckd_data); 2758 } 2759 ccw[-1].flags |= CCW_FLAG_CC; 2760 locate_record(ccw++, (struct LO_eckd_data *) data, 2761 fdata->start_unit, 0, rpt*nr_tracks, 2762 DASD_ECKD_CCW_WRITE_CKD, base, 2763 fdata->blksize); 2764 data += sizeof(struct LO_eckd_data); 2765 break; 2766 case 0x01: /* Write record zero + format track. */ 2767 if (use_prefix) { 2768 prefix(ccw++, (struct PFX_eckd_data *) data, 2769 fdata->start_unit, fdata->stop_unit, 2770 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2771 base, startdev); 2772 data += sizeof(struct PFX_eckd_data); 2773 } else { 2774 define_extent(ccw++, (struct DE_eckd_data *) data, 2775 fdata->start_unit, fdata->stop_unit, 2776 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0); 2777 data += sizeof(struct DE_eckd_data); 2778 } 2779 ccw[-1].flags |= CCW_FLAG_CC; 2780 locate_record(ccw++, (struct LO_eckd_data *) data, 2781 fdata->start_unit, 0, rpt * nr_tracks + 1, 2782 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, 2783 base->block->bp_block); 2784 data += sizeof(struct LO_eckd_data); 2785 break; 2786 case 0x04: /* Invalidate track. 
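(record one is written with data length zero, which invalidates the track)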
*/ 2787 if (use_prefix) { 2788 prefix(ccw++, (struct PFX_eckd_data *) data, 2789 fdata->start_unit, fdata->stop_unit, 2790 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2791 data += sizeof(struct PFX_eckd_data); 2792 } else { 2793 define_extent(ccw++, (struct DE_eckd_data *) data, 2794 fdata->start_unit, fdata->stop_unit, 2795 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2796 data += sizeof(struct DE_eckd_data); 2797 } 2798 ccw[-1].flags |= CCW_FLAG_CC; 2799 locate_record(ccw++, (struct LO_eckd_data *) data, 2800 fdata->start_unit, 0, 1, 2801 DASD_ECKD_CCW_WRITE_CKD, base, 8); 2802 data += sizeof(struct LO_eckd_data); 2803 break; 2804 } 2805 2806 for (j = 0; j < nr_tracks; j++) { 2807 /* calculate cylinder and head for the current track */ 2808 set_ch_t(&address, 2809 (fdata->start_unit + j) / 2810 base_priv->rdc_data.trk_per_cyl, 2811 (fdata->start_unit + j) % 2812 base_priv->rdc_data.trk_per_cyl); 2813 if (intensity & 0x01) { /* write record zero */ 2814 ect = (struct eckd_count *) data; 2815 data += sizeof(struct eckd_count); 2816 ect->cyl = address.cyl; 2817 ect->head = address.head; 2818 ect->record = 0; 2819 ect->kl = 0; 2820 ect->dl = 8; 2821 ccw[-1].flags |= CCW_FLAG_CC; 2822 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2823 ccw->flags = CCW_FLAG_SLI; 2824 ccw->count = 8; 2825 ccw->cda = (__u32)(addr_t) ect; 2826 ccw++; 2827 } 2828 if ((intensity & ~0x08) & 0x04) { /* erase track */ 2829 ect = (struct eckd_count *) data; 2830 data += sizeof(struct eckd_count); 2831 ect->cyl = address.cyl; 2832 ect->head = address.head; 2833 ect->record = 1; 2834 ect->kl = 0; 2835 ect->dl = 0; 2836 ccw[-1].flags |= CCW_FLAG_CC; 2837 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2838 ccw->flags = CCW_FLAG_SLI; 2839 ccw->count = 8; 2840 ccw->cda = (__u32)(addr_t) ect; 2841 } else { /* write remaining records */ 2842 for (i = 0; i < rpt; i++) { 2843 ect = (struct eckd_count *) data; 2844 data += sizeof(struct eckd_count); 2845 ect->cyl = address.cyl; 2846 ect->head = address.head; 2847 ect->record = i + 1; 2848 ect->kl = 0; 2849 ect->dl = fdata->blksize; 2850 /* 2851 * Check for special tracks 0-1 2852 * when formatting CDL 2853 */ 2854 if ((intensity & 0x08) && 2855 address.cyl == 0 && address.head == 0) { 2856 if (i < 3) { 2857 ect->kl = 4; 2858 ect->dl = sizes_trk0[i] - 4; 2859 } 2860 } 2861 if ((intensity & 0x08) && 2862 address.cyl == 0 && address.head == 1) { 2863 ect->kl = 44; 2864 ect->dl = LABEL_SIZE - 44; 2865 } 2866 ccw[-1].flags |= CCW_FLAG_CC; 2867 if (i != 0 || j == 0) 2868 ccw->cmd_code = 2869 DASD_ECKD_CCW_WRITE_CKD; 2870 else 2871 ccw->cmd_code = 2872 DASD_ECKD_CCW_WRITE_CKD_MT; 2873 ccw->flags = CCW_FLAG_SLI; 2874 ccw->count = 8; 2875 ccw->cda = (__u32)(addr_t) ect; 2876 ccw++; 2877 } 2878 } 2879 } 2880 2881 fcp->startdev = startdev; 2882 fcp->memdev = startdev; 2883 fcp->basedev = base; 2884 fcp->retries = 256; 2885 fcp->expires = startdev->default_expires * HZ; 2886 fcp->buildclk = get_tod_clock(); 2887 fcp->status = DASD_CQR_FILLED; 2888 2889 return fcp; 2890 } 2891 2892 /* 2893 * Wrapper function to build a CCW request depending on input data 2894 */ 2895 static struct dasd_ccw_req * 2896 dasd_eckd_format_build_ccw_req(struct dasd_device *base, 2897 struct format_data_t *fdata, int enable_pav, 2898 int tpm, struct eckd_count *fmt_buffer, int rpt) 2899 { 2900 struct dasd_ccw_req *ccw_req; 2901 2902 if (!fmt_buffer) { 2903 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav); 2904 } else { 2905 if (tpm) 2906 ccw_req = dasd_eckd_build_check_tcw(base, fdata, 2907 enable_pav, 2908 
fmt_buffer, rpt); 2909 else 2910 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav, 2911 fmt_buffer, rpt); 2912 } 2913 2914 return ccw_req; 2915 } 2916 2917 /* 2918 * Sanity checks on format_data 2919 */ 2920 static int dasd_eckd_format_sanity_checks(struct dasd_device *base, 2921 struct format_data_t *fdata) 2922 { 2923 struct dasd_eckd_private *private = base->private; 2924 2925 if (fdata->start_unit >= 2926 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2927 dev_warn(&base->cdev->dev, 2928 "Start track number %u used in formatting is too big\n", 2929 fdata->start_unit); 2930 return -EINVAL; 2931 } 2932 if (fdata->stop_unit >= 2933 (private->real_cyl * private->rdc_data.trk_per_cyl)) { 2934 dev_warn(&base->cdev->dev, 2935 "Stop track number %u used in formatting is too big\n", 2936 fdata->stop_unit); 2937 return -EINVAL; 2938 } 2939 if (fdata->start_unit > fdata->stop_unit) { 2940 dev_warn(&base->cdev->dev, 2941 "Start track %u used in formatting exceeds end track\n", 2942 fdata->start_unit); 2943 return -EINVAL; 2944 } 2945 if (dasd_check_blocksize(fdata->blksize) != 0) { 2946 dev_warn(&base->cdev->dev, 2947 "The DASD cannot be formatted with block size %u\n", 2948 fdata->blksize); 2949 return -EINVAL; 2950 } 2951 return 0; 2952 } 2953 2954 /* 2955 * This function will process format_data originally coming from an IOCTL 2956 */ 2957 static int dasd_eckd_format_process_data(struct dasd_device *base, 2958 struct format_data_t *fdata, 2959 int enable_pav, int tpm, 2960 struct eckd_count *fmt_buffer, int rpt, 2961 struct irb *irb) 2962 { 2963 struct dasd_eckd_private *private = base->private; 2964 struct dasd_ccw_req *cqr, *n; 2965 struct list_head format_queue; 2966 struct dasd_device *device; 2967 char *sense = NULL; 2968 int old_start, old_stop, format_step; 2969 int step, retry; 2970 int rc; 2971 2972 rc = dasd_eckd_format_sanity_checks(base, fdata); 2973 if (rc) 2974 return rc; 2975 2976 INIT_LIST_HEAD(&format_queue); 2977 2978 old_start = fdata->start_unit; 2979 old_stop = fdata->stop_unit; 2980 2981 if (!tpm && fmt_buffer != NULL) { 2982 /* Command Mode / Format Check */ 2983 format_step = 1; 2984 } else if (tpm && fmt_buffer != NULL) { 2985 /* Transport Mode / Format Check */ 2986 format_step = DASD_CQR_MAX_CCW / rpt; 2987 } else { 2988 /* Normal Formatting */ 2989 format_step = DASD_CQR_MAX_CCW / 2990 recs_per_track(&private->rdc_data, 0, fdata->blksize); 2991 } 2992 2993 do { 2994 retry = 0; 2995 while (fdata->start_unit <= old_stop) { 2996 step = fdata->stop_unit - fdata->start_unit + 1; 2997 if (step > format_step) { 2998 fdata->stop_unit = 2999 fdata->start_unit + format_step - 1; 3000 } 3001 3002 cqr = dasd_eckd_format_build_ccw_req(base, fdata, 3003 enable_pav, tpm, 3004 fmt_buffer, rpt); 3005 if (IS_ERR(cqr)) { 3006 rc = PTR_ERR(cqr); 3007 if (rc == -ENOMEM) { 3008 if (list_empty(&format_queue)) 3009 goto out; 3010 /* 3011 * not enough memory available, start 3012 * requests retry after first requests 3013 * were finished 3014 */ 3015 retry = 1; 3016 break; 3017 } 3018 goto out_err; 3019 } 3020 list_add_tail(&cqr->blocklist, &format_queue); 3021 3022 if (fmt_buffer) { 3023 step = fdata->stop_unit - fdata->start_unit + 1; 3024 fmt_buffer += rpt * step; 3025 } 3026 fdata->start_unit = fdata->stop_unit + 1; 3027 fdata->stop_unit = old_stop; 3028 } 3029 3030 rc = dasd_sleep_on_queue(&format_queue); 3031 3032 out_err: 3033 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { 3034 device = cqr->startdev; 3035 private = device->private; 3036 3037 if 
(cqr->status == DASD_CQR_FAILED) { 3038 /* 3039 * Only get sense data if called by format 3040 * check 3041 */ 3042 if (fmt_buffer && irb) { 3043 sense = dasd_get_sense(&cqr->irb); 3044 memcpy(irb, &cqr->irb, sizeof(*irb)); 3045 } 3046 rc = -EIO; 3047 } 3048 list_del_init(&cqr->blocklist); 3049 dasd_ffree_request(cqr, device); 3050 private->count--; 3051 } 3052 3053 if (rc && rc != -EIO) 3054 goto out; 3055 if (rc == -EIO) { 3056 /* 3057 * In case fewer than the expected records are on the 3058 * track, we will most likely get a 'No Record Found' 3059 * error (in command mode) or a 'File Protected' error 3060 * (in transport mode). Those particular cases shouldn't 3061 * pass the -EIO to the IOCTL, therefore reset the rc 3062 * and continue. 3063 */ 3064 if (sense && 3065 (sense[1] & SNS1_NO_REC_FOUND || 3066 sense[1] & SNS1_FILE_PROTECTED)) 3067 retry = 1; 3068 else 3069 goto out; 3070 } 3071 3072 } while (retry); 3073 3074 out: 3075 fdata->start_unit = old_start; 3076 fdata->stop_unit = old_stop; 3077 3078 return rc; 3079 } 3080 3081 static int dasd_eckd_format_device(struct dasd_device *base, 3082 struct format_data_t *fdata, int enable_pav) 3083 { 3084 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL, 3085 0, NULL); 3086 } 3087 3088 static bool test_and_set_format_track(struct dasd_format_entry *to_format, 3089 struct dasd_block *block) 3090 { 3091 struct dasd_format_entry *format; 3092 unsigned long flags; 3093 bool rc = false; 3094 3095 spin_lock_irqsave(&block->format_lock, flags); 3096 list_for_each_entry(format, &block->format_list, list) { 3097 if (format->track == to_format->track) { 3098 rc = true; 3099 goto out; 3100 } 3101 } 3102 list_add_tail(&to_format->list, &block->format_list); 3103 3104 out: 3105 spin_unlock_irqrestore(&block->format_lock, flags); 3106 return rc; 3107 } 3108 3109 static void clear_format_track(struct dasd_format_entry *format, 3110 struct dasd_block *block) 3111 { 3112 unsigned long flags; 3113 3114 spin_lock_irqsave(&block->format_lock, flags); 3115 list_del_init(&format->list); 3116 spin_unlock_irqrestore(&block->format_lock, flags); 3117 } 3118 3119 /* 3120 * Callback function to free ESE format requests. 
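* The track is removed from the block's format_list again so that it can be formatted anew if required, and the request built by dasd_eckd_ese_format() is freed.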
3121 */ 3122 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data) 3123 { 3124 struct dasd_device *device = cqr->startdev; 3125 struct dasd_eckd_private *private = device->private; 3126 struct dasd_format_entry *format = data; 3127 3128 clear_format_track(format, cqr->basedev->block); 3129 private->count--; 3130 dasd_ffree_request(cqr, device); 3131 } 3132 3133 static struct dasd_ccw_req * 3134 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr, 3135 struct irb *irb) 3136 { 3137 struct dasd_eckd_private *private; 3138 struct dasd_format_entry *format; 3139 struct format_data_t fdata; 3140 unsigned int recs_per_trk; 3141 struct dasd_ccw_req *fcqr; 3142 struct dasd_device *base; 3143 struct dasd_block *block; 3144 unsigned int blksize; 3145 struct request *req; 3146 sector_t first_trk; 3147 sector_t last_trk; 3148 sector_t curr_trk; 3149 int rc; 3150 3151 req = cqr->callback_data; 3152 block = cqr->block; 3153 base = block->base; 3154 private = base->private; 3155 blksize = block->bp_block; 3156 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3157 format = &startdev->format_entry; 3158 3159 first_trk = blk_rq_pos(req) >> block->s2b_shift; 3160 sector_div(first_trk, recs_per_trk); 3161 last_trk = 3162 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3163 sector_div(last_trk, recs_per_trk); 3164 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); 3165 if (rc) 3166 return ERR_PTR(rc); 3167 3168 if (curr_trk < first_trk || curr_trk > last_trk) { 3169 DBF_DEV_EVENT(DBF_WARNING, startdev, 3170 "ESE error track %llu not within range %llu - %llu\n", 3171 curr_trk, first_trk, last_trk); 3172 return ERR_PTR(-EINVAL); 3173 } 3174 format->track = curr_trk; 3175 /* test if track is already in formatting by another thread */ 3176 if (test_and_set_format_track(format, block)) 3177 return ERR_PTR(-EEXIST); 3178 3179 fdata.start_unit = curr_trk; 3180 fdata.stop_unit = curr_trk; 3181 fdata.blksize = blksize; 3182 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0; 3183 3184 rc = dasd_eckd_format_sanity_checks(base, &fdata); 3185 if (rc) 3186 return ERR_PTR(-EINVAL); 3187 3188 /* 3189 * We're building the request with PAV disabled as we're reusing 3190 * the former startdev. 3191 */ 3192 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0); 3193 if (IS_ERR(fcqr)) 3194 return fcqr; 3195 3196 fcqr->callback = dasd_eckd_ese_format_cb; 3197 fcqr->callback_data = (void *) format; 3198 3199 return fcqr; 3200 } 3201 3202 /* 3203 * When data is read from an unformatted area of an ESE volume, this function 3204 * returns zeroed data and thereby mimics a read of zero data. 3205 * 3206 * The first unformatted track is the one that got the NRF error, the address is 3207 * encoded in the sense data. 3208 * 3209 * All tracks before have returned valid data and should not be touched. 3210 * All tracks after the unformatted track might be formatted or not. This is 3211 * currently not known, remember the processed data and return the remainder of 3212 * the request to the blocklayer in __dasd_cleanup_cqr(). 
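* Zeroing is done block by block while walking the bio segments; blocks that lie before the unformatted track are skipped, and cqr->proc_bytes records how many bytes were actually produced.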
3213 */ 3214 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb) 3215 { 3216 struct dasd_eckd_private *private; 3217 sector_t first_trk, last_trk; 3218 sector_t first_blk, last_blk; 3219 unsigned int blksize, off; 3220 unsigned int recs_per_trk; 3221 struct dasd_device *base; 3222 struct req_iterator iter; 3223 struct dasd_block *block; 3224 unsigned int skip_block; 3225 unsigned int blk_count; 3226 struct request *req; 3227 struct bio_vec bv; 3228 sector_t curr_trk; 3229 sector_t end_blk; 3230 char *dst; 3231 int rc; 3232 3233 req = (struct request *) cqr->callback_data; 3234 base = cqr->block->base; 3235 blksize = base->block->bp_block; 3236 block = cqr->block; 3237 private = base->private; 3238 skip_block = 0; 3239 blk_count = 0; 3240 3241 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 3242 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift; 3243 sector_div(first_trk, recs_per_trk); 3244 last_trk = last_blk = 3245 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 3246 sector_div(last_trk, recs_per_trk); 3247 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk); 3248 if (rc) 3249 return rc; 3250 3251 /* sanity check if the current track from sense data is valid */ 3252 if (curr_trk < first_trk || curr_trk > last_trk) { 3253 DBF_DEV_EVENT(DBF_WARNING, base, 3254 "ESE error track %llu not within range %llu - %llu\n", 3255 curr_trk, first_trk, last_trk); 3256 return -EINVAL; 3257 } 3258 3259 /* 3260 * if not the first track got the NRF error we have to skip over valid 3261 * blocks 3262 */ 3263 if (curr_trk != first_trk) 3264 skip_block = curr_trk * recs_per_trk - first_blk; 3265 3266 /* we have no information beyond the current track */ 3267 end_blk = (curr_trk + 1) * recs_per_trk; 3268 3269 rq_for_each_segment(bv, req, iter) { 3270 dst = page_address(bv.bv_page) + bv.bv_offset; 3271 for (off = 0; off < bv.bv_len; off += blksize) { 3272 if (first_blk + blk_count >= end_blk) { 3273 cqr->proc_bytes = blk_count * blksize; 3274 return 0; 3275 } 3276 if (dst && !skip_block) { 3277 dst += off; 3278 memset(dst, 0, blksize); 3279 } else { 3280 skip_block--; 3281 } 3282 blk_count++; 3283 } 3284 } 3285 return 0; 3286 } 3287 3288 /* 3289 * Helper function to count consecutive records of a single track. 3290 */ 3291 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start, 3292 int max) 3293 { 3294 int head; 3295 int i; 3296 3297 head = fmt_buffer[start].head; 3298 3299 /* 3300 * There are 3 conditions where we stop counting: 3301 * - if data reoccurs (same head and record may reoccur), which may 3302 * happen due to the way DASD_ECKD_CCW_READ_COUNT works 3303 * - when the head changes, because we're iterating over several tracks 3304 * then (DASD_ECKD_CCW_READ_COUNT_MT) 3305 * - when we've reached the end of sensible data in the buffer (the 3306 * record will be 0 then) 3307 */ 3308 for (i = start; i < max; i++) { 3309 if (i > start) { 3310 if ((fmt_buffer[i].head == head && 3311 fmt_buffer[i].record == 1) || 3312 fmt_buffer[i].head != head || 3313 fmt_buffer[i].record == 0) 3314 break; 3315 } 3316 } 3317 3318 return i - start; 3319 } 3320 3321 /* 3322 * Evaluate a given range of tracks. Data like number of records, blocksize, 3323 * record ids, and key length are compared with expected data. 3324 * 3325 * If a mismatch occurs, the corresponding error bit is set, as well as 3326 * additional information, depending on the error. 
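* In transport mode the records of a track may be followed by zeroed buffer entries, therefore the next valid starting position within fmt_buffer is searched for each track.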
3327 */ 3328 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer, 3329 struct format_check_t *cdata, 3330 int rpt_max, int rpt_exp, 3331 int trk_per_cyl, int tpm) 3332 { 3333 struct ch_t geo; 3334 int max_entries; 3335 int count = 0; 3336 int trkcount; 3337 int blksize; 3338 int pos = 0; 3339 int i, j; 3340 int kl; 3341 3342 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3343 max_entries = trkcount * rpt_max; 3344 3345 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) { 3346 /* Calculate the correct next starting position in the buffer */ 3347 if (tpm) { 3348 while (fmt_buffer[pos].record == 0 && 3349 fmt_buffer[pos].dl == 0) { 3350 if (pos++ > max_entries) 3351 break; 3352 } 3353 } else { 3354 if (i != cdata->expect.start_unit) 3355 pos += rpt_max - count; 3356 } 3357 3358 /* Calculate the expected geo values for the current track */ 3359 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl); 3360 3361 /* Count and check number of records */ 3362 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max); 3363 3364 if (count < rpt_exp) { 3365 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS; 3366 break; 3367 } 3368 if (count > rpt_exp) { 3369 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS; 3370 break; 3371 } 3372 3373 for (j = 0; j < count; j++, pos++) { 3374 blksize = cdata->expect.blksize; 3375 kl = 0; 3376 3377 /* 3378 * Set special values when checking CDL formatted 3379 * devices. 3380 */ 3381 if ((cdata->expect.intensity & 0x08) && 3382 geo.cyl == 0 && geo.head == 0) { 3383 if (j < 3) { 3384 blksize = sizes_trk0[j] - 4; 3385 kl = 4; 3386 } 3387 } 3388 if ((cdata->expect.intensity & 0x08) && 3389 geo.cyl == 0 && geo.head == 1) { 3390 blksize = LABEL_SIZE - 44; 3391 kl = 44; 3392 } 3393 3394 /* Check blocksize */ 3395 if (fmt_buffer[pos].dl != blksize) { 3396 cdata->result = DASD_FMT_ERR_BLKSIZE; 3397 goto out; 3398 } 3399 /* Check if key length is 0 */ 3400 if (fmt_buffer[pos].kl != kl) { 3401 cdata->result = DASD_FMT_ERR_KEY_LENGTH; 3402 goto out; 3403 } 3404 /* Check if record_id is correct */ 3405 if (fmt_buffer[pos].cyl != geo.cyl || 3406 fmt_buffer[pos].head != geo.head || 3407 fmt_buffer[pos].record != (j + 1)) { 3408 cdata->result = DASD_FMT_ERR_RECORD_ID; 3409 goto out; 3410 } 3411 } 3412 } 3413 3414 out: 3415 /* 3416 * In case of no errors, we need to decrease by one 3417 * to get the correct positions. 3418 */ 3419 if (!cdata->result) { 3420 i--; 3421 pos--; 3422 } 3423 3424 cdata->unit = i; 3425 cdata->num_records = count; 3426 cdata->rec = fmt_buffer[pos].record; 3427 cdata->blksize = fmt_buffer[pos].dl; 3428 cdata->key_length = fmt_buffer[pos].kl; 3429 } 3430 3431 /* 3432 * Check the format of a range of tracks of a DASD. 
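* A buffer large enough for the maximum possible number of records per track (blocksize 512) is allocated, filled by dasd_eckd_format_process_data() and evaluated by dasd_eckd_format_evaluate_tracks().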
3433 */ 3434 static int dasd_eckd_check_device_format(struct dasd_device *base, 3435 struct format_check_t *cdata, 3436 int enable_pav) 3437 { 3438 struct dasd_eckd_private *private = base->private; 3439 struct eckd_count *fmt_buffer; 3440 struct irb irb; 3441 int rpt_max, rpt_exp; 3442 int fmt_buffer_size; 3443 int trk_per_cyl; 3444 int trkcount; 3445 int tpm = 0; 3446 int rc; 3447 3448 trk_per_cyl = private->rdc_data.trk_per_cyl; 3449 3450 /* Get maximum and expected amount of records per track */ 3451 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1; 3452 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize); 3453 3454 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3455 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count); 3456 3457 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA); 3458 if (!fmt_buffer) 3459 return -ENOMEM; 3460 3461 /* 3462 * A certain FICON feature subset is needed to operate in transport 3463 * mode. Additionally, the support for transport mode is implicitly 3464 * checked by comparing the buffer size with fcx_max_data. As long as 3465 * the buffer size is smaller we can operate in transport mode and 3466 * process multiple tracks. If not, only one track at once is being 3467 * processed using command mode. 3468 */ 3469 if ((private->features.feature[40] & 0x04) && 3470 fmt_buffer_size <= private->fcx_max_data) 3471 tpm = 1; 3472 3473 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav, 3474 tpm, fmt_buffer, rpt_max, &irb); 3475 if (rc && rc != -EIO) 3476 goto out; 3477 if (rc == -EIO) { 3478 /* 3479 * If our first attempt with transport mode enabled comes back 3480 * with an incorrect length error, we're going to retry the 3481 * check with command mode. 
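* (a channel status of 0x40 indicates an incorrect length condition)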
3482 */ 3483 if (tpm && scsw_cstat(&irb.scsw) == 0x40) { 3484 tpm = 0; 3485 rc = dasd_eckd_format_process_data(base, &cdata->expect, 3486 enable_pav, tpm, 3487 fmt_buffer, rpt_max, 3488 &irb); 3489 if (rc) 3490 goto out; 3491 } else { 3492 goto out; 3493 } 3494 } 3495 3496 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp, 3497 trk_per_cyl, tpm); 3498 3499 out: 3500 kfree(fmt_buffer); 3501 3502 return rc; 3503 } 3504 3505 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 3506 { 3507 if (cqr->retries < 0) { 3508 cqr->status = DASD_CQR_FAILED; 3509 return; 3510 } 3511 cqr->status = DASD_CQR_FILLED; 3512 if (cqr->block && (cqr->startdev != cqr->block->base)) { 3513 dasd_eckd_reset_ccw_to_base_io(cqr); 3514 cqr->startdev = cqr->block->base; 3515 cqr->lpm = dasd_path_get_opm(cqr->block->base); 3516 } 3517 }; 3518 3519 static dasd_erp_fn_t 3520 dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 3521 { 3522 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 3523 struct ccw_device *cdev = device->cdev; 3524 3525 switch (cdev->id.cu_type) { 3526 case 0x3990: 3527 case 0x2105: 3528 case 0x2107: 3529 case 0x1750: 3530 return dasd_3990_erp_action; 3531 case 0x9343: 3532 case 0x3880: 3533 default: 3534 return dasd_default_erp_action; 3535 } 3536 } 3537 3538 static dasd_erp_fn_t 3539 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 3540 { 3541 return dasd_default_erp_postaction; 3542 } 3543 3544 static void dasd_eckd_check_for_device_change(struct dasd_device *device, 3545 struct dasd_ccw_req *cqr, 3546 struct irb *irb) 3547 { 3548 char mask; 3549 char *sense = NULL; 3550 struct dasd_eckd_private *private = device->private; 3551 3552 /* first of all check for state change pending interrupt */ 3553 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 3554 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 3555 /* 3556 * for alias only, not in offline processing 3557 * and only if not suspended 3558 */ 3559 if (!device->block && private->lcu && 3560 device->state == DASD_STATE_ONLINE && 3561 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 3562 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 3563 /* schedule worker to reload device */ 3564 dasd_reload_device(device); 3565 } 3566 dasd_generic_handle_state_change(device); 3567 return; 3568 } 3569 3570 sense = dasd_get_sense(irb); 3571 if (!sense) 3572 return; 3573 3574 /* summary unit check */ 3575 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 3576 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 3577 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { 3578 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3579 "eckd suc: device already notified"); 3580 return; 3581 } 3582 sense = dasd_get_sense(irb); 3583 if (!sense) { 3584 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3585 "eckd suc: no reason code available"); 3586 clear_bit(DASD_FLAG_SUC, &device->flags); 3587 return; 3588 3589 } 3590 private->suc_reason = sense[8]; 3591 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", 3592 "eckd handle summary unit check: reason", 3593 private->suc_reason); 3594 dasd_get_device(device); 3595 if (!schedule_work(&device->suc_work)) 3596 dasd_put_device(device); 3597 3598 return; 3599 } 3600 3601 /* service information message SIM */ 3602 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && 3603 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 3604 dasd_3990_erp_handle_sim(device, sense); 3605 return; 3606 } 3607 3608 /* loss of device reservation is handled via base devices only 3609 * as alias devices may 
be used with several bases 3610 */ 3611 if (device->block && (sense[27] & DASD_SENSE_BIT_0) && 3612 (sense[7] == 0x3F) && 3613 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && 3614 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) { 3615 if (device->features & DASD_FEATURE_FAILONSLCK) 3616 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags); 3617 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 3618 dev_err(&device->cdev->dev, 3619 "The device reservation was lost\n"); 3620 } 3621 } 3622 3623 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device, 3624 unsigned int first_trk, 3625 unsigned int last_trk) 3626 { 3627 struct dasd_eckd_private *private = device->private; 3628 unsigned int trks_per_vol; 3629 int rc = 0; 3630 3631 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl; 3632 3633 if (first_trk >= trks_per_vol) { 3634 dev_warn(&device->cdev->dev, 3635 "Start track number %u used in the space release command is too big\n", 3636 first_trk); 3637 rc = -EINVAL; 3638 } else if (last_trk >= trks_per_vol) { 3639 dev_warn(&device->cdev->dev, 3640 "Stop track number %u used in the space release command is too big\n", 3641 last_trk); 3642 rc = -EINVAL; 3643 } else if (first_trk > last_trk) { 3644 dev_warn(&device->cdev->dev, 3645 "Start track %u used in the space release command exceeds the end track\n", 3646 first_trk); 3647 rc = -EINVAL; 3648 } 3649 return rc; 3650 } 3651 3652 /* 3653 * Helper function to count the amount of involved extents within a given range 3654 * with extent alignment in mind. 3655 */ 3656 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext) 3657 { 3658 int cur_pos = 0; 3659 int count = 0; 3660 int tmp; 3661 3662 if (from == to) 3663 return 1; 3664 3665 /* Count first partial extent */ 3666 if (from % trks_per_ext != 0) { 3667 tmp = from + trks_per_ext - (from % trks_per_ext) - 1; 3668 if (tmp > to) 3669 tmp = to; 3670 cur_pos = tmp - from + 1; 3671 count++; 3672 } 3673 /* Count full extents */ 3674 if (to - (from + cur_pos) + 1 >= trks_per_ext) { 3675 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext); 3676 count += (tmp - (from + cur_pos) + 1) / trks_per_ext; 3677 cur_pos = tmp; 3678 } 3679 /* Count last partial extent */ 3680 if (cur_pos < to) 3681 count++; 3682 3683 return count; 3684 } 3685 3686 /* 3687 * Release allocated space for a given range or an entire volume. 3688 */ 3689 static struct dasd_ccw_req * 3690 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block, 3691 struct request *req, unsigned int first_trk, 3692 unsigned int last_trk, int by_extent) 3693 { 3694 struct dasd_eckd_private *private = device->private; 3695 struct dasd_dso_ras_ext_range *ras_range; 3696 struct dasd_rssd_features *features; 3697 struct dasd_dso_ras_data *ras_data; 3698 u16 heads, beg_head, end_head; 3699 int cur_to_trk, cur_from_trk; 3700 struct dasd_ccw_req *cqr; 3701 u32 beg_cyl, end_cyl; 3702 struct ccw1 *ccw; 3703 int trks_per_ext; 3704 size_t ras_size; 3705 size_t size; 3706 int nr_exts; 3707 void *rq; 3708 int i; 3709 3710 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk)) 3711 return ERR_PTR(-EINVAL); 3712 3713 rq = req ? 
blk_mq_rq_to_pdu(req) : NULL; 3714 3715 features = &private->features; 3716 3717 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3718 nr_exts = 0; 3719 if (by_extent) 3720 nr_exts = count_exts(first_trk, last_trk, trks_per_ext); 3721 ras_size = sizeof(*ras_data); 3722 size = ras_size + (nr_exts * sizeof(*ras_range)); 3723 3724 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq); 3725 if (IS_ERR(cqr)) { 3726 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 3727 "Could not allocate RAS request"); 3728 return cqr; 3729 } 3730 3731 ras_data = cqr->data; 3732 memset(ras_data, 0, size); 3733 3734 ras_data->order = DSO_ORDER_RAS; 3735 ras_data->flags.vol_type = 0; /* CKD volume */ 3736 /* Release specified extents or entire volume */ 3737 ras_data->op_flags.by_extent = by_extent; 3738 /* 3739 * This bit guarantees initialisation of tracks within an extent that is 3740 * not fully specified, but is only supported with a certain feature 3741 * subset. 3742 */ 3743 ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); 3744 ras_data->lss = private->ned->ID; 3745 ras_data->dev_addr = private->ned->unit_addr; 3746 ras_data->nr_exts = nr_exts; 3747 3748 if (by_extent) { 3749 heads = private->rdc_data.trk_per_cyl; 3750 cur_from_trk = first_trk; 3751 cur_to_trk = first_trk + trks_per_ext - 3752 (first_trk % trks_per_ext) - 1; 3753 if (cur_to_trk > last_trk) 3754 cur_to_trk = last_trk; 3755 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size); 3756 3757 for (i = 0; i < nr_exts; i++) { 3758 beg_cyl = cur_from_trk / heads; 3759 beg_head = cur_from_trk % heads; 3760 end_cyl = cur_to_trk / heads; 3761 end_head = cur_to_trk % heads; 3762 3763 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head); 3764 set_ch_t(&ras_range->end_ext, end_cyl, end_head); 3765 3766 cur_from_trk = cur_to_trk + 1; 3767 cur_to_trk = cur_from_trk + trks_per_ext - 1; 3768 if (cur_to_trk > last_trk) 3769 cur_to_trk = last_trk; 3770 ras_range++; 3771 } 3772 } 3773 3774 ccw = cqr->cpaddr; 3775 ccw->cda = (__u32)(addr_t)cqr->data; 3776 ccw->cmd_code = DASD_ECKD_CCW_DSO; 3777 ccw->count = size; 3778 3779 cqr->startdev = device; 3780 cqr->memdev = device; 3781 cqr->block = block; 3782 cqr->retries = 256; 3783 cqr->expires = device->default_expires * HZ; 3784 cqr->buildclk = get_tod_clock(); 3785 cqr->status = DASD_CQR_FILLED; 3786 3787 return cqr; 3788 } 3789 3790 static int dasd_eckd_release_space_full(struct dasd_device *device) 3791 { 3792 struct dasd_ccw_req *cqr; 3793 int rc; 3794 3795 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0); 3796 if (IS_ERR(cqr)) 3797 return PTR_ERR(cqr); 3798 3799 rc = dasd_sleep_on_interruptible(cqr); 3800 3801 dasd_sfree_request(cqr, cqr->memdev); 3802 3803 return rc; 3804 } 3805 3806 static int dasd_eckd_release_space_trks(struct dasd_device *device, 3807 unsigned int from, unsigned int to) 3808 { 3809 struct dasd_eckd_private *private = device->private; 3810 struct dasd_block *block = device->block; 3811 struct dasd_ccw_req *cqr, *n; 3812 struct list_head ras_queue; 3813 unsigned int device_exts; 3814 int trks_per_ext; 3815 int stop, step; 3816 int cur_pos; 3817 int rc = 0; 3818 int retry; 3819 3820 INIT_LIST_HEAD(&ras_queue); 3821 3822 device_exts = private->real_cyl / dasd_eckd_ext_size(device); 3823 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3824 3825 /* Make sure device limits are not exceeded */ 3826 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX); 3827 cur_pos = from; 3828 3829 do { 
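/* Build RAS requests for at most 'step' tracks per iteration, cut at extent boundaries; if allocation fails with -ENOMEM, submit what is queued and retry the remainder. */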
3830 retry = 0; 3831 while (cur_pos < to) { 3832 stop = cur_pos + step - 3833 ((cur_pos + step) % trks_per_ext) - 1; 3834 if (stop > to) 3835 stop = to; 3836 3837 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1); 3838 if (IS_ERR(cqr)) { 3839 rc = PTR_ERR(cqr); 3840 if (rc == -ENOMEM) { 3841 if (list_empty(&ras_queue)) 3842 goto out; 3843 retry = 1; 3844 break; 3845 } 3846 goto err_out; 3847 } 3848 3849 spin_lock_irq(&block->queue_lock); 3850 list_add_tail(&cqr->blocklist, &ras_queue); 3851 spin_unlock_irq(&block->queue_lock); 3852 cur_pos = stop + 1; 3853 } 3854 3855 rc = dasd_sleep_on_queue_interruptible(&ras_queue); 3856 3857 err_out: 3858 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { 3859 device = cqr->startdev; 3860 private = device->private; 3861 3862 spin_lock_irq(&block->queue_lock); 3863 list_del_init(&cqr->blocklist); 3864 spin_unlock_irq(&block->queue_lock); 3865 dasd_sfree_request(cqr, device); 3866 private->count--; 3867 } 3868 } while (retry); 3869 3870 out: 3871 return rc; 3872 } 3873 3874 static int dasd_eckd_release_space(struct dasd_device *device, 3875 struct format_data_t *rdata) 3876 { 3877 if (rdata->intensity & DASD_FMT_INT_ESE_FULL) 3878 return dasd_eckd_release_space_full(device); 3879 else if (rdata->intensity == 0) 3880 return dasd_eckd_release_space_trks(device, rdata->start_unit, 3881 rdata->stop_unit); 3882 else 3883 return -EINVAL; 3884 } 3885 3886 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3887 struct dasd_device *startdev, 3888 struct dasd_block *block, 3889 struct request *req, 3890 sector_t first_rec, 3891 sector_t last_rec, 3892 sector_t first_trk, 3893 sector_t last_trk, 3894 unsigned int first_offs, 3895 unsigned int last_offs, 3896 unsigned int blk_per_trk, 3897 unsigned int blksize) 3898 { 3899 struct dasd_eckd_private *private; 3900 unsigned long *idaws; 3901 struct LO_eckd_data *LO_data; 3902 struct dasd_ccw_req *cqr; 3903 struct ccw1 *ccw; 3904 struct req_iterator iter; 3905 struct bio_vec bv; 3906 char *dst; 3907 unsigned int off; 3908 int count, cidaw, cplength, datasize; 3909 sector_t recid; 3910 unsigned char cmd, rcmd; 3911 int use_prefix; 3912 struct dasd_device *basedev; 3913 3914 basedev = block->base; 3915 private = basedev->private; 3916 if (rq_data_dir(req) == READ) 3917 cmd = DASD_ECKD_CCW_READ_MT; 3918 else if (rq_data_dir(req) == WRITE) 3919 cmd = DASD_ECKD_CCW_WRITE_MT; 3920 else 3921 return ERR_PTR(-EINVAL); 3922 3923 /* Check struct bio and count the number of blocks for the request. */ 3924 count = 0; 3925 cidaw = 0; 3926 rq_for_each_segment(bv, req, iter) { 3927 if (bv.bv_len & (blksize - 1)) 3928 /* Eckd can only do full blocks. */ 3929 return ERR_PTR(-EINVAL); 3930 count += bv.bv_len >> (block->s2b_shift + 9); 3931 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 3932 cidaw += bv.bv_len >> (block->s2b_shift + 9); 3933 } 3934 /* Paranoia. 
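(the block count derived from the bio segments must match the requested record range)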
*/
3935 if (count != last_rec - first_rec + 1)
3936 return ERR_PTR(-EINVAL);
3937
3938 /* use the prefix command if available */
3939 use_prefix = private->features.feature[8] & 0x01;
3940 if (use_prefix) {
3941 /* 1x prefix + number of blocks */
3942 cplength = 2 + count;
3943 /* 1x prefix + cidaws*sizeof(long) */
3944 datasize = sizeof(struct PFX_eckd_data) +
3945 sizeof(struct LO_eckd_data) +
3946 cidaw * sizeof(unsigned long);
3947 } else {
3948 /* 1x define extent + 1x locate record + number of blocks */
3949 cplength = 2 + count;
3950 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
3951 datasize = sizeof(struct DE_eckd_data) +
3952 sizeof(struct LO_eckd_data) +
3953 cidaw * sizeof(unsigned long);
3954 }
3955 /* Find out the number of additional locate record ccws for cdl. */
3956 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
3957 if (last_rec >= 2*blk_per_trk)
3958 count = 2*blk_per_trk - first_rec;
3959 cplength += count;
3960 datasize += count*sizeof(struct LO_eckd_data);
3961 }
3962 /* Allocate the ccw request. */
3963 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
3964 startdev, blk_mq_rq_to_pdu(req));
3965 if (IS_ERR(cqr))
3966 return cqr;
3967 ccw = cqr->cpaddr;
3968 /* First ccw is define extent or prefix. */
3969 if (use_prefix) {
3970 if (prefix(ccw++, cqr->data, first_trk,
3971 last_trk, cmd, basedev, startdev) == -EAGAIN) {
3972 /* Clock not in sync and XRC is enabled.
3973 * Try again later.
3974 */
3975 dasd_sfree_request(cqr, startdev);
3976 return ERR_PTR(-EAGAIN);
3977 }
3978 idaws = (unsigned long *) (cqr->data +
3979 sizeof(struct PFX_eckd_data));
3980 } else {
3981 if (define_extent(ccw++, cqr->data, first_trk,
3982 last_trk, cmd, basedev, 0) == -EAGAIN) {
3983 /* Clock not in sync and XRC is enabled.
3984 * Try again later.
3985 */
3986 dasd_sfree_request(cqr, startdev);
3987 return ERR_PTR(-EAGAIN);
3988 }
3989 idaws = (unsigned long *) (cqr->data +
3990 sizeof(struct DE_eckd_data));
3991 }
3992 /* Build locate_record + read/write CCWs. */
3993 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
3994 recid = first_rec;
3995 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
3996 /* Only standard blocks so there is just one locate record. */
3997 ccw[-1].flags |= CCW_FLAG_CC;
3998 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
3999 last_rec - recid + 1, cmd, basedev, blksize);
4000 }
4001 rq_for_each_segment(bv, req, iter) {
4002 dst = page_address(bv.bv_page) + bv.bv_offset;
4003 if (dasd_page_cache) {
4004 char *copy = kmem_cache_alloc(dasd_page_cache,
4005 GFP_DMA | __GFP_NOWARN);
4006 if (copy && rq_data_dir(req) == WRITE)
4007 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
4008 if (copy)
4009 dst = copy + bv.bv_offset;
4010 }
4011 for (off = 0; off < bv.bv_len; off += blksize) {
4012 sector_t trkid = recid;
4013 unsigned int recoffs = sector_div(trkid, blk_per_trk);
4014 rcmd = cmd;
4015 count = blksize;
4016 /* Locate record for CDL special block? */
4017 if (private->uses_cdl && recid < 2*blk_per_trk) {
4018 if (dasd_eckd_cdl_special(blk_per_trk, recid)) {
4019 rcmd |= 0x8;
4020 count = dasd_eckd_cdl_reclen(recid);
4021 if (count < blksize &&
4022 rq_data_dir(req) == READ)
4023 memset(dst + count, 0xe5,
4024 blksize - count);
4025 }
4026 ccw[-1].flags |= CCW_FLAG_CC;
4027 locate_record(ccw++, LO_data++,
4028 trkid, recoffs + 1,
4029 1, rcmd, basedev, count);
4030 }
4031 /* Locate record for standard blocks?
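* Once recid reaches 2*blk_per_trk the CDL area is exhausted; all
* remaining blocks are standard formatted, so a single locate record
* covers the rest of the request.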
*/ 4032 if (private->uses_cdl && recid == 2*blk_per_trk) { 4033 ccw[-1].flags |= CCW_FLAG_CC; 4034 locate_record(ccw++, LO_data++, 4035 trkid, recoffs + 1, 4036 last_rec - recid + 1, 4037 cmd, basedev, count); 4038 } 4039 /* Read/write ccw. */ 4040 ccw[-1].flags |= CCW_FLAG_CC; 4041 ccw->cmd_code = rcmd; 4042 ccw->count = count; 4043 if (idal_is_needed(dst, blksize)) { 4044 ccw->cda = (__u32)(addr_t) idaws; 4045 ccw->flags = CCW_FLAG_IDA; 4046 idaws = idal_create_words(idaws, dst, blksize); 4047 } else { 4048 ccw->cda = (__u32)(addr_t) dst; 4049 ccw->flags = 0; 4050 } 4051 ccw++; 4052 dst += blksize; 4053 recid++; 4054 } 4055 } 4056 if (blk_noretry_request(req) || 4057 block->base->features & DASD_FEATURE_FAILFAST) 4058 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4059 cqr->startdev = startdev; 4060 cqr->memdev = startdev; 4061 cqr->block = block; 4062 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4063 cqr->lpm = dasd_path_get_ppm(startdev); 4064 cqr->retries = startdev->default_retries; 4065 cqr->buildclk = get_tod_clock(); 4066 cqr->status = DASD_CQR_FILLED; 4067 4068 /* Set flags to suppress output for expected errors */ 4069 if (dasd_eckd_is_ese(basedev)) { 4070 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4071 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4072 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4073 } 4074 4075 return cqr; 4076 } 4077 4078 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 4079 struct dasd_device *startdev, 4080 struct dasd_block *block, 4081 struct request *req, 4082 sector_t first_rec, 4083 sector_t last_rec, 4084 sector_t first_trk, 4085 sector_t last_trk, 4086 unsigned int first_offs, 4087 unsigned int last_offs, 4088 unsigned int blk_per_trk, 4089 unsigned int blksize) 4090 { 4091 unsigned long *idaws; 4092 struct dasd_ccw_req *cqr; 4093 struct ccw1 *ccw; 4094 struct req_iterator iter; 4095 struct bio_vec bv; 4096 char *dst, *idaw_dst; 4097 unsigned int cidaw, cplength, datasize; 4098 unsigned int tlf; 4099 sector_t recid; 4100 unsigned char cmd; 4101 struct dasd_device *basedev; 4102 unsigned int trkcount, count, count_to_trk_end; 4103 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 4104 unsigned char new_track, end_idaw; 4105 sector_t trkid; 4106 unsigned int recoffs; 4107 4108 basedev = block->base; 4109 if (rq_data_dir(req) == READ) 4110 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4111 else if (rq_data_dir(req) == WRITE) 4112 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4113 else 4114 return ERR_PTR(-EINVAL); 4115 4116 /* Track based I/O needs IDAWs for each page, and not just for 4117 * 64 bit addresses. We need additional idals for pages 4118 * that get filled from two tracks, so we use the number 4119 * of records as upper limit. 4120 */ 4121 cidaw = last_rec - first_rec + 1; 4122 trkcount = last_trk - first_trk + 1; 4123 4124 /* 1x prefix + one read/write ccw per track */ 4125 cplength = 1 + trkcount; 4126 4127 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long); 4128 4129 /* Allocate the ccw request. 
*/
4130 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
4131 startdev, blk_mq_rq_to_pdu(req));
4132 if (IS_ERR(cqr))
4133 return cqr;
4134 ccw = cqr->cpaddr;
4135 /* transfer length factor: how many bytes to read from the last track */
4136 if (first_trk == last_trk)
4137 tlf = last_offs - first_offs + 1;
4138 else
4139 tlf = last_offs + 1;
4140 tlf *= blksize;
4141
4142 if (prefix_LRE(ccw++, cqr->data, first_trk,
4143 last_trk, cmd, basedev, startdev,
4144 1 /* format */, first_offs + 1,
4145 trkcount, blksize,
4146 tlf) == -EAGAIN) {
4147 /* Clock not in sync and XRC is enabled.
4148 * Try again later.
4149 */
4150 dasd_sfree_request(cqr, startdev);
4151 return ERR_PTR(-EAGAIN);
4152 }
4153
4154 /*
4155 * The translation of a request into ccw programs must meet the
4156 * following conditions:
4157 * - all idaws but the first and the last must address full pages
4158 * (or 2K blocks on 31-bit)
4159 * - the scope of a ccw and its idal ends with the track boundaries
4160 */
4161 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
4162 recid = first_rec;
4163 new_track = 1;
4164 end_idaw = 0;
4165 len_to_track_end = 0;
4166 idaw_dst = NULL;
4167 idaw_len = 0;
4168 rq_for_each_segment(bv, req, iter) {
4169 dst = page_address(bv.bv_page) + bv.bv_offset;
4170 seg_len = bv.bv_len;
4171 while (seg_len) {
4172 if (new_track) {
4173 trkid = recid;
4174 recoffs = sector_div(trkid, blk_per_trk);
4175 count_to_trk_end = blk_per_trk - recoffs;
4176 count = min((last_rec - recid + 1),
4177 (sector_t)count_to_trk_end);
4178 len_to_track_end = count * blksize;
4179 ccw[-1].flags |= CCW_FLAG_CC;
4180 ccw->cmd_code = cmd;
4181 ccw->count = len_to_track_end;
4182 ccw->cda = (__u32)(addr_t)idaws;
4183 ccw->flags = CCW_FLAG_IDA;
4184 ccw++;
4185 recid += count;
4186 new_track = 0;
4187 /* first idaw for a ccw may start anywhere */
4188 if (!idaw_dst)
4189 idaw_dst = dst;
4190 }
4191 /* If we start a new idaw, we must make sure that it
4192 * starts on an IDA_BLOCK_SIZE boundary.
4193 * If we continue an idaw, we must make sure that the
4194 * current segment begins where the so far accumulated
4195 * idaw ends.
4196 */
4197 if (!idaw_dst) {
4198 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
4199 dasd_sfree_request(cqr, startdev);
4200 return ERR_PTR(-ERANGE);
4201 } else
4202 idaw_dst = dst;
4203 }
4204 if ((idaw_dst + idaw_len) != dst) {
4205 dasd_sfree_request(cqr, startdev);
4206 return ERR_PTR(-ERANGE);
4207 }
4208 part_len = min(seg_len, len_to_track_end);
4209 seg_len -= part_len;
4210 dst += part_len;
4211 idaw_len += part_len;
4212 len_to_track_end -= part_len;
4213 /* collected memory area ends on an IDA_BLOCK border,
4214 * -> create an idaw
4215 * idal_create_words will handle cases where idaw_len
4216 * is larger than IDA_BLOCK_SIZE
4217 */
4218 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
4219 end_idaw = 1;
4220 /* We also need to end the idaw at track end */
4221 if (!len_to_track_end) {
4222 new_track = 1;
4223 end_idaw = 1;
4224 }
4225 if (end_idaw) {
4226 idaws = idal_create_words(idaws, idaw_dst,
4227 idaw_len);
4228 idaw_dst = NULL;
4229 idaw_len = 0;
4230 end_idaw = 0;
4231 }
4232 }
4233 }
4234
4235 if (blk_noretry_request(req) ||
4236 block->base->features & DASD_FEATURE_FAILFAST)
4237 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
4238 cqr->startdev = startdev;
4239 cqr->memdev = startdev;
4240 cqr->block = block;
4241 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */
4242 cqr->lpm = dasd_path_get_ppm(startdev);
4243 cqr->retries = startdev->default_retries;
4244 cqr->buildclk = get_tod_clock();
4245 cqr->status = DASD_CQR_FILLED;
4246
4247 /* Set flags to suppress output for expected errors */
4248 if (dasd_eckd_is_ese(basedev))
4249 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
4250
4251 return cqr;
4252 }
4253
4254 static int prepare_itcw(struct itcw *itcw,
4255 unsigned int trk, unsigned int totrk, int cmd,
4256 struct dasd_device *basedev,
4257 struct dasd_device *startdev,
4258 unsigned int rec_on_trk, int count,
4259 unsigned int blksize,
4260 unsigned int total_data_size,
4261 unsigned int tlf,
4262 unsigned int blk_per_trk)
4263 {
4264 struct PFX_eckd_data pfxdata;
4265 struct dasd_eckd_private *basepriv, *startpriv;
4266 struct DE_eckd_data *dedata;
4267 struct LRE_eckd_data *lredata;
4268 struct dcw *dcw;
4269
4270 u32 begcyl, endcyl;
4271 u16 heads, beghead, endhead;
4272 u8 pfx_cmd;
4273
4274 int rc = 0;
4275 int sector = 0;
4276 int dn, d;
4277
4278
4279 /* setup prefix data */
4280 basepriv = basedev->private;
4281 startpriv = startdev->private;
4282 dedata = &pfxdata.define_extent;
4283 lredata = &pfxdata.locate_record;
4284
4285 memset(&pfxdata, 0, sizeof(pfxdata));
4286 pfxdata.format = 1; /* PFX with LRE */
4287 pfxdata.base_address = basepriv->ned->unit_addr;
4288 pfxdata.base_lss = basepriv->ned->ID;
4289 pfxdata.validity.define_extent = 1;
4290
4291 /* private uid is kept up to date, conf_data may be outdated */
4292 if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
4293 pfxdata.validity.verify_base = 1;
4294
4295 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
4296 pfxdata.validity.verify_base = 1;
4297 pfxdata.validity.hyper_pav = 1;
4298 }
4299
4300 switch (cmd) {
4301 case DASD_ECKD_CCW_READ_TRACK_DATA:
4302 dedata->mask.perm = 0x1;
4303 dedata->attributes.operation = basepriv->attrib.operation;
4304 dedata->blk_size = blksize;
4305 dedata->ga_extended |= 0x42;
4306 lredata->operation.orientation = 0x0;
4307 lredata->operation.operation = 0x0C;
4308 lredata->auxiliary.check_bytes = 0x01;
4309
pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4310 break; 4311 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 4312 dedata->mask.perm = 0x02; 4313 dedata->attributes.operation = basepriv->attrib.operation; 4314 dedata->blk_size = blksize; 4315 rc = set_timestamp(NULL, dedata, basedev); 4316 dedata->ga_extended |= 0x42; 4317 lredata->operation.orientation = 0x0; 4318 lredata->operation.operation = 0x3F; 4319 lredata->extended_operation = 0x23; 4320 lredata->auxiliary.check_bytes = 0x2; 4321 /* 4322 * If XRC is supported the System Time Stamp is set. The 4323 * validity of the time stamp must be reflected in the prefix 4324 * data as well. 4325 */ 4326 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02) 4327 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */ 4328 pfx_cmd = DASD_ECKD_CCW_PFX; 4329 break; 4330 case DASD_ECKD_CCW_READ_COUNT_MT: 4331 dedata->mask.perm = 0x1; 4332 dedata->attributes.operation = DASD_BYPASS_CACHE; 4333 dedata->ga_extended |= 0x42; 4334 dedata->blk_size = blksize; 4335 lredata->operation.orientation = 0x2; 4336 lredata->operation.operation = 0x16; 4337 lredata->auxiliary.check_bytes = 0x01; 4338 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4339 break; 4340 default: 4341 DBF_DEV_EVENT(DBF_ERR, basedev, 4342 "prepare itcw, unknown opcode 0x%x", cmd); 4343 BUG(); 4344 break; 4345 } 4346 if (rc) 4347 return rc; 4348 4349 dedata->attributes.mode = 0x3; /* ECKD */ 4350 4351 heads = basepriv->rdc_data.trk_per_cyl; 4352 begcyl = trk / heads; 4353 beghead = trk % heads; 4354 endcyl = totrk / heads; 4355 endhead = totrk % heads; 4356 4357 /* check for sequential prestage - enhance cylinder range */ 4358 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 4359 dedata->attributes.operation == DASD_SEQ_ACCESS) { 4360 4361 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 4362 endcyl += basepriv->attrib.nr_cyl; 4363 else 4364 endcyl = (basepriv->real_cyl - 1); 4365 } 4366 4367 set_ch_t(&dedata->beg_ext, begcyl, beghead); 4368 set_ch_t(&dedata->end_ext, endcyl, endhead); 4369 4370 dedata->ep_format = 0x20; /* records per track is valid */ 4371 dedata->ep_rec_per_track = blk_per_trk; 4372 4373 if (rec_on_trk) { 4374 switch (basepriv->rdc_data.dev_type) { 4375 case 0x3390: 4376 dn = ceil_quot(blksize + 6, 232); 4377 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 4378 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 4379 break; 4380 case 0x3380: 4381 d = 7 + ceil_quot(blksize + 12, 32); 4382 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 4383 break; 4384 } 4385 } 4386 4387 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) { 4388 lredata->auxiliary.length_valid = 0; 4389 lredata->auxiliary.length_scope = 0; 4390 lredata->sector = 0xff; 4391 } else { 4392 lredata->auxiliary.length_valid = 1; 4393 lredata->auxiliary.length_scope = 1; 4394 lredata->sector = sector; 4395 } 4396 lredata->auxiliary.imbedded_ccw_valid = 1; 4397 lredata->length = tlf; 4398 lredata->imbedded_ccw = cmd; 4399 lredata->count = count; 4400 set_ch_t(&lredata->seek_addr, begcyl, beghead); 4401 lredata->search_arg.cyl = lredata->seek_addr.cyl; 4402 lredata->search_arg.head = lredata->seek_addr.head; 4403 lredata->search_arg.record = rec_on_trk; 4404 4405 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 4406 &pfxdata, sizeof(pfxdata), total_data_size); 4407 return PTR_ERR_OR_ZERO(dcw); 4408 } 4409 4410 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 4411 struct dasd_device *startdev, 4412 struct dasd_block *block, 4413 struct request *req, 4414 sector_t first_rec, 4415 sector_t last_rec, 4416 sector_t first_trk, 4417 sector_t 
last_trk,
4418 unsigned int first_offs,
4419 unsigned int last_offs,
4420 unsigned int blk_per_trk,
4421 unsigned int blksize)
4422 {
4423 struct dasd_ccw_req *cqr;
4424 struct req_iterator iter;
4425 struct bio_vec bv;
4426 char *dst;
4427 unsigned int trkcount, ctidaw;
4428 unsigned char cmd;
4429 struct dasd_device *basedev;
4430 unsigned int tlf;
4431 struct itcw *itcw;
4432 struct tidaw *last_tidaw = NULL;
4433 int itcw_op;
4434 size_t itcw_size;
4435 u8 tidaw_flags;
4436 unsigned int seg_len, part_len, len_to_track_end;
4437 unsigned char new_track;
4438 sector_t recid, trkid;
4439 unsigned int offs;
4440 unsigned int count, count_to_trk_end;
4441 int ret;
4442
4443 basedev = block->base;
4444 if (rq_data_dir(req) == READ) {
4445 cmd = DASD_ECKD_CCW_READ_TRACK_DATA;
4446 itcw_op = ITCW_OP_READ;
4447 } else if (rq_data_dir(req) == WRITE) {
4448 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA;
4449 itcw_op = ITCW_OP_WRITE;
4450 } else
4451 return ERR_PTR(-EINVAL);
4452
4453 /* Track-based I/O needs to address all memory via TIDAWs,
4454 * not just for 64 bit addresses. This allows us to map
4455 * each segment directly to one tidaw.
4456 * In the case of write requests, additional tidaws may
4457 * be needed when a segment crosses a track boundary.
4458 */
4459 trkcount = last_trk - first_trk + 1;
4460 ctidaw = 0;
4461 rq_for_each_segment(bv, req, iter) {
4462 ++ctidaw;
4463 }
4464 if (rq_data_dir(req) == WRITE)
4465 ctidaw += (last_trk - first_trk);
4466
4467 /* Allocate the ccw request. */
4468 itcw_size = itcw_calc_size(0, ctidaw, 0);
4469 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
4470 blk_mq_rq_to_pdu(req));
4471 if (IS_ERR(cqr))
4472 return cqr;
4473
4474 /* transfer length factor: how many bytes to read from the last track */
4475 if (first_trk == last_trk)
4476 tlf = last_offs - first_offs + 1;
4477 else
4478 tlf = last_offs + 1;
4479 tlf *= blksize;
4480
4481 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0);
4482 if (IS_ERR(itcw)) {
4483 ret = -EINVAL;
4484 goto out_error;
4485 }
4486 cqr->cpaddr = itcw_get_tcw(itcw);
4487 if (prepare_itcw(itcw, first_trk, last_trk,
4488 cmd, basedev, startdev,
4489 first_offs + 1,
4490 trkcount, blksize,
4491 (last_rec - first_rec + 1) * blksize,
4492 tlf, blk_per_trk) == -EAGAIN) {
4493 /* Clock not in sync and XRC is enabled.
4494 * Try again later.
4495 */
4496 ret = -EAGAIN;
4497 goto out_error;
4498 }
4499 len_to_track_end = 0;
4500 /*
4501 * A tidaw can address 4k of memory, but must not cross page boundaries.
4502 * We can let the block layer handle this by setting
4503 * blk_queue_segment_boundary to page boundaries and
4504 * blk_max_segment_size to page size when setting up the request queue.
4505 * For write requests, a TIDAW must not cross track boundaries, because
4506 * we have to set the CBC flag on the last tidaw for each track.
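* (The write loop below sets TIDAW_FLAGS_INSERT_CBC on the tidaw that
* closes each track for exactly this reason.)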
4507 */ 4508 if (rq_data_dir(req) == WRITE) { 4509 new_track = 1; 4510 recid = first_rec; 4511 rq_for_each_segment(bv, req, iter) { 4512 dst = page_address(bv.bv_page) + bv.bv_offset; 4513 seg_len = bv.bv_len; 4514 while (seg_len) { 4515 if (new_track) { 4516 trkid = recid; 4517 offs = sector_div(trkid, blk_per_trk); 4518 count_to_trk_end = blk_per_trk - offs; 4519 count = min((last_rec - recid + 1), 4520 (sector_t)count_to_trk_end); 4521 len_to_track_end = count * blksize; 4522 recid += count; 4523 new_track = 0; 4524 } 4525 part_len = min(seg_len, len_to_track_end); 4526 seg_len -= part_len; 4527 len_to_track_end -= part_len; 4528 /* We need to end the tidaw at track end */ 4529 if (!len_to_track_end) { 4530 new_track = 1; 4531 tidaw_flags = TIDAW_FLAGS_INSERT_CBC; 4532 } else 4533 tidaw_flags = 0; 4534 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 4535 dst, part_len); 4536 if (IS_ERR(last_tidaw)) { 4537 ret = -EINVAL; 4538 goto out_error; 4539 } 4540 dst += part_len; 4541 } 4542 } 4543 } else { 4544 rq_for_each_segment(bv, req, iter) { 4545 dst = page_address(bv.bv_page) + bv.bv_offset; 4546 last_tidaw = itcw_add_tidaw(itcw, 0x00, 4547 dst, bv.bv_len); 4548 if (IS_ERR(last_tidaw)) { 4549 ret = -EINVAL; 4550 goto out_error; 4551 } 4552 } 4553 } 4554 last_tidaw->flags |= TIDAW_FLAGS_LAST; 4555 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; 4556 itcw_finalize(itcw); 4557 4558 if (blk_noretry_request(req) || 4559 block->base->features & DASD_FEATURE_FAILFAST) 4560 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4561 cqr->cpmode = 1; 4562 cqr->startdev = startdev; 4563 cqr->memdev = startdev; 4564 cqr->block = block; 4565 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4566 cqr->lpm = dasd_path_get_ppm(startdev); 4567 cqr->retries = startdev->default_retries; 4568 cqr->buildclk = get_tod_clock(); 4569 cqr->status = DASD_CQR_FILLED; 4570 4571 /* Set flags to suppress output for expected errors */ 4572 if (dasd_eckd_is_ese(basedev)) { 4573 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4574 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4575 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4576 } 4577 4578 return cqr; 4579 out_error: 4580 dasd_sfree_request(cqr, startdev); 4581 return ERR_PTR(ret); 4582 } 4583 4584 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 4585 struct dasd_block *block, 4586 struct request *req) 4587 { 4588 int cmdrtd, cmdwtd; 4589 int use_prefix; 4590 int fcx_multitrack; 4591 struct dasd_eckd_private *private; 4592 struct dasd_device *basedev; 4593 sector_t first_rec, last_rec; 4594 sector_t first_trk, last_trk; 4595 unsigned int first_offs, last_offs; 4596 unsigned int blk_per_trk, blksize; 4597 int cdlspecial; 4598 unsigned int data_size; 4599 struct dasd_ccw_req *cqr; 4600 4601 basedev = block->base; 4602 private = basedev->private; 4603 4604 /* Calculate number of blocks/records per track. */ 4605 blksize = block->bp_block; 4606 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4607 if (blk_per_trk == 0) 4608 return ERR_PTR(-EINVAL); 4609 /* Calculate record id of first and last block. 
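* blk_rq_pos() counts 512-byte sectors; shifting by s2b_shift converts
* sectors to device blocks, and sector_div() then splits a block
* number into a track number and the record offset on that track.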
*/
4610 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
4611 first_offs = sector_div(first_trk, blk_per_trk);
4612 last_rec = last_trk =
4613 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
4614 last_offs = sector_div(last_trk, blk_per_trk);
4615 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
4616
4617 fcx_multitrack = private->features.feature[40] & 0x20;
4618 data_size = blk_rq_bytes(req);
4619 if (data_size % blksize)
4620 return ERR_PTR(-EINVAL);
4621 /* tpm write requests add CBC data on each track boundary */
4622 if (rq_data_dir(req) == WRITE)
4623 data_size += (last_trk - first_trk) * 4;
4624
4625 /* is read track data and write track data in command mode supported? */
4626 cmdrtd = private->features.feature[9] & 0x20;
4627 cmdwtd = private->features.feature[12] & 0x40;
4628 use_prefix = private->features.feature[8] & 0x01;
4629
4630 cqr = NULL;
4631 if (cdlspecial || dasd_page_cache) {
4632 /* do nothing, just fall through to the cmd mode single case */
4633 } else if ((data_size <= private->fcx_max_data)
4634 && (fcx_multitrack || (first_trk == last_trk))) {
4635 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req,
4636 first_rec, last_rec,
4637 first_trk, last_trk,
4638 first_offs, last_offs,
4639 blk_per_trk, blksize);
4640 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4641 (PTR_ERR(cqr) != -ENOMEM))
4642 cqr = NULL;
4643 } else if (use_prefix &&
4644 (((rq_data_dir(req) == READ) && cmdrtd) ||
4645 ((rq_data_dir(req) == WRITE) && cmdwtd))) {
4646 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req,
4647 first_rec, last_rec,
4648 first_trk, last_trk,
4649 first_offs, last_offs,
4650 blk_per_trk, blksize);
4651 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) &&
4652 (PTR_ERR(cqr) != -ENOMEM))
4653 cqr = NULL;
4654 }
4655 if (!cqr)
4656 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req,
4657 first_rec, last_rec,
4658 first_trk, last_trk,
4659 first_offs, last_offs,
4660 blk_per_trk, blksize);
4661 return cqr;
4662 }
4663
4664 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
4665 struct dasd_block *block,
4666 struct request *req)
4667 {
4668 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
4669 unsigned int seg_len, len_to_track_end;
4670 unsigned int cidaw, cplength, datasize;
4671 sector_t first_trk, last_trk, sectors;
4672 struct dasd_eckd_private *base_priv;
4673 struct dasd_device *basedev;
4674 struct req_iterator iter;
4675 struct dasd_ccw_req *cqr;
4676 unsigned int first_offs;
4677 unsigned int trkcount;
4678 unsigned long *idaws;
4679 unsigned int size;
4680 unsigned char cmd;
4681 struct bio_vec bv;
4682 struct ccw1 *ccw;
4683 int use_prefix;
4684 void *data;
4685 char *dst;
4686
4687 /*
4688 * raw track access must be a multiple of 64k and on a 64k boundary.
4689 * For read requests we can fix an incorrect alignment by padding
4690 * the request with dummy pages.
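* Example (hypothetical request): a read starting at sector 192 with
* 128 sectors per track gives start_padding_sectors = 192 % 128 = 64,
* i.e. eight 4k dummy pages are prepended so that a full track is
* still transferred.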
4691 */ 4692 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; 4693 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % 4694 DASD_RAW_SECTORS_PER_TRACK; 4695 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % 4696 DASD_RAW_SECTORS_PER_TRACK; 4697 basedev = block->base; 4698 if ((start_padding_sectors || end_padding_sectors) && 4699 (rq_data_dir(req) == WRITE)) { 4700 DBF_DEV_EVENT(DBF_ERR, basedev, 4701 "raw write not track aligned (%llu,%llu) req %p", 4702 start_padding_sectors, end_padding_sectors, req); 4703 return ERR_PTR(-EINVAL); 4704 } 4705 4706 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; 4707 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / 4708 DASD_RAW_SECTORS_PER_TRACK; 4709 trkcount = last_trk - first_trk + 1; 4710 first_offs = 0; 4711 4712 if (rq_data_dir(req) == READ) 4713 cmd = DASD_ECKD_CCW_READ_TRACK; 4714 else if (rq_data_dir(req) == WRITE) 4715 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; 4716 else 4717 return ERR_PTR(-EINVAL); 4718 4719 /* 4720 * Raw track based I/O needs IDAWs for each page, 4721 * and not just for 64 bit addresses. 4722 */ 4723 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; 4724 4725 /* 4726 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes 4727 * of extended parameter. This is needed for write full track. 4728 */ 4729 base_priv = basedev->private; 4730 use_prefix = base_priv->features.feature[8] & 0x01; 4731 if (use_prefix) { 4732 cplength = 1 + trkcount; 4733 size = sizeof(struct PFX_eckd_data) + 2; 4734 } else { 4735 cplength = 2 + trkcount; 4736 size = sizeof(struct DE_eckd_data) + 4737 sizeof(struct LRE_eckd_data) + 2; 4738 } 4739 size = ALIGN(size, 8); 4740 4741 datasize = size + cidaw * sizeof(unsigned long); 4742 4743 /* Allocate the ccw request. 
*/ 4744 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 4745 datasize, startdev, blk_mq_rq_to_pdu(req)); 4746 if (IS_ERR(cqr)) 4747 return cqr; 4748 4749 ccw = cqr->cpaddr; 4750 data = cqr->data; 4751 4752 if (use_prefix) { 4753 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, 4754 startdev, 1, first_offs + 1, trkcount, 0, 0); 4755 } else { 4756 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); 4757 ccw[-1].flags |= CCW_FLAG_CC; 4758 4759 data += sizeof(struct DE_eckd_data); 4760 locate_record_ext(ccw++, data, first_trk, first_offs + 1, 4761 trkcount, cmd, basedev, 0, 0); 4762 } 4763 4764 idaws = (unsigned long *)(cqr->data + size); 4765 len_to_track_end = 0; 4766 if (start_padding_sectors) { 4767 ccw[-1].flags |= CCW_FLAG_CC; 4768 ccw->cmd_code = cmd; 4769 /* maximum 3390 track size */ 4770 ccw->count = 57326; 4771 /* 64k map to one track */ 4772 len_to_track_end = 65536 - start_padding_sectors * 512; 4773 ccw->cda = (__u32)(addr_t)idaws; 4774 ccw->flags |= CCW_FLAG_IDA; 4775 ccw->flags |= CCW_FLAG_SLI; 4776 ccw++; 4777 for (sectors = 0; sectors < start_padding_sectors; sectors += 8) 4778 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4779 } 4780 rq_for_each_segment(bv, req, iter) { 4781 dst = page_address(bv.bv_page) + bv.bv_offset; 4782 seg_len = bv.bv_len; 4783 if (cmd == DASD_ECKD_CCW_READ_TRACK) 4784 memset(dst, 0, seg_len); 4785 if (!len_to_track_end) { 4786 ccw[-1].flags |= CCW_FLAG_CC; 4787 ccw->cmd_code = cmd; 4788 /* maximum 3390 track size */ 4789 ccw->count = 57326; 4790 /* 64k map to one track */ 4791 len_to_track_end = 65536; 4792 ccw->cda = (__u32)(addr_t)idaws; 4793 ccw->flags |= CCW_FLAG_IDA; 4794 ccw->flags |= CCW_FLAG_SLI; 4795 ccw++; 4796 } 4797 len_to_track_end -= seg_len; 4798 idaws = idal_create_words(idaws, dst, seg_len); 4799 } 4800 for (sectors = 0; sectors < end_padding_sectors; sectors += 8) 4801 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4802 if (blk_noretry_request(req) || 4803 block->base->features & DASD_FEATURE_FAILFAST) 4804 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4805 cqr->startdev = startdev; 4806 cqr->memdev = startdev; 4807 cqr->block = block; 4808 cqr->expires = startdev->default_expires * HZ; 4809 cqr->lpm = dasd_path_get_ppm(startdev); 4810 cqr->retries = startdev->default_retries; 4811 cqr->buildclk = get_tod_clock(); 4812 cqr->status = DASD_CQR_FILLED; 4813 4814 return cqr; 4815 } 4816 4817 4818 static int 4819 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 4820 { 4821 struct dasd_eckd_private *private; 4822 struct ccw1 *ccw; 4823 struct req_iterator iter; 4824 struct bio_vec bv; 4825 char *dst, *cda; 4826 unsigned int blksize, blk_per_trk, off; 4827 sector_t recid; 4828 int status; 4829 4830 if (!dasd_page_cache) 4831 goto out; 4832 private = cqr->block->base->private; 4833 blksize = cqr->block->bp_block; 4834 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4835 recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 4836 ccw = cqr->cpaddr; 4837 /* Skip over define extent & locate record. */ 4838 ccw++; 4839 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 4840 ccw++; 4841 rq_for_each_segment(bv, req, iter) { 4842 dst = page_address(bv.bv_page) + bv.bv_offset; 4843 for (off = 0; off < bv.bv_len; off += blksize) { 4844 /* Skip locate record. 
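* Each block in the CDL area of the first two tracks was given its
* own locate record CCW when the channel program was built, so step
* over that CCW here as well.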
*/ 4845 if (private->uses_cdl && recid <= 2*blk_per_trk) 4846 ccw++; 4847 if (dst) { 4848 if (ccw->flags & CCW_FLAG_IDA) 4849 cda = *((char **)((addr_t) ccw->cda)); 4850 else 4851 cda = (char *)((addr_t) ccw->cda); 4852 if (dst != cda) { 4853 if (rq_data_dir(req) == READ) 4854 memcpy(dst, cda, bv.bv_len); 4855 kmem_cache_free(dasd_page_cache, 4856 (void *)((addr_t)cda & PAGE_MASK)); 4857 } 4858 dst = NULL; 4859 } 4860 ccw++; 4861 recid++; 4862 } 4863 } 4864 out: 4865 status = cqr->status == DASD_CQR_DONE; 4866 dasd_sfree_request(cqr, cqr->memdev); 4867 return status; 4868 } 4869 4870 /* 4871 * Modify ccw/tcw in cqr so it can be started on a base device. 4872 * 4873 * Note that this is not enough to restart the cqr! 4874 * Either reset cqr->startdev as well (summary unit check handling) 4875 * or restart via separate cqr (as in ERP handling). 4876 */ 4877 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 4878 { 4879 struct ccw1 *ccw; 4880 struct PFX_eckd_data *pfxdata; 4881 struct tcw *tcw; 4882 struct tccb *tccb; 4883 struct dcw *dcw; 4884 4885 if (cqr->cpmode == 1) { 4886 tcw = cqr->cpaddr; 4887 tccb = tcw_get_tccb(tcw); 4888 dcw = (struct dcw *)&tccb->tca[0]; 4889 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 4890 pfxdata->validity.verify_base = 0; 4891 pfxdata->validity.hyper_pav = 0; 4892 } else { 4893 ccw = cqr->cpaddr; 4894 pfxdata = cqr->data; 4895 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 4896 pfxdata->validity.verify_base = 0; 4897 pfxdata->validity.hyper_pav = 0; 4898 } 4899 } 4900 } 4901 4902 #define DASD_ECKD_CHANQ_MAX_SIZE 4 4903 4904 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 4905 struct dasd_block *block, 4906 struct request *req) 4907 { 4908 struct dasd_eckd_private *private; 4909 struct dasd_device *startdev; 4910 unsigned long flags; 4911 struct dasd_ccw_req *cqr; 4912 4913 startdev = dasd_alias_get_start_dev(base); 4914 if (!startdev) 4915 startdev = base; 4916 private = startdev->private; 4917 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 4918 return ERR_PTR(-EBUSY); 4919 4920 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 4921 private->count++; 4922 if ((base->features & DASD_FEATURE_USERAW)) 4923 cqr = dasd_eckd_build_cp_raw(startdev, block, req); 4924 else 4925 cqr = dasd_eckd_build_cp(startdev, block, req); 4926 if (IS_ERR(cqr)) 4927 private->count--; 4928 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 4929 return cqr; 4930 } 4931 4932 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 4933 struct request *req) 4934 { 4935 struct dasd_eckd_private *private; 4936 unsigned long flags; 4937 4938 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 4939 private = cqr->memdev->private; 4940 private->count--; 4941 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 4942 return dasd_eckd_free_cp(cqr, req); 4943 } 4944 4945 static int 4946 dasd_eckd_fill_info(struct dasd_device * device, 4947 struct dasd_information2_t * info) 4948 { 4949 struct dasd_eckd_private *private = device->private; 4950 4951 info->label_block = 2; 4952 info->FBA_layout = private->uses_cdl ? 0 : 1; 4953 info->format = private->uses_cdl ? 
DASD_FORMAT_CDL : DASD_FORMAT_LDL;
4954 info->characteristics_size = sizeof(private->rdc_data);
4955 memcpy(info->characteristics, &private->rdc_data,
4956 sizeof(private->rdc_data));
4957 info->confdata_size = min((unsigned long)private->conf_len,
4958 sizeof(info->configuration_data));
4959 memcpy(info->configuration_data, private->conf_data,
4960 info->confdata_size);
4961 return 0;
4962 }
4963
4964 /*
4965 * SECTION: ioctl functions for eckd devices.
4966 */
4967
4968 /*
4969 * Release device ioctl.
4970 * Builds a channel program to release a previously reserved
4971 * (see dasd_eckd_reserve) device.
4972 */
4973 static int
4974 dasd_eckd_release(struct dasd_device *device)
4975 {
4976 struct dasd_ccw_req *cqr;
4977 int rc;
4978 struct ccw1 *ccw;
4979 int useglobal;
4980
4981 if (!capable(CAP_SYS_ADMIN))
4982 return -EACCES;
4983
4984 useglobal = 0;
4985 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
4986 if (IS_ERR(cqr)) {
4987 mutex_lock(&dasd_reserve_mutex);
4988 useglobal = 1;
4989 cqr = &dasd_reserve_req->cqr;
4990 memset(cqr, 0, sizeof(*cqr));
4991 memset(&dasd_reserve_req->ccw, 0,
4992 sizeof(dasd_reserve_req->ccw));
4993 cqr->cpaddr = &dasd_reserve_req->ccw;
4994 cqr->data = &dasd_reserve_req->data;
4995 cqr->magic = DASD_ECKD_MAGIC;
4996 }
4997 ccw = cqr->cpaddr;
4998 ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
4999 ccw->flags |= CCW_FLAG_SLI;
5000 ccw->count = 32;
5001 ccw->cda = (__u32)(addr_t) cqr->data;
5002 cqr->startdev = device;
5003 cqr->memdev = device;
5004 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5005 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5006 cqr->retries = 2; /* set retry counter to enable basic ERP */
5007 cqr->expires = 2 * HZ;
5008 cqr->buildclk = get_tod_clock();
5009 cqr->status = DASD_CQR_FILLED;
5010
5011 rc = dasd_sleep_on_immediatly(cqr);
5012 if (!rc)
5013 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5014
5015 if (useglobal)
5016 mutex_unlock(&dasd_reserve_mutex);
5017 else
5018 dasd_sfree_request(cqr, cqr->memdev);
5019 return rc;
5020 }
5021
5022 /*
5023 * Reserve device ioctl.
5024 * Options are set to 'synchronous wait for interrupt' and
5025 * 'timeout the request'. This leads to a terminate IO if
5026 * the interrupt is outstanding for a certain time.
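* If memory is too tight to allocate a fresh request, the static
* dasd_reserve_req is used instead, serialized by dasd_reserve_mutex.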
5027 */
5028 static int
5029 dasd_eckd_reserve(struct dasd_device *device)
5030 {
5031 struct dasd_ccw_req *cqr;
5032 int rc;
5033 struct ccw1 *ccw;
5034 int useglobal;
5035
5036 if (!capable(CAP_SYS_ADMIN))
5037 return -EACCES;
5038
5039 useglobal = 0;
5040 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5041 if (IS_ERR(cqr)) {
5042 mutex_lock(&dasd_reserve_mutex);
5043 useglobal = 1;
5044 cqr = &dasd_reserve_req->cqr;
5045 memset(cqr, 0, sizeof(*cqr));
5046 memset(&dasd_reserve_req->ccw, 0,
5047 sizeof(dasd_reserve_req->ccw));
5048 cqr->cpaddr = &dasd_reserve_req->ccw;
5049 cqr->data = &dasd_reserve_req->data;
5050 cqr->magic = DASD_ECKD_MAGIC;
5051 }
5052 ccw = cqr->cpaddr;
5053 ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
5054 ccw->flags |= CCW_FLAG_SLI;
5055 ccw->count = 32;
5056 ccw->cda = (__u32)(addr_t) cqr->data;
5057 cqr->startdev = device;
5058 cqr->memdev = device;
5059 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5060 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5061 cqr->retries = 2; /* set retry counter to enable basic ERP */
5062 cqr->expires = 2 * HZ;
5063 cqr->buildclk = get_tod_clock();
5064 cqr->status = DASD_CQR_FILLED;
5065
5066 rc = dasd_sleep_on_immediatly(cqr);
5067 if (!rc)
5068 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5069
5070 if (useglobal)
5071 mutex_unlock(&dasd_reserve_mutex);
5072 else
5073 dasd_sfree_request(cqr, cqr->memdev);
5074 return rc;
5075 }
5076
5077 /*
5078 * Steal lock ioctl - unconditional reserve device.
5079 * Builds a channel program to break a device's reservation
5080 * (unconditional reserve).
5081 */
5082 static int
5083 dasd_eckd_steal_lock(struct dasd_device *device)
5084 {
5085 struct dasd_ccw_req *cqr;
5086 int rc;
5087 struct ccw1 *ccw;
5088 int useglobal;
5089
5090 if (!capable(CAP_SYS_ADMIN))
5091 return -EACCES;
5092
5093 useglobal = 0;
5094 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
5095 if (IS_ERR(cqr)) {
5096 mutex_lock(&dasd_reserve_mutex);
5097 useglobal = 1;
5098 cqr = &dasd_reserve_req->cqr;
5099 memset(cqr, 0, sizeof(*cqr));
5100 memset(&dasd_reserve_req->ccw, 0,
5101 sizeof(dasd_reserve_req->ccw));
5102 cqr->cpaddr = &dasd_reserve_req->ccw;
5103 cqr->data = &dasd_reserve_req->data;
5104 cqr->magic = DASD_ECKD_MAGIC;
5105 }
5106 ccw = cqr->cpaddr;
5107 ccw->cmd_code = DASD_ECKD_CCW_SLCK;
5108 ccw->flags |= CCW_FLAG_SLI;
5109 ccw->count = 32;
5110 ccw->cda = (__u32)(addr_t) cqr->data;
5111 cqr->startdev = device;
5112 cqr->memdev = device;
5113 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
5114 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
5115 cqr->retries = 2; /* set retry counter to enable basic ERP */
5116 cqr->expires = 2 * HZ;
5117 cqr->buildclk = get_tod_clock();
5118 cqr->status = DASD_CQR_FILLED;
5119
5120 rc = dasd_sleep_on_immediatly(cqr);
5121 if (!rc)
5122 set_bit(DASD_FLAG_IS_RESERVED, &device->flags);
5123
5124 if (useglobal)
5125 mutex_unlock(&dasd_reserve_mutex);
5126 else
5127 dasd_sfree_request(cqr, cqr->memdev);
5128 return rc;
5129 }
5130
5131 /*
5132 * SNID - Sense Path Group ID
5133 * This ioctl may be used in situations where I/O is stalled due to
5134 * a reserve, so if the normal dasd_smalloc_request fails, we use the
5135 * preallocated dasd_reserve_req.
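* The caller supplies a path mask; the request is started on exactly
* those paths and fails with -EIO if I/O processing changed the mask.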
5136 */ 5137 static int dasd_eckd_snid(struct dasd_device *device, 5138 void __user *argp) 5139 { 5140 struct dasd_ccw_req *cqr; 5141 int rc; 5142 struct ccw1 *ccw; 5143 int useglobal; 5144 struct dasd_snid_ioctl_data usrparm; 5145 5146 if (!capable(CAP_SYS_ADMIN)) 5147 return -EACCES; 5148 5149 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5150 return -EFAULT; 5151 5152 useglobal = 0; 5153 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 5154 sizeof(struct dasd_snid_data), device, 5155 NULL); 5156 if (IS_ERR(cqr)) { 5157 mutex_lock(&dasd_reserve_mutex); 5158 useglobal = 1; 5159 cqr = &dasd_reserve_req->cqr; 5160 memset(cqr, 0, sizeof(*cqr)); 5161 memset(&dasd_reserve_req->ccw, 0, 5162 sizeof(dasd_reserve_req->ccw)); 5163 cqr->cpaddr = &dasd_reserve_req->ccw; 5164 cqr->data = &dasd_reserve_req->data; 5165 cqr->magic = DASD_ECKD_MAGIC; 5166 } 5167 ccw = cqr->cpaddr; 5168 ccw->cmd_code = DASD_ECKD_CCW_SNID; 5169 ccw->flags |= CCW_FLAG_SLI; 5170 ccw->count = 12; 5171 ccw->cda = (__u32)(addr_t) cqr->data; 5172 cqr->startdev = device; 5173 cqr->memdev = device; 5174 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5175 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5176 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 5177 cqr->retries = 5; 5178 cqr->expires = 10 * HZ; 5179 cqr->buildclk = get_tod_clock(); 5180 cqr->status = DASD_CQR_FILLED; 5181 cqr->lpm = usrparm.path_mask; 5182 5183 rc = dasd_sleep_on_immediatly(cqr); 5184 /* verify that I/O processing didn't modify the path mask */ 5185 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) 5186 rc = -EIO; 5187 if (!rc) { 5188 usrparm.data = *((struct dasd_snid_data *)cqr->data); 5189 if (copy_to_user(argp, &usrparm, sizeof(usrparm))) 5190 rc = -EFAULT; 5191 } 5192 5193 if (useglobal) 5194 mutex_unlock(&dasd_reserve_mutex); 5195 else 5196 dasd_sfree_request(cqr, cqr->memdev); 5197 return rc; 5198 } 5199 5200 /* 5201 * Read performance statistics 5202 */ 5203 static int 5204 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 5205 { 5206 struct dasd_psf_prssd_data *prssdp; 5207 struct dasd_rssd_perf_stats_t *stats; 5208 struct dasd_ccw_req *cqr; 5209 struct ccw1 *ccw; 5210 int rc; 5211 5212 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5213 (sizeof(struct dasd_psf_prssd_data) + 5214 sizeof(struct dasd_rssd_perf_stats_t)), 5215 device, NULL); 5216 if (IS_ERR(cqr)) { 5217 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5218 "Could not allocate initialization request"); 5219 return PTR_ERR(cqr); 5220 } 5221 cqr->startdev = device; 5222 cqr->memdev = device; 5223 cqr->retries = 0; 5224 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5225 cqr->expires = 10 * HZ; 5226 5227 /* Prepare for Read Subsystem Data */ 5228 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5229 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5230 prssdp->order = PSF_ORDER_PRSSD; 5231 prssdp->suborder = 0x01; /* Performance Statistics */ 5232 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 5233 5234 ccw = cqr->cpaddr; 5235 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5236 ccw->count = sizeof(struct dasd_psf_prssd_data); 5237 ccw->flags |= CCW_FLAG_CC; 5238 ccw->cda = (__u32)(addr_t) prssdp; 5239 5240 /* Read Subsystem Data - Performance Statistics */ 5241 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5242 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 5243 5244 ccw++; 5245 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5246 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 5247 ccw->cda = 
(__u32)(addr_t) stats;
5248
5249 cqr->buildclk = get_tod_clock();
5250 cqr->status = DASD_CQR_FILLED;
5251 rc = dasd_sleep_on(cqr);
5252 if (rc == 0) {
5253 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
5254 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
5255 if (copy_to_user(argp, stats,
5256 sizeof(struct dasd_rssd_perf_stats_t)))
5257 rc = -EFAULT;
5258 }
5259 dasd_sfree_request(cqr, cqr->memdev);
5260 return rc;
5261 }
5262
5263 /*
5264 * Get attributes (cache operations)
5265 * Returns the cache attributes used in Define Extent (DE).
5266 */
5267 static int
5268 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
5269 {
5270 struct dasd_eckd_private *private = device->private;
5271 struct attrib_data_t attrib = private->attrib;
5272 int rc;
5273
5274 if (!capable(CAP_SYS_ADMIN))
5275 return -EACCES;
5276 if (!argp)
5277 return -EINVAL;
5278
5279 rc = 0;
5280 if (copy_to_user(argp, (long *) &attrib,
5281 sizeof(struct attrib_data_t)))
5282 rc = -EFAULT;
5283
5284 return rc;
5285 }
5286
5287 /*
5288 * Set attributes (cache operations)
5289 * Stores the attributes for cache operations to be used in Define Extent (DE).
5290 */
5291 static int
5292 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
5293 {
5294 struct dasd_eckd_private *private = device->private;
5295 struct attrib_data_t attrib;
5296
5297 if (!capable(CAP_SYS_ADMIN))
5298 return -EACCES;
5299 if (!argp)
5300 return -EINVAL;
5301
5302 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
5303 return -EFAULT;
5304 private->attrib = attrib;
5305
5306 dev_info(&device->cdev->dev,
5307 "The DASD cache mode was set to %x (%i cylinder prestage)\n",
5308 private->attrib.operation, private->attrib.nr_cyl);
5309 return 0;
5310 }
5311
5312 /*
5313 * Issue syscall I/O to EMC Symmetrix array.
5314 * CCWs are PSF and RSSD.
5315 */
5316 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
5317 {
5318 struct dasd_symmio_parms usrparm;
5319 char *psf_data, *rssd_result;
5320 struct dasd_ccw_req *cqr;
5321 struct ccw1 *ccw;
5322 char psf0, psf1;
5323 int rc;
5324
5325 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
5326 return -EACCES;
5327 psf0 = psf1 = 0;
5328
5329 /* Copy parms from caller */
5330 rc = -EFAULT;
5331 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
5332 goto out;
5333 if (is_compat_task()) {
5334 /* Make sure pointers are sane even on 31 bit.
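* A compat (31-bit) caller can only address the low 2 GB, so the
* upper word of each user pointer must be zero and the address is
* masked down to 31 bits before use.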
*/ 5335 rc = -EINVAL; 5336 if ((usrparm.psf_data >> 32) != 0) 5337 goto out; 5338 if ((usrparm.rssd_result >> 32) != 0) 5339 goto out; 5340 usrparm.psf_data &= 0x7fffffffULL; 5341 usrparm.rssd_result &= 0x7fffffffULL; 5342 } 5343 /* at least 2 bytes are accessed and should be allocated */ 5344 if (usrparm.psf_data_len < 2) { 5345 DBF_DEV_EVENT(DBF_WARNING, device, 5346 "Symmetrix ioctl invalid data length %d", 5347 usrparm.psf_data_len); 5348 rc = -EINVAL; 5349 goto out; 5350 } 5351 /* alloc I/O data area */ 5352 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 5353 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 5354 if (!psf_data || !rssd_result) { 5355 rc = -ENOMEM; 5356 goto out_free; 5357 } 5358 5359 /* get syscall header from user space */ 5360 rc = -EFAULT; 5361 if (copy_from_user(psf_data, 5362 (void __user *)(unsigned long) usrparm.psf_data, 5363 usrparm.psf_data_len)) 5364 goto out_free; 5365 psf0 = psf_data[0]; 5366 psf1 = psf_data[1]; 5367 5368 /* setup CCWs for PSF + RSSD */ 5369 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); 5370 if (IS_ERR(cqr)) { 5371 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5372 "Could not allocate initialization request"); 5373 rc = PTR_ERR(cqr); 5374 goto out_free; 5375 } 5376 5377 cqr->startdev = device; 5378 cqr->memdev = device; 5379 cqr->retries = 3; 5380 cqr->expires = 10 * HZ; 5381 cqr->buildclk = get_tod_clock(); 5382 cqr->status = DASD_CQR_FILLED; 5383 5384 /* Build the ccws */ 5385 ccw = cqr->cpaddr; 5386 5387 /* PSF ccw */ 5388 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5389 ccw->count = usrparm.psf_data_len; 5390 ccw->flags |= CCW_FLAG_CC; 5391 ccw->cda = (__u32)(addr_t) psf_data; 5392 5393 ccw++; 5394 5395 /* RSSD ccw */ 5396 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5397 ccw->count = usrparm.rssd_result_len; 5398 ccw->flags = CCW_FLAG_SLI ; 5399 ccw->cda = (__u32)(addr_t) rssd_result; 5400 5401 rc = dasd_sleep_on(cqr); 5402 if (rc) 5403 goto out_sfree; 5404 5405 rc = -EFAULT; 5406 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 5407 rssd_result, usrparm.rssd_result_len)) 5408 goto out_sfree; 5409 rc = 0; 5410 5411 out_sfree: 5412 dasd_sfree_request(cqr, cqr->memdev); 5413 out_free: 5414 kfree(rssd_result); 5415 kfree(psf_data); 5416 out: 5417 DBF_DEV_EVENT(DBF_WARNING, device, 5418 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", 5419 (int) psf0, (int) psf1, rc); 5420 return rc; 5421 } 5422 5423 static int 5424 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 5425 { 5426 struct dasd_device *device = block->base; 5427 5428 switch (cmd) { 5429 case BIODASDGATTR: 5430 return dasd_eckd_get_attrib(device, argp); 5431 case BIODASDSATTR: 5432 return dasd_eckd_set_attrib(device, argp); 5433 case BIODASDPSRD: 5434 return dasd_eckd_performance(device, argp); 5435 case BIODASDRLSE: 5436 return dasd_eckd_release(device); 5437 case BIODASDRSRV: 5438 return dasd_eckd_reserve(device); 5439 case BIODASDSLCK: 5440 return dasd_eckd_steal_lock(device); 5441 case BIODASDSNID: 5442 return dasd_eckd_snid(device, argp); 5443 case BIODASDSYMMIO: 5444 return dasd_symm_io(device, argp); 5445 default: 5446 return -ENOTTY; 5447 } 5448 } 5449 5450 /* 5451 * Dump the range of CCWs into 'page' buffer 5452 * and return number of printed chars. 
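* For each CCW the raw command word is printed, followed by up to
* 32 bytes of its data area (resolved through the IDAL if the CCW
* uses indirect addressing).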
5453 */ 5454 static int 5455 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 5456 { 5457 int len, count; 5458 char *datap; 5459 5460 len = 0; 5461 while (from <= to) { 5462 len += sprintf(page + len, PRINTK_HEADER 5463 " CCW %p: %08X %08X DAT:", 5464 from, ((int *) from)[0], ((int *) from)[1]); 5465 5466 /* get pointer to data (consider IDALs) */ 5467 if (from->flags & CCW_FLAG_IDA) 5468 datap = (char *) *((addr_t *) (addr_t) from->cda); 5469 else 5470 datap = (char *) ((addr_t) from->cda); 5471 5472 /* dump data (max 32 bytes) */ 5473 for (count = 0; count < from->count && count < 32; count++) { 5474 if (count % 8 == 0) len += sprintf(page + len, " "); 5475 if (count % 4 == 0) len += sprintf(page + len, " "); 5476 len += sprintf(page + len, "%02x", datap[count]); 5477 } 5478 len += sprintf(page + len, "\n"); 5479 from++; 5480 } 5481 return len; 5482 } 5483 5484 static void 5485 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 5486 char *reason) 5487 { 5488 u64 *sense; 5489 u64 *stat; 5490 5491 sense = (u64 *) dasd_get_sense(irb); 5492 stat = (u64 *) &irb->scsw; 5493 if (sense) { 5494 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " 5495 "%016llx %016llx %016llx %016llx", 5496 reason, *stat, *((u32 *) (stat + 1)), 5497 sense[0], sense[1], sense[2], sense[3]); 5498 } else { 5499 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", 5500 reason, *stat, *((u32 *) (stat + 1)), 5501 "NO VALID SENSE"); 5502 } 5503 } 5504 5505 /* 5506 * Print sense data and related channel program. 5507 * Parts are printed because printk buffer is only 1024 bytes. 5508 */ 5509 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 5510 struct dasd_ccw_req *req, struct irb *irb) 5511 { 5512 char *page; 5513 struct ccw1 *first, *last, *fail, *from, *to; 5514 int len, sl, sct; 5515 5516 page = (char *) get_zeroed_page(GFP_ATOMIC); 5517 if (page == NULL) { 5518 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5519 "No memory to dump sense data\n"); 5520 return; 5521 } 5522 /* dump the sense data */ 5523 len = sprintf(page, PRINTK_HEADER 5524 " I/O status report for device %s:\n", 5525 dev_name(&device->cdev->dev)); 5526 len += sprintf(page + len, PRINTK_HEADER 5527 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5528 "CS:%02X RC:%d\n", 5529 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5530 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5531 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5532 req ? req->intrc : 0); 5533 len += sprintf(page + len, PRINTK_HEADER 5534 " device %s: Failing CCW: %p\n", 5535 dev_name(&device->cdev->dev), 5536 (void *) (addr_t) irb->scsw.cmd.cpa); 5537 if (irb->esw.esw0.erw.cons) { 5538 for (sl = 0; sl < 4; sl++) { 5539 len += sprintf(page + len, PRINTK_HEADER 5540 " Sense(hex) %2d-%2d:", 5541 (8 * sl), ((8 * sl) + 7)); 5542 5543 for (sct = 0; sct < 8; sct++) { 5544 len += sprintf(page + len, " %02x", 5545 irb->ecw[8 * sl + sct]); 5546 } 5547 len += sprintf(page + len, "\n"); 5548 } 5549 5550 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 5551 /* 24 Byte Sense Data */ 5552 sprintf(page + len, PRINTK_HEADER 5553 " 24 Byte: %x MSG %x, " 5554 "%s MSGb to SYSOP\n", 5555 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 5556 irb->ecw[1] & 0x10 ? 
"" : "no"); 5557 } else { 5558 /* 32 Byte Sense Data */ 5559 sprintf(page + len, PRINTK_HEADER 5560 " 32 Byte: Format: %x " 5561 "Exception class %x\n", 5562 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 5563 } 5564 } else { 5565 sprintf(page + len, PRINTK_HEADER 5566 " SORRY - NO VALID SENSE AVAILABLE\n"); 5567 } 5568 printk(KERN_ERR "%s", page); 5569 5570 if (req) { 5571 /* req == NULL for unsolicited interrupts */ 5572 /* dump the Channel Program (max 140 Bytes per line) */ 5573 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 5574 first = req->cpaddr; 5575 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 5576 to = min(first + 6, last); 5577 len = sprintf(page, PRINTK_HEADER 5578 " Related CP in req: %p\n", req); 5579 dasd_eckd_dump_ccw_range(first, to, page + len); 5580 printk(KERN_ERR "%s", page); 5581 5582 /* print failing CCW area (maximum 4) */ 5583 /* scsw->cda is either valid or zero */ 5584 len = 0; 5585 from = ++to; 5586 fail = (struct ccw1 *)(addr_t) 5587 irb->scsw.cmd.cpa; /* failing CCW */ 5588 if (from < fail - 2) { 5589 from = fail - 2; /* there is a gap - print header */ 5590 len += sprintf(page, PRINTK_HEADER "......\n"); 5591 } 5592 to = min(fail + 1, last); 5593 len += dasd_eckd_dump_ccw_range(from, to, page + len); 5594 5595 /* print last CCWs (maximum 2) */ 5596 from = max(from, ++to); 5597 if (from < last - 1) { 5598 from = last - 1; /* there is a gap - print header */ 5599 len += sprintf(page + len, PRINTK_HEADER "......\n"); 5600 } 5601 len += dasd_eckd_dump_ccw_range(from, last, page + len); 5602 if (len > 0) 5603 printk(KERN_ERR "%s", page); 5604 } 5605 free_page((unsigned long) page); 5606 } 5607 5608 5609 /* 5610 * Print sense data from a tcw. 5611 */ 5612 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 5613 struct dasd_ccw_req *req, struct irb *irb) 5614 { 5615 char *page; 5616 int len, sl, sct, residual; 5617 struct tsb *tsb; 5618 u8 *sense, *rcq; 5619 5620 page = (char *) get_zeroed_page(GFP_ATOMIC); 5621 if (page == NULL) { 5622 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 5623 "No memory to dump sense data"); 5624 return; 5625 } 5626 /* dump the sense data */ 5627 len = sprintf(page, PRINTK_HEADER 5628 " I/O status report for device %s:\n", 5629 dev_name(&device->cdev->dev)); 5630 len += sprintf(page + len, PRINTK_HEADER 5631 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5632 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 5633 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5634 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5635 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5636 irb->scsw.tm.fcxs, 5637 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, 5638 req ? 
req->intrc : 0); 5639 len += sprintf(page + len, PRINTK_HEADER 5640 " device %s: Failing TCW: %p\n", 5641 dev_name(&device->cdev->dev), 5642 (void *) (addr_t) irb->scsw.tm.tcw); 5643 5644 tsb = NULL; 5645 sense = NULL; 5646 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 5647 tsb = tcw_get_tsb( 5648 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 5649 5650 if (tsb) { 5651 len += sprintf(page + len, PRINTK_HEADER 5652 " tsb->length %d\n", tsb->length); 5653 len += sprintf(page + len, PRINTK_HEADER 5654 " tsb->flags %x\n", tsb->flags); 5655 len += sprintf(page + len, PRINTK_HEADER 5656 " tsb->dcw_offset %d\n", tsb->dcw_offset); 5657 len += sprintf(page + len, PRINTK_HEADER 5658 " tsb->count %d\n", tsb->count); 5659 residual = tsb->count - 28; 5660 len += sprintf(page + len, PRINTK_HEADER 5661 " residual %d\n", residual); 5662 5663 switch (tsb->flags & 0x07) { 5664 case 1: /* tsa_iostat */ 5665 len += sprintf(page + len, PRINTK_HEADER 5666 " tsb->tsa.iostat.dev_time %d\n", 5667 tsb->tsa.iostat.dev_time); 5668 len += sprintf(page + len, PRINTK_HEADER 5669 " tsb->tsa.iostat.def_time %d\n", 5670 tsb->tsa.iostat.def_time); 5671 len += sprintf(page + len, PRINTK_HEADER 5672 " tsb->tsa.iostat.queue_time %d\n", 5673 tsb->tsa.iostat.queue_time); 5674 len += sprintf(page + len, PRINTK_HEADER 5675 " tsb->tsa.iostat.dev_busy_time %d\n", 5676 tsb->tsa.iostat.dev_busy_time); 5677 len += sprintf(page + len, PRINTK_HEADER 5678 " tsb->tsa.iostat.dev_act_time %d\n", 5679 tsb->tsa.iostat.dev_act_time); 5680 sense = tsb->tsa.iostat.sense; 5681 break; 5682 case 2: /* ts_ddpc */ 5683 len += sprintf(page + len, PRINTK_HEADER 5684 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 5685 for (sl = 0; sl < 2; sl++) { 5686 len += sprintf(page + len, PRINTK_HEADER 5687 " tsb->tsa.ddpc.rcq %2d-%2d: ", 5688 (8 * sl), ((8 * sl) + 7)); 5689 rcq = tsb->tsa.ddpc.rcq; 5690 for (sct = 0; sct < 8; sct++) { 5691 len += sprintf(page + len, " %02x", 5692 rcq[8 * sl + sct]); 5693 } 5694 len += sprintf(page + len, "\n"); 5695 } 5696 sense = tsb->tsa.ddpc.sense; 5697 break; 5698 case 3: /* tsa_intrg */ 5699 len += sprintf(page + len, PRINTK_HEADER 5700 " tsb->tsa.intrg.: not supported yet\n"); 5701 break; 5702 } 5703 5704 if (sense) { 5705 for (sl = 0; sl < 4; sl++) { 5706 len += sprintf(page + len, PRINTK_HEADER 5707 " Sense(hex) %2d-%2d:", 5708 (8 * sl), ((8 * sl) + 7)); 5709 for (sct = 0; sct < 8; sct++) { 5710 len += sprintf(page + len, " %02x", 5711 sense[8 * sl + sct]); 5712 } 5713 len += sprintf(page + len, "\n"); 5714 } 5715 5716 if (sense[27] & DASD_SENSE_BIT_0) { 5717 /* 24 Byte Sense Data */ 5718 sprintf(page + len, PRINTK_HEADER 5719 " 24 Byte: %x MSG %x, " 5720 "%s MSGb to SYSOP\n", 5721 sense[7] >> 4, sense[7] & 0x0f, 5722 sense[1] & 0x10 ? 
"" : "no"); 5723 } else { 5724 /* 32 Byte Sense Data */ 5725 sprintf(page + len, PRINTK_HEADER 5726 " 32 Byte: Format: %x " 5727 "Exception class %x\n", 5728 sense[6] & 0x0f, sense[22] >> 4); 5729 } 5730 } else { 5731 sprintf(page + len, PRINTK_HEADER 5732 " SORRY - NO VALID SENSE AVAILABLE\n"); 5733 } 5734 } else { 5735 sprintf(page + len, PRINTK_HEADER 5736 " SORRY - NO TSB DATA AVAILABLE\n"); 5737 } 5738 printk(KERN_ERR "%s", page); 5739 free_page((unsigned long) page); 5740 } 5741 5742 static void dasd_eckd_dump_sense(struct dasd_device *device, 5743 struct dasd_ccw_req *req, struct irb *irb) 5744 { 5745 u8 *sense = dasd_get_sense(irb); 5746 5747 if (scsw_is_tm(&irb->scsw)) { 5748 /* 5749 * In some cases the 'File Protected' or 'Incorrect Length' 5750 * error might be expected and log messages shouldn't be written 5751 * then. Check if the according suppress bit is set. 5752 */ 5753 if (sense && (sense[1] & SNS1_FILE_PROTECTED) && 5754 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags)) 5755 return; 5756 if (scsw_cstat(&irb->scsw) == 0x40 && 5757 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) 5758 return; 5759 5760 dasd_eckd_dump_sense_tcw(device, req, irb); 5761 } else { 5762 /* 5763 * In some cases the 'Command Reject' or 'No Record Found' 5764 * error might be expected and log messages shouldn't be 5765 * written then. Check if the according suppress bit is set. 5766 */ 5767 if (sense && sense[0] & SNS0_CMD_REJECT && 5768 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) 5769 return; 5770 5771 if (sense && sense[1] & SNS1_NO_REC_FOUND && 5772 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) 5773 return; 5774 5775 dasd_eckd_dump_sense_ccw(device, req, irb); 5776 } 5777 } 5778 5779 static int dasd_eckd_reload_device(struct dasd_device *device) 5780 { 5781 struct dasd_eckd_private *private = device->private; 5782 int rc, old_base; 5783 char print_uid[60]; 5784 struct dasd_uid uid; 5785 unsigned long flags; 5786 5787 /* 5788 * remove device from alias handling to prevent new requests 5789 * from being scheduled on the wrong alias device 5790 */ 5791 dasd_alias_remove_device(device); 5792 5793 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 5794 old_base = private->uid.base_unit_addr; 5795 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 5796 5797 /* Read Configuration Data */ 5798 rc = dasd_eckd_read_conf(device); 5799 if (rc) 5800 goto out_err; 5801 5802 rc = dasd_eckd_generate_uid(device); 5803 if (rc) 5804 goto out_err; 5805 /* 5806 * update unit address configuration and 5807 * add device to alias management 5808 */ 5809 dasd_alias_update_add_device(device); 5810 5811 dasd_eckd_get_uid(device, &uid); 5812 5813 if (old_base != uid.base_unit_addr) { 5814 if (strlen(uid.vduit) > 0) 5815 snprintf(print_uid, sizeof(print_uid), 5816 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, 5817 uid.ssid, uid.base_unit_addr, uid.vduit); 5818 else 5819 snprintf(print_uid, sizeof(print_uid), 5820 "%s.%s.%04x.%02x", uid.vendor, uid.serial, 5821 uid.ssid, uid.base_unit_addr); 5822 5823 dev_info(&device->cdev->dev, 5824 "An Alias device was reassigned to a new base device " 5825 "with UID: %s\n", print_uid); 5826 } 5827 return 0; 5828 5829 out_err: 5830 return -1; 5831 } 5832 5833 static int dasd_eckd_read_message_buffer(struct dasd_device *device, 5834 struct dasd_rssd_messages *messages, 5835 __u8 lpum) 5836 { 5837 struct dasd_rssd_messages *message_buf; 5838 struct dasd_psf_prssd_data *prssdp; 5839 struct dasd_ccw_req *cqr; 5840 struct ccw1 *ccw; 5841 int rc; 5842 5843 cqr = 
static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/* dasd_sleep_on_immediatly does not do complex error
	 * recovery so clear erp flag and set retry counter to
	 * do basic erp */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested path
		 * but instead we get the required information on any path
		 * so retry with open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
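/*
 * Query host access information for the volume. This uses the same
 * PSF/RSSD sequence as the message buffer read, but with the "query host
 * access" suborder and with LSS and volume address filled in, so the
 * storage server returns per-host path group information for this volume.
 */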
"Could not allocate host_access buffer"); 5946 return -ENOMEM; 5947 } 5948 cqr->startdev = device; 5949 cqr->memdev = device; 5950 cqr->block = NULL; 5951 cqr->retries = 256; 5952 cqr->expires = 10 * HZ; 5953 5954 /* Prepare for Read Subsystem Data */ 5955 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5956 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5957 prssdp->order = PSF_ORDER_PRSSD; 5958 prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */ 5959 /* LSS and Volume that will be queried */ 5960 prssdp->lss = private->ned->ID; 5961 prssdp->volume = private->ned->unit_addr; 5962 /* all other bytes of prssdp must be zero */ 5963 5964 ccw = cqr->cpaddr; 5965 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5966 ccw->count = sizeof(struct dasd_psf_prssd_data); 5967 ccw->flags |= CCW_FLAG_CC; 5968 ccw->flags |= CCW_FLAG_SLI; 5969 ccw->cda = (__u32)(addr_t) prssdp; 5970 5971 /* Read Subsystem Data - query host access */ 5972 ccw++; 5973 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5974 ccw->count = sizeof(struct dasd_psf_query_host_access); 5975 ccw->flags |= CCW_FLAG_SLI; 5976 ccw->cda = (__u32)(addr_t) host_access; 5977 5978 cqr->buildclk = get_tod_clock(); 5979 cqr->status = DASD_CQR_FILLED; 5980 /* the command might not be supported, suppress error message */ 5981 __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); 5982 rc = dasd_sleep_on_interruptible(cqr); 5983 if (rc == 0) { 5984 *data = *host_access; 5985 } else { 5986 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 5987 "Reading host access data failed with rc=%d\n", 5988 rc); 5989 rc = -EOPNOTSUPP; 5990 } 5991 5992 dasd_sfree_request(cqr, cqr->memdev); 5993 kfree(host_access); 5994 return rc; 5995 } 5996 /* 5997 * return number of grouped devices 5998 */ 5999 static int dasd_eckd_host_access_count(struct dasd_device *device) 6000 { 6001 struct dasd_psf_query_host_access *access; 6002 struct dasd_ckd_path_group_entry *entry; 6003 struct dasd_ckd_host_information *info; 6004 int count = 0; 6005 int rc, i; 6006 6007 access = kzalloc(sizeof(*access), GFP_NOIO); 6008 if (!access) { 6009 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 6010 "Could not allocate access buffer"); 6011 return -ENOMEM; 6012 } 6013 rc = dasd_eckd_query_host_access(device, access); 6014 if (rc) { 6015 kfree(access); 6016 return rc; 6017 } 6018 6019 info = (struct dasd_ckd_host_information *) 6020 access->host_access_information; 6021 for (i = 0; i < info->entry_count; i++) { 6022 entry = (struct dasd_ckd_path_group_entry *) 6023 (info->entry + i * info->entry_size); 6024 if (entry->status_flags & DASD_ECKD_PG_GROUPED) 6025 count++; 6026 } 6027 6028 kfree(access); 6029 return count; 6030 } 6031 6032 /* 6033 * write host access information to a sequential file 6034 */ 6035 static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m) 6036 { 6037 struct dasd_psf_query_host_access *access; 6038 struct dasd_ckd_path_group_entry *entry; 6039 struct dasd_ckd_host_information *info; 6040 char sysplex[9] = ""; 6041 int rc, i; 6042 6043 access = kzalloc(sizeof(*access), GFP_NOIO); 6044 if (!access) { 6045 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 6046 "Could not allocate access buffer"); 6047 return -ENOMEM; 6048 } 6049 rc = dasd_eckd_query_host_access(device, access); 6050 if (rc) { 6051 kfree(access); 6052 return rc; 6053 } 6054 6055 info = (struct dasd_ckd_host_information *) 6056 access->host_access_information; 6057 for (i = 0; i < info->entry_count; i++) { 6058 entry = (struct dasd_ckd_path_group_entry *) 6059 (info->entry + i * info->entry_size); 6060 /* 
/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}

/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}
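/*
 * CUIR (Control Unit Initiated Reconfiguration) support. Before a service
 * action on the storage server, the control unit sends an attention
 * message asking the host to quiesce or resume a set of paths. The
 * functions below build the PSF response, determine which paths are in
 * the scope of a request, and update the path masks of all devices on the
 * affected LCU accordingly.
 */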
/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * return configuration data that is referenced by record selector
 * if a record selector is specified or per default return the
 * conf_data pointer for the path specified by lpum
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}

/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR request.
 * Returns a path mask containing CUIR affected paths for the given device.
 *
 * If the CUIR request does not contain the required information, return the
 * path mask of the path the attention message for the CUIR request was
 * received on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if CUIR request does not specify the scope use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per_path gneq under
		   24 bit mask where mask bit 0 equals byte 7 of
		   the gneq and mask bit 24 equals byte 31 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}
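/*
 * Path masks throughout this code are bit masks with the most significant
 * bit denoting path position 0, i.e. position = 8 - ffs(mask) for a
 * single-path mask. For example:
 *	pathmask_to_pos(0x40) == 1
 */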
static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths, int action)
{
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service on the storage server\n",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}

static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken
		   return error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}

/*
 * walk through all devices and build a path mask to quiesce them
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error occurs,
 * no onlining is necessary; the storage server will notify the
 * already quiesced devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}
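/*
 * walk through all devices and trigger path verification for all paths
 * that come back after a CUIR service action; the paths are not simply
 * added back to the operational mask since their state must be
 * re-verified first
 */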
static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before;
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}

static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left, schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}

static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left, schedule work again */
	device->discipline->check_attention(device, lpum);
}
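/*
 * Attention handling: dasd_eckd_check_attention() is called when the
 * device raises an attention, presumably in a context that must not sleep
 * (hence the GFP_ATOMIC allocation there), so it only schedules a worker.
 * The worker reads the message buffer, which requires channel I/O, and
 * dispatches on message length and format to the CUIR or out-of-space
 * handler.
 */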
static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}

static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}

static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}

static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}

static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}
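/*
 * Handle the loss of High Performance FICON: if only a single path lost
 * HPF functionality and other operational paths remain, just that path is
 * disabled; otherwise HPF is switched off for the whole device. In the
 * latter case, I/O is stopped and all running requests are requeued, since
 * they may have been built as transport-mode requests.
 */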
static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent any new I/O from being started on the device and
	 * schedule a requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}

/*
 * Initialize block layer request queue.
 */
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
	unsigned int logical_block_size = block->bp_block;
	struct request_queue *q = block->request_queue;
	struct dasd_device *device = block->base;
	int max;

	if (device->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256;
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track, so the resulting
		 * max_hw_sectors are 2048 x 512B = 1024kB = 16 tracks
		 */
		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	} else {
		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
	}
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	q->limits.max_dev_sectors = max;
	blk_queue_logical_block_size(q, logical_block_size);
	blk_queue_max_hw_sectors(q, max);
	blk_queue_max_segments(q, USHRT_MAX);
	/* With page sized segments each segment can be translated into
	 * one idaw/tidaw */
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}

static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name = "dasd-eckd",
		.owner = THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids = dasd_eckd_ids,
	.probe = dasd_eckd_probe,
	.remove = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online = dasd_eckd_set_online,
	.notify = dasd_generic_notify,
	.path_event = dasd_generic_path_event,
	.shutdown = dasd_generic_shutdown,
	.uc_handler = dasd_generic_uc_handler,
	.int_class = IRQIO_DAS,
};

static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.setup_blk_queue = dasd_eckd_setup_blk_queue,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};
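/*
 * Module initialization: the static buffers are allocated up front so they
 * are guaranteed to exist before the first device is probed, and the ccw
 * driver is registered last. On any failure, everything allocated so far
 * is freed again.
 */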
static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
				    GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
				    GFP_KERNEL | GFP_DMA);
	if (!pe_handler_worker) {
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);