// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/init.h>
#include <linux/seq_file.h>

#include <asm/css_chars.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>
#include <asm/schid.h>
#include <asm/chpid.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif	/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

/*
 * raw track access always maps to 64k in memory,
 * so it maps to 16 blocks of 4k per track
 */
#define DASD_RAW_BLOCK_PER_TRACK 16
#define DASD_RAW_BLOCKSIZE 4096
/* 64k are 128 x 512 byte sectors */
#define DASD_RAW_SECTORS_PER_TRACK 128

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

static void *rawpadpage;

#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2

/* emergency request for reserve/release */
static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	char data[32];
} *dasd_reserve_req;
static DEFINE_MUTEX(dasd_reserve_mutex);

static struct {
	struct dasd_ccw_req cqr;
	struct ccw1 ccw[2];
	char data[40];
} *dasd_vol_info_req;
static DEFINE_MUTEX(dasd_vol_info_mutex);

struct ext_pool_exhaust_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_device *base;
};

/* definitions for the path verification worker */
struct pe_handler_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	struct dasd_ccw_req cqr;
	struct ccw1 ccw;
	__u8 rcd_buffer[DASD_ECKD_RCD_DATA_SIZE];
	int isglobal;
	__u8 tbvpm;
	__u8 fcsecpm;
};
static struct pe_handler_work_data *pe_handler_worker;
static DEFINE_MUTEX(dasd_pe_handler_mutex);

struct check_attention_work_data {
	struct work_struct worker;
	struct dasd_device *device;
	__u8 lpum;
};

static int dasd_eckd_ext_pool_id(struct dasd_device *);
static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
			struct dasd_device *, struct dasd_device *,
			unsigned int, int, unsigned int, unsigned int,
			unsigned int, unsigned int);

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE |
				     CCWDEV_DO_PATHGROUP | CCWDEV_DO_MULTIPATH);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_eckd_probe: could not set "
				"ccw-device options");
		return ret;
	}
	ret = dasd_generic_probe(cdev);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

/* head and record addresses of count_area read in analysis ccw */
static const int count_area_head[] = { 0, 0, 0, 0, 1 };
static const int count_area_rec[] = { 1, 2, 3, 4, 1 };

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}
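/*
 * Return the number of records with key length kl and data length dl
 * that fit on a single track, using the fixed capacity formulas of the
 * respective device type (3380, 3390, 9345).
 */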
static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}
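/*
 * Store a track address in the packed cylinder/head format: the low
 * 16 bits of the cylinder number go into cyl, the remaining high bits
 * are shifted into the upper 12 bits of head, and the 4-bit head
 * number occupies the low bits. This is how addresses beyond 65535
 * cylinders (EAV) are encoded.
 */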
static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

/*
 * calculate failing track from sense data depending on whether
 * it is an EAV device or not
 */
static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
				    sector_t *track)
{
	struct dasd_eckd_private *private = device->private;
	u8 *sense = NULL;
	u32 cyl;
	u8 head;

	sense = dasd_get_sense(irb);
	if (!sense) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no sense data\n");
		return -EINVAL;
	}
	if (!(sense[27] & DASD_SENSE_BIT_2)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "ESE error no valid track data\n");
		return -EINVAL;
	}

	if (sense[27] & DASD_SENSE_BIT_3) {
		/* enhanced addressing */
		cyl = sense[30] << 20;
		cyl |= (sense[31] & 0xF0) << 12;
		cyl |= sense[28] << 8;
		cyl |= sense[29];
	} else {
		cyl = sense[29] << 8;
		cyl |= sense[30];
	}
	head = sense[31] & 0x0F;
	*track = cyl * private->rdc_data.trk_per_cyl + head;
	return 0;
}

static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
			 struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = get_phys_clock(&data->ep_sys_time);
	/*
	 * Ignore return code if XRC is not supported or
	 * sync clock is switched off
	 */
	if ((rc && !private->rdc_data.facilities.XRC_supported) ||
	    rc == -EOPNOTSUPP || rc == -EACCES)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	if (ccw) {
		ccw->count = sizeof(struct DE_eckd_data);
		ccw->flags |= CCW_FLAG_SLI;
	}

	return rc;
}
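/*
 * Initialize a Define Extent data area (and optionally the associated
 * CCW) for the given command: set the permission mask and cache
 * attributes and describe the extent from track trk to track totrk.
 */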
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device,
	      int blksize)
{
	struct dasd_eckd_private *private = device->private;
	u16 heads, beghead, endhead;
	u32 begcyl, endcyl;
	int rc = 0;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
		ccw->flags = 0;
		ccw->count = 16;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = set_timestamp(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->mask.perm = 0x03;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		data->blk_size = blksize;
		rc = set_timestamp(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}
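/*
 * Initialize a Locate Record Extended data area (and optionally the
 * associated CCW) for the given command, orienting the channel program
 * to record rec_on_trk on track trk.
 */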
static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
			      unsigned int trk, unsigned int rec_on_trk,
			      int count, int cmd, struct dasd_device *device,
			      unsigned int reclen, unsigned int tlf)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	if (ccw) {
		ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD_EXT;
		ccw->flags = 0;
		if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK)
			ccw->count = 22;
		else
			ccw->count = 20;
		ccw->cda = (__u32)__pa(data);
	}

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/*
	 * note: the meaning of count depends on the operation;
	 * for record-based I/O it's the number of records, but for
	 * track-based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_FULL_TRACK:
		data->operation.orientation = 0x0;
		data->operation.operation = 0x3F;
		data->extended_operation = 0x11;
		data->length = 0;
		data->extended_parameter_length = 0x02;
		if (data->count > 8) {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[1] = 0xFF;
			data->extended_parameter[1] <<= (16 - count);
		} else {
			data->extended_parameter[0] = 0xFF;
			data->extended_parameter[0] <<= (8 - count);
			data->extended_parameter[1] = 0x00;
		}
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x0C;
		data->extended_parameter_length = 0;
		data->sector = 0xFF;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}
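/*
 * Build a Prefix CCW whose data area combines define extent and locate
 * record extended parameters: format 0 carries the define extent part
 * only, format 1 additionally carries the locate record part.
 */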
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned int format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct LRE_eckd_data *lredata;
	struct DE_eckd_data *dedata;
	int rc = 0;

	basepriv = basedev->private;
	startpriv = startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
		ccw->count = sizeof(*pfxdata) + 2;
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata) + 2);
	} else {
		ccw->count = sizeof(*pfxdata);
		ccw->cda = (__u32) __pa(pfxdata);
		memset(pfxdata, 0, sizeof(*pfxdata));
	}

	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type == UA_BASE_PAV_ALIAS)
		pfxdata->validity.verify_base = 1;

	if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) {
		pfxdata->validity.verify_base = 1;
		pfxdata->validity.hyper_pav = 1;
	}

	rc = define_extent(NULL, dedata, trk, totrk, cmd, basedev, blksize);

	/*
	 * For some commands the System Time Stamp is set in the define extent
	 * data when XRC is supported. The validity of the time stamp must be
	 * reflected in the prefix data as well.
	 */
	if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02)
		pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */

	if (format == 1) {
		locate_record_ext(NULL, lredata, trk, rec_on_trk, count, cmd,
				  basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}
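/*
 * Initialize a Locate Record CCW and data area for the given command;
 * this is the counterpart of locate_record_ext() based on the older
 * Locate Record command.
 */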
static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private = device->private;
	int sector;
	int dn, d;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *	ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}
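/*
 * The UID assembled below is rendered elsewhere in this file as
 * vendor.serial.ssid.unit_addr[.vduit]; for example (illustrative
 * values only) "IBM.750000000ABC12.1234.0a".
 */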
/* create unique id from private structure. */
static void create_uid(struct dasd_eckd_private *private)
{
	int count;
	struct dasd_uid *uid;

	uid = &private->uid;
	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, &private->ned->serial,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
}

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	create_uid(private);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	return 0;
}

static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (private) {
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		*uid = private->uid;
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
		return 0;
	}
	return -EINVAL;
}

/*
 * compare device UID with data of a given dasd_eckd_private structure
 * return 0 for match
 */
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
				      struct dasd_eckd_private *private)
{
	struct dasd_uid device_uid;

	create_uid(private);
	dasd_eckd_get_uid(device, &device_uid);

	return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
}

static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   __u8 *rcd_buffer,
				   __u8 lpm)
{
	struct ccw1 *ccw;
	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buffer[0] = 0xE5;
	rcd_buffer[1] = 0xF1;
	rcd_buffer[2] = 0x4B;
	rcd_buffer[3] = 0xF0;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_RCD;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = DASD_ECKD_RCD_DATA_SIZE;
	cqr->magic = DASD_ECKD_MAGIC;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
}

/*
 * Wakeup helper for read_conf:
 * if the cqr is not done and needs some error recovery,
 * the buffer has to be re-initialized with the EBCDIC "V1.0"
 * to show support for virtual device SNEQ
 */
static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct ccw1 *ccw;
	__u8 *rcd_buffer;

	if (cqr->status != DASD_CQR_DONE) {
		ccw = cqr->cpaddr;
		rcd_buffer = (__u8 *)((addr_t) ccw->cda);
		memset(rcd_buffer, 0, sizeof(*rcd_buffer));

		rcd_buffer[0] = 0xE5;
		rcd_buffer[1] = 0xF1;
		rcd_buffer[2] = 0x4B;
		rcd_buffer[3] = 0xF0;
	}
	dasd_wakeup_cb(cqr, data);
}
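/*
 * Variant of the RCD read used during path verification: the request
 * runs without ERP and with only a few retries, so that a
 * malfunctioning path cannot stall the verification worker.
 */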
static int dasd_eckd_read_conf_immediately(struct dasd_device *device,
					   struct dasd_ccw_req *cqr,
					   __u8 *rcd_buffer,
					   __u8 lpm)
{
	struct ciw *ciw;
	int rc;
	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD)
		return -EOPNOTSUPP;

	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buffer, lpm);
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
	cqr->retries = 5;
	cqr->callback = read_conf_cb;
	rc = dasd_sleep_on_immediatly(cqr);
	return rc;
}

static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * sanity check: scan for RCD command in extended SenseID data
	 * some devices do not support RCD
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd != DASD_ECKD_CCW_RCD) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
				   0, /* use rcd_buf as data area */
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		ret = -ENOMEM;
		goto out_error;
	}
	dasd_eckd_fill_rcd_cqr(device, cqr, rcd_buf, lpm);
	cqr->callback = read_conf_cb;
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = DASD_ECKD_RCD_DATA_SIZE;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{
	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;
}
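/*
 * Return the path access mode (low three bits of byte 18) from the
 * first generic NEQ found in the configuration data, or 0 if there is
 * none.
 */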
static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static void dasd_eckd_store_conf_data(struct dasd_device *device,
				      struct dasd_conf_data *conf_data, int chp)
{
	struct dasd_eckd_private *private = device->private;
	struct channel_path_desc_fmt0 *chp_desc;
	struct subchannel_id sch_id;
	void *cdp;

	/*
	 * path handling and read_conf allocate data;
	 * free it before replacing the pointer, and
	 * also replace the old private->conf_data pointer
	 * with the new one if this points to the same data
	 */
	cdp = device->path[chp].conf_data;
	if (private->conf_data == cdp) {
		private->conf_data = (void *)conf_data;
		dasd_eckd_identify_conf_parts(private);
	}
	ccw_device_get_schid(device->cdev, &sch_id);
	device->path[chp].conf_data = conf_data;
	device->path[chp].cssid = sch_id.cssid;
	device->path[chp].ssid = sch_id.ssid;
	chp_desc = ccw_device_get_chp_desc(device->cdev, chp);
	if (chp_desc)
		device->path[chp].chpid = chp_desc->chpid;
	kfree(chp_desc);
	kfree(cdp);
}

static void dasd_eckd_clear_conf_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int i;

	private->conf_data = NULL;
	private->conf_len = 0;
	for (i = 0; i < 8; i++) {
		kfree(device->path[i].conf_data);
		device->path[i].conf_data = NULL;
		device->path[i].cssid = 0;
		device->path[i].ssid = 0;
		device->path[i].chpid = 0;
		dasd_path_notoper(device, i);
	}
}

static void dasd_eckd_read_fc_security(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	u8 esm_valid;
	u8 esm[8];
	int chp;
	int rc;

	rc = chsc_scud(private->uid.ssid, (u64 *)esm, &esm_valid);
	if (rc) {
		for (chp = 0; chp < 8; chp++)
			device->path[chp].fc_security = 0;
		return;
	}

	for (chp = 0; chp < 8; chp++) {
		if (esm_valid & (0x80 >> chp))
			device->path[chp].fc_security = esm[chp];
		else
			device->path[chp].fc_security = 0;
	}
}
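/*
 * Read the configuration data on each operational path. The first
 * valid set becomes the device configuration; every further path is
 * checked against the resulting device UID so that paths leading to a
 * different device (miscabling) are reported and excluded.
 */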
static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc, path_err, pos;
	__u8 lpm, opm;
	struct dasd_eckd_private *private, path_private;
	struct dasd_uid *uid;
	char print_path_uid[60], print_device_uid[60];

	private = device->private;
	opm = ccw_device_get_path_mask(device->cdev);
	conf_data_saved = 0;
	path_err = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		rc = dasd_eckd_read_conf_lpm(device, &conf_data,
					     &conf_len, lpm);
		if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data returned "
					"error %d", rc);
			return rc;
		}
		if (conf_data == NULL) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"No configuration data "
					"retrieved");
			/* no further analysis possible */
			dasd_path_add_opm(device, opm);
			continue;	/* no error */
		}
		/* save first valid configuration data */
		if (!conf_data_saved) {
			/* initially clear previously stored conf_data */
			dasd_eckd_clear_conf_data(device);
			private->conf_data = conf_data;
			private->conf_len = conf_len;
			if (dasd_eckd_identify_conf_parts(private)) {
				private->conf_data = NULL;
				private->conf_len = 0;
				kfree(conf_data);
				continue;
			}
			/*
			 * build device UID so that other path data
			 * can be compared to it
			 */
			dasd_eckd_generate_uid(device);
			conf_data_saved++;
		} else {
			path_private.conf_data = conf_data;
			path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
			if (dasd_eckd_identify_conf_parts(
				    &path_private)) {
				path_private.conf_data = NULL;
				path_private.conf_len = 0;
				kfree(conf_data);
				continue;
			}
			if (dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_path_uid,
						 sizeof(print_path_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				uid = &private->uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_device_uid,
						 sizeof(print_device_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"Not all channel paths lead to "
					"the same device, path %02X leads to "
					"device %s instead of %s\n", lpm,
					print_path_uid, print_device_uid);
				path_err = -EINVAL;
				dasd_path_add_cablepm(device, lpm);
				continue;
			}
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
		}

		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		switch (dasd_eckd_path_access(conf_data, conf_len)) {
		case 0x02:
			dasd_path_add_nppm(device, lpm);
			break;
		case 0x03:
			dasd_path_add_ppm(device, lpm);
			break;
		}
		if (!dasd_path_get_opm(device)) {
			dasd_path_set_opm(device, lpm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, lpm);
		}
	}

	dasd_eckd_read_fc_security(device);

	return path_err;
}

static u32 get_fcx_max_data(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int fcx_in_css, fcx_in_gneq, fcx_in_features;
	unsigned int mdc;
	int tpm;

	if (dasd_nofcx)
		return 0;
	/* is transport mode supported? */
	fcx_in_css = css_general_characteristics.fcx;
	fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
	fcx_in_features = private->features.feature[40] & 0x80;
	tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;

	if (!tpm)
		return 0;

	mdc = ccw_device_get_mdc(device->cdev, 0);
	if (mdc == 0) {
		dev_warn(&device->cdev->dev, "Detecting the maximum supported data size for zHPF requests failed\n");
		return 0;
	} else {
		return (u32)mdc * FCX_MAX_DATA_FACTOR;
	}
}

static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned int mdc;
	u32 fcx_max_data;

	if (private->fcx_max_data) {
		mdc = ccw_device_get_mdc(device->cdev, lpm);
		if (mdc == 0) {
			dev_warn(&device->cdev->dev,
				 "Detecting the maximum data size for zHPF "
				 "requests failed (rc=%d) for a new path %x\n",
				 mdc, lpm);
			return mdc;
		}
		fcx_max_data = (u32)mdc * FCX_MAX_DATA_FACTOR;
		if (fcx_max_data < private->fcx_max_data) {
			dev_warn(&device->cdev->dev,
				 "The maximum data size for zHPF requests %u "
				 "on a new path %x is below the active maximum "
				 "%u\n", fcx_max_data, lpm,
				 private->fcx_max_data);
			return -EACCES;
		}
	}
	return 0;
}
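/*
 * Re-read the configuration data on the first working path and
 * regenerate the device UID from it, for the case that the device
 * behind the paths has changed (e.g. by a z/VM hyperswap).
 */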
static int rebuild_device_uid(struct dasd_device *device,
			      struct pe_handler_work_data *data)
{
	struct dasd_eckd_private *private = device->private;
	__u8 lpm, opm = dasd_path_get_opm(device);
	int rc = -ENODEV;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & opm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);

		if (rc) {
			if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
				continue;
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
					"Read configuration data "
					"returned error %d", rc);
			break;
		}
		memcpy(private->conf_data, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		if (dasd_eckd_identify_conf_parts(private)) {
			rc = -ENODEV;
		} else	/* first valid path is enough */
			break;
	}

	if (!rc)
		rc = dasd_eckd_generate_uid(device);

	return rc;
}
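/*
 * Verify each path that has become available again: read its
 * configuration data, classify the path access mode, make sure the
 * path still leads to the same device, and update the path masks
 * accordingly.
 */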
static void dasd_eckd_path_available_action(struct dasd_device *device,
					    struct pe_handler_work_data *data)
{
	struct dasd_eckd_private path_private;
	struct dasd_uid *uid;
	__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
	__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
	struct dasd_conf_data *conf_data;
	unsigned long flags;
	char print_uid[60];
	int rc, pos;

	opm = 0;
	npm = 0;
	ppm = 0;
	epm = 0;
	hpfpm = 0;
	cablepm = 0;

	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (!(lpm & data->tbvpm))
			continue;
		memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
		memset(&data->cqr, 0, sizeof(data->cqr));
		data->cqr.cpaddr = &data->ccw;
		rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
						     data->rcd_buffer,
						     lpm);
		if (!rc) {
			switch (dasd_eckd_path_access(data->rcd_buffer,
						      DASD_ECKD_RCD_DATA_SIZE)) {
			case 0x02:
				npm |= lpm;
				break;
			case 0x03:
				ppm |= lpm;
				break;
			}
			opm |= lpm;
		} else if (rc == -EOPNOTSUPP) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: No configuration "
					"data retrieved");
			opm |= lpm;
		} else if (rc == -EAGAIN) {
			DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
					"path verification: device is stopped,"
					" try again later");
			epm |= lpm;
		} else {
			dev_warn(&device->cdev->dev,
				 "Reading device feature codes failed "
				 "(rc=%d) for new path %x\n", rc, lpm);
			continue;
		}
		if (verify_fcx_max_data(device, lpm)) {
			opm &= ~lpm;
			npm &= ~lpm;
			ppm &= ~lpm;
			hpfpm |= lpm;
			continue;
		}

		/*
		 * save conf_data for comparison after
		 * rebuild_device_uid may have changed
		 * the original data
		 */
		memcpy(&path_rcd_buf, data->rcd_buffer,
		       DASD_ECKD_RCD_DATA_SIZE);
		path_private.conf_data = (void *) &path_rcd_buf;
		path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
		if (dasd_eckd_identify_conf_parts(&path_private)) {
			path_private.conf_data = NULL;
			path_private.conf_len = 0;
			continue;
		}

		/*
		 * compare path UID with device UID only if at least
		 * one valid path is left;
		 * otherwise the device UID may have changed and
		 * the first working path UID will be used as device UID
		 */
		if (dasd_path_get_opm(device) &&
		    dasd_eckd_compare_path_uid(device, &path_private)) {
			/*
			 * the comparison was not successful;
			 * rebuild the device UID with at least one
			 * known path in case a z/VM hyperswap command
			 * has changed the device
			 *
			 * after this compare again
			 *
			 * if either the rebuild or the recompare fails
			 * the path can not be used
			 */
			if (rebuild_device_uid(device, data) ||
			    dasd_eckd_compare_path_uid(
				    device, &path_private)) {
				uid = &path_private.uid;
				if (strlen(uid->vduit) > 0)
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x.%s",
						 uid->vendor, uid->serial,
						 uid->ssid, uid->real_unit_addr,
						 uid->vduit);
				else
					snprintf(print_uid, sizeof(print_uid),
						 "%s.%s.%04x.%02x",
						 uid->vendor, uid->serial,
						 uid->ssid,
						 uid->real_unit_addr);
				dev_err(&device->cdev->dev,
					"The newly added channel path %02X "
					"will not be used because it leads "
					"to a different device %s\n",
					lpm, print_uid);
				opm &= ~lpm;
				npm &= ~lpm;
				ppm &= ~lpm;
				cablepm |= lpm;
				continue;
			}
		}

		conf_data = kzalloc(DASD_ECKD_RCD_DATA_SIZE, GFP_KERNEL);
		if (conf_data) {
			memcpy(conf_data, data->rcd_buffer,
			       DASD_ECKD_RCD_DATA_SIZE);
		}
		pos = pathmask_to_pos(lpm);
		dasd_eckd_store_conf_data(device, conf_data, pos);

		/*
		 * There is a small chance that a path is lost again between
		 * above path verification and the following modification of
		 * the device opm mask. We could avoid that race here by using
		 * yet another path mask, but we rather deal with this unlikely
		 * situation in dasd_start_IO.
		 */
		spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
		if (!dasd_path_get_opm(device) && opm) {
			dasd_path_set_opm(device, opm);
			dasd_generic_path_operational(device);
		} else {
			dasd_path_add_opm(device, opm);
		}
		dasd_path_add_nppm(device, npm);
		dasd_path_add_ppm(device, ppm);
		dasd_path_add_tbvpm(device, epm);
		dasd_path_add_cablepm(device, cablepm);
		dasd_path_add_nohpfpm(device, hpfpm);
		spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

		dasd_path_create_kobj(device, pos);
	}
}

static void do_pe_handler_work(struct work_struct *work)
{
	struct pe_handler_work_data *data;
	struct dasd_device *device;

	data = container_of(work, struct pe_handler_work_data, worker);
	device = data->device;

	/* delay path verification until device was resumed */
	if (test_bit(DASD_FLAG_SUSPENDED, &device->flags)) {
		schedule_work(work);
		return;
	}
	/* check if path verification already running and delay if so */
	if (test_and_set_bit(DASD_FLAG_PATH_VERIFY, &device->flags)) {
		schedule_work(work);
		return;
	}

	if (data->tbvpm)
		dasd_eckd_path_available_action(device, data);
	if (data->fcsecpm)
		dasd_eckd_read_fc_security(device);

	clear_bit(DASD_FLAG_PATH_VERIFY, &device->flags);
	dasd_put_device(device);
	if (data->isglobal)
		mutex_unlock(&dasd_pe_handler_mutex);
	else
		kfree(data);
}
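/*
 * Entry point for path events: queue the path masks to be verified for
 * the worker above. If the atomic allocation fails, fall back to the
 * static pe_handler_worker area, serialized by dasd_pe_handler_mutex.
 */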
static int dasd_eckd_pe_handler(struct dasd_device *device,
				__u8 tbvpm, __u8 fcsecpm)
{
	struct pe_handler_work_data *data;

	data = kmalloc(sizeof(*data), GFP_ATOMIC | GFP_DMA);
	if (!data) {
		if (mutex_trylock(&dasd_pe_handler_mutex)) {
			data = pe_handler_worker;
			data->isglobal = 1;
		} else {
			return -ENOMEM;
		}
	} else {
		memset(data, 0, sizeof(*data));
		data->isglobal = 0;
	}
	INIT_WORK(&data->worker, do_pe_handler_work);
	dasd_get_device(device);
	data->device = device;
	data->tbvpm = tbvpm;
	data->fcsecpm = fcsecpm;
	schedule_work(&data->worker);
	return 0;
}

static void dasd_eckd_reset_path(struct dasd_device *device, __u8 pm)
{
	struct dasd_eckd_private *private = device->private;
	unsigned long flags;

	if (!private->fcx_max_data)
		private->fcx_max_data = get_fcx_max_data(device);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	dasd_path_set_tbvpm(device, pm ? : dasd_path_get_notoperpm(device));
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
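/*
 * Read the feature codes (PSF/RSSD suborder 0x41) and cache them in
 * private->features; they are consulted later, for example by
 * get_fcx_max_data() to decide whether transport mode is supported.
 */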
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	memset(&private->features, 0, sizeof(struct dasd_rssd_features));
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
				"allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	} else
		dev_warn(&device->cdev->dev, "Reading device feature codes"
			 " failed with rc=%d\n", rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/* Read Volume Information - Volume Storage Query */
static int dasd_eckd_read_vol_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_vsq *vsq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int useglobal;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	useglobal = 0;
	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*vsq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		mutex_lock(&dasd_vol_info_mutex);
		useglobal = 1;
		cqr = &dasd_vol_info_req->cqr;
		memset(cqr, 0, sizeof(*cqr));
		memset(dasd_vol_info_req, 0, sizeof(*dasd_vol_info_req));
		cqr->cpaddr = &dasd_vol_info_req->ccw;
		cqr->data = &dasd_vol_info_req->data;
		cqr->magic = DASD_ECKD_MAGIC;
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_VSQ;	/* Volume Storage Query */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	/* Read Subsystem Data - Volume Storage Query */
	vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
	memset(vsq, 0, sizeof(*vsq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*vsq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)vsq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		memcpy(&private->vsq, vsq, sizeof(*vsq));
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the volume storage information failed with rc=%d", rc);
	}

	if (useglobal)
		mutex_unlock(&dasd_vol_info_mutex);
	else
		dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}

static int dasd_eckd_is_ese(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.vol_info.ese;
}

static int dasd_eckd_ext_pool_id(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.extent_pool_id;
}

/*
 * This value represents the total amount of available space. As more space is
 * allocated by ESE volumes, this value will decrease.
 * The data for this value is therefore updated on any call.
 */
static int dasd_eckd_space_configured(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_configured;
}
/*
 * The value of space allocated by an ESE volume may have changed and is
 * therefore updated on any call.
 */
static int dasd_eckd_space_allocated(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	int rc;

	rc = dasd_eckd_read_vol_info(device);

	return rc ? : private->vsq.space_allocated;
}

static int dasd_eckd_logical_capacity(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->vsq.logical_capacity;
}

static void dasd_eckd_ext_pool_exhaust_work(struct work_struct *work)
{
	struct ext_pool_exhaust_work_data *data;
	struct dasd_device *device;
	struct dasd_device *base;

	data = container_of(work, struct ext_pool_exhaust_work_data, worker);
	device = data->device;
	base = data->base;

	if (!base)
		base = device;
	if (dasd_eckd_space_configured(base) != 0) {
		dasd_generic_space_avail(device);
	} else {
		dev_warn(&device->cdev->dev, "No space left in the extent pool\n");
		DBF_DEV_EVENT(DBF_WARNING, device, "%s", "out of space");
	}

	dasd_put_device(device);
	kfree(data);
}

static int dasd_eckd_ext_pool_exhaust(struct dasd_device *device,
				      struct dasd_ccw_req *cqr)
{
	struct ext_pool_exhaust_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_ext_pool_exhaust_work);
	dasd_get_device(device);
	data->device = device;

	if (cqr->block)
		data->base = cqr->block->base;
	else if (cqr->basedev)
		data->base = cqr->basedev;
	else
		data->base = NULL;

	schedule_work(&data->worker);

	return 0;
}
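/*
 * Pick the extent pool summary entry matching this device's extent
 * pool id out of the Logical Configuration Query response and cache it
 * in private->eps.
 */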
static void dasd_eckd_cpy_ext_pool_data(struct dasd_device *device,
					struct dasd_rssd_lcq *lcq)
{
	struct dasd_eckd_private *private = device->private;
	int pool_id = dasd_eckd_ext_pool_id(device);
	struct dasd_ext_pool_sum eps;
	int i;

	for (i = 0; i < lcq->pool_count; i++) {
		eps = lcq->ext_pool_sum[i];
		if (eps.pool_id == pool_id) {
			memcpy(&private->eps, &eps,
			       sizeof(struct dasd_ext_pool_sum));
		}
	}
}

/* Read Extent Pool Information - Logical Configuration Query */
static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_lcq *lcq;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* This command cannot be executed on an alias device */
	if (private->uid.type == UA_BASE_PAV_ALIAS ||
	    private->uid.type == UA_HYPER_PAV_ALIAS)
		return 0;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 /* PSF + RSSD */,
				   sizeof(*prssdp) + sizeof(*lcq), device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate initialization request");
		return PTR_ERR(cqr);
	}

	/* Prepare for Read Subsystem Data */
	prssdp = cqr->data;
	memset(prssdp, 0, sizeof(*prssdp));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_LCQ;	/* Logical Configuration Query */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(*prssdp);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t)prssdp;

	lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
	memset(lcq, 0, sizeof(*lcq));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(*lcq);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t)lcq;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = device->default_expires * HZ;
	/* The command might not be supported. Suppress the error output */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		dasd_eckd_cpy_ext_pool_data(device, lcq);
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading the logical configuration failed with rc=%d", rc);
	}

	dasd_sfree_request(cqr, cqr->memdev);

	return rc;
}

/*
 * Depending on the device type, the extent size is specified either as
 * cylinders per extent (CKD) or size per extent (FBA).
 * A 1 GB size corresponds to 1113 cyl, and 16 MB to 21 cyl.
 */
static int dasd_eckd_ext_size(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_ext_pool_sum eps = private->eps;

	if (!eps.flags.extent_size_valid)
		return 0;
	if (eps.extent_size.size_1G)
		return 1113;
	if (eps.extent_size.size_16M)
		return 21;

	return 0;
}

static int dasd_eckd_ext_pool_warn_thrshld(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.warn_thrshld;
}

static int dasd_eckd_ext_pool_cap_at_warnlevel(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.capacity_at_warnlevel;
}

/*
 * Extent Pool out of space
 */
static int dasd_eckd_ext_pool_oos(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->eps.flags.pool_oos;
}

/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0xc0;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x08;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}
turn on failfast, to prevent blocking.
1957 * The calling function should handle failed requests.
1958 */
1959 cqr->flags |= flags;
1960
1961 rc = dasd_sleep_on(cqr);
1962 if (!rc)
1963 /* trigger CIO to reprobe devices */
1964 css_schedule_reprobe();
1965 else if (cqr->intrc == -EAGAIN)
1966 rc = -EAGAIN;
1967
1968 dasd_sfree_request(cqr, cqr->memdev);
1969 return rc;
1970 }
1971
1972 /*
1973 * Validate the storage server of the current device.
1974 */
1975 static int dasd_eckd_validate_server(struct dasd_device *device,
1976 unsigned long flags)
1977 {
1978 struct dasd_eckd_private *private = device->private;
1979 int enable_pav, rc;
1980
1981 if (private->uid.type == UA_BASE_PAV_ALIAS ||
1982 private->uid.type == UA_HYPER_PAV_ALIAS)
1983 return 0;
1984 if (dasd_nopav || MACHINE_IS_VM)
1985 enable_pav = 0;
1986 else
1987 enable_pav = 1;
1988 rc = dasd_eckd_psf_ssc(device, enable_pav, flags);
1989
1990 /* the requested feature may not be available on the server,
1991 * therefore just report the error and continue */
1992 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
1993 "returned rc=%d", private->uid.ssid, rc);
1994 return rc;
1995 }
1996
1997 /*
1998 * worker to validate the server in case of a lost path group
1999 */
2000 static void dasd_eckd_do_validate_server(struct work_struct *work)
2001 {
2002 struct dasd_device *device = container_of(work, struct dasd_device,
2003 kick_validate);
2004 unsigned long flags = 0;
2005
2006 set_bit(DASD_CQR_FLAGS_FAILFAST, &flags);
2007 if (dasd_eckd_validate_server(device, flags)
2008 == -EAGAIN) {
2009 /* schedule worker again if failed */
2010 schedule_work(&device->kick_validate);
2011 return;
2012 }
2013
2014 dasd_put_device(device);
2015 }
2016
2017 static void dasd_eckd_kick_validate_server(struct dasd_device *device)
2018 {
2019 dasd_get_device(device);
2020 /* exit if device not online or in offline processing */
2021 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
2022 device->state < DASD_STATE_ONLINE) {
2023 dasd_put_device(device);
2024 return;
2025 }
2026 /* queue call to do_validate_server to the kernel event daemon. */
2027 if (!schedule_work(&device->kick_validate))
2028 dasd_put_device(device);
2029 }
2030
2031 /*
2032 * Check device characteristics.
2033 * If the device is accessible using ECKD discipline, the device is enabled.
2034 */
2035 static int
2036 dasd_eckd_check_characteristics(struct dasd_device *device)
2037 {
2038 struct dasd_eckd_private *private = device->private;
2039 struct dasd_block *block;
2040 struct dasd_uid temp_uid;
2041 int rc, i;
2042 int readonly;
2043 unsigned long value;
2044
2045 /* setup work queue for validate server */
2046 INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
2047 /* setup work queue for summary unit check */
2048 INIT_WORK(&device->suc_work, dasd_alias_handle_summary_unit_check);
2049
2050 if (!ccw_device_is_pathgroup(device->cdev)) {
2051 dev_warn(&device->cdev->dev,
2052 "A channel path group could not be established\n");
2053 return -EIO;
2054 }
2055 if (!ccw_device_is_multipath(device->cdev)) {
2056 dev_info(&device->cdev->dev,
2057 "The DASD is not operating in multipath mode\n");
2058 }
2059 if (!private) {
2060 private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
2061 if (!private) {
2062 dev_warn(&device->cdev->dev,
2063 "Allocating memory for private DASD data "
2064 "failed\n");
2065 return -ENOMEM;
2066 }
2067 device->private = private;
2068 } else {
2069 memset(private, 0, sizeof(*private));
2070 }
2071 /* Invalidate status of initial analysis.
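A status of -1 means that no analysis result is available yet; dasd_eckd_do_analysis() will then start a fresh analysis.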
*/ 2072 private->init_cqr_status = -1; 2073 /* Set default cache operations. */ 2074 private->attrib.operation = DASD_NORMAL_CACHE; 2075 private->attrib.nr_cyl = 0; 2076 2077 /* Read Configuration Data */ 2078 rc = dasd_eckd_read_conf(device); 2079 if (rc) 2080 goto out_err1; 2081 2082 /* set some default values */ 2083 device->default_expires = DASD_EXPIRES; 2084 device->default_retries = DASD_RETRIES; 2085 device->path_thrhld = DASD_ECKD_PATH_THRHLD; 2086 device->path_interval = DASD_ECKD_PATH_INTERVAL; 2087 2088 if (private->gneq) { 2089 value = 1; 2090 for (i = 0; i < private->gneq->timeout.value; i++) 2091 value = 10 * value; 2092 value = value * private->gneq->timeout.number; 2093 /* do not accept useless values */ 2094 if (value != 0 && value <= DASD_EXPIRES_MAX) 2095 device->default_expires = value; 2096 } 2097 2098 dasd_eckd_get_uid(device, &temp_uid); 2099 if (temp_uid.type == UA_BASE_DEVICE) { 2100 block = dasd_alloc_block(); 2101 if (IS_ERR(block)) { 2102 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 2103 "could not allocate dasd " 2104 "block structure"); 2105 rc = PTR_ERR(block); 2106 goto out_err1; 2107 } 2108 device->block = block; 2109 block->base = device; 2110 } 2111 2112 /* register lcu with alias handling, enable PAV */ 2113 rc = dasd_alias_make_device_known_to_lcu(device); 2114 if (rc) 2115 goto out_err2; 2116 2117 dasd_eckd_validate_server(device, 0); 2118 2119 /* device may report different configuration data after LCU setup */ 2120 rc = dasd_eckd_read_conf(device); 2121 if (rc) 2122 goto out_err3; 2123 2124 dasd_path_create_kobjects(device); 2125 2126 /* Read Feature Codes */ 2127 dasd_eckd_read_features(device); 2128 2129 /* Read Volume Information */ 2130 dasd_eckd_read_vol_info(device); 2131 2132 /* Read Extent Pool Information */ 2133 dasd_eckd_read_ext_pool_info(device); 2134 2135 /* Read Device Characteristics */ 2136 rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC, 2137 &private->rdc_data, 64); 2138 if (rc) { 2139 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, 2140 "Read device characteristic failed, rc=%d", rc); 2141 goto out_err3; 2142 } 2143 2144 if ((device->features & DASD_FEATURE_USERAW) && 2145 !(private->rdc_data.facilities.RT_in_LR)) { 2146 dev_err(&device->cdev->dev, "The storage server does not " 2147 "support raw-track access\n"); 2148 rc = -EINVAL; 2149 goto out_err3; 2150 } 2151 2152 /* find the valid cylinder size */ 2153 if (private->rdc_data.no_cyl == LV_COMPAT_CYL && 2154 private->rdc_data.long_no_cyl) 2155 private->real_cyl = private->rdc_data.long_no_cyl; 2156 else 2157 private->real_cyl = private->rdc_data.no_cyl; 2158 2159 private->fcx_max_data = get_fcx_max_data(device); 2160 2161 readonly = dasd_device_is_ro(device); 2162 if (readonly) 2163 set_bit(DASD_FLAG_DEVICE_RO, &device->flags); 2164 2165 dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) " 2166 "with %d cylinders, %d heads, %d sectors%s\n", 2167 private->rdc_data.dev_type, 2168 private->rdc_data.dev_model, 2169 private->rdc_data.cu_type, 2170 private->rdc_data.cu_model.model, 2171 private->real_cyl, 2172 private->rdc_data.trk_per_cyl, 2173 private->rdc_data.sec_per_trk, 2174 readonly ? 
", read-only device" : ""); 2175 return 0; 2176 2177 out_err3: 2178 dasd_alias_disconnect_device_from_lcu(device); 2179 out_err2: 2180 dasd_free_block(device->block); 2181 device->block = NULL; 2182 out_err1: 2183 dasd_eckd_clear_conf_data(device); 2184 dasd_path_remove_kobjects(device); 2185 kfree(device->private); 2186 device->private = NULL; 2187 return rc; 2188 } 2189 2190 static void dasd_eckd_uncheck_device(struct dasd_device *device) 2191 { 2192 struct dasd_eckd_private *private = device->private; 2193 2194 if (!private) 2195 return; 2196 2197 dasd_alias_disconnect_device_from_lcu(device); 2198 private->ned = NULL; 2199 private->sneq = NULL; 2200 private->vdsneq = NULL; 2201 private->gneq = NULL; 2202 dasd_eckd_clear_conf_data(device); 2203 dasd_path_remove_kobjects(device); 2204 } 2205 2206 static struct dasd_ccw_req * 2207 dasd_eckd_analysis_ccw(struct dasd_device *device) 2208 { 2209 struct dasd_eckd_private *private = device->private; 2210 struct eckd_count *count_data; 2211 struct LO_eckd_data *LO_data; 2212 struct dasd_ccw_req *cqr; 2213 struct ccw1 *ccw; 2214 int cplength, datasize; 2215 int i; 2216 2217 cplength = 8; 2218 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data); 2219 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device, 2220 NULL); 2221 if (IS_ERR(cqr)) 2222 return cqr; 2223 ccw = cqr->cpaddr; 2224 /* Define extent for the first 2 tracks. */ 2225 define_extent(ccw++, cqr->data, 0, 1, 2226 DASD_ECKD_CCW_READ_COUNT, device, 0); 2227 LO_data = cqr->data + sizeof(struct DE_eckd_data); 2228 /* Locate record for the first 4 records on track 0. */ 2229 ccw[-1].flags |= CCW_FLAG_CC; 2230 locate_record(ccw++, LO_data++, 0, 0, 4, 2231 DASD_ECKD_CCW_READ_COUNT, device, 0); 2232 2233 count_data = private->count_area; 2234 for (i = 0; i < 4; i++) { 2235 ccw[-1].flags |= CCW_FLAG_CC; 2236 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2237 ccw->flags = 0; 2238 ccw->count = 8; 2239 ccw->cda = (__u32)(addr_t) count_data; 2240 ccw++; 2241 count_data++; 2242 } 2243 2244 /* Locate record for the first record on track 1. */ 2245 ccw[-1].flags |= CCW_FLAG_CC; 2246 locate_record(ccw++, LO_data++, 1, 0, 1, 2247 DASD_ECKD_CCW_READ_COUNT, device, 0); 2248 /* Read count ccw. */ 2249 ccw[-1].flags |= CCW_FLAG_CC; 2250 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2251 ccw->flags = 0; 2252 ccw->count = 8; 2253 ccw->cda = (__u32)(addr_t) count_data; 2254 2255 cqr->block = NULL; 2256 cqr->startdev = device; 2257 cqr->memdev = device; 2258 cqr->retries = 255; 2259 cqr->buildclk = get_tod_clock(); 2260 cqr->status = DASD_CQR_FILLED; 2261 /* Set flags to suppress output for expected errors */ 2262 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2263 2264 return cqr; 2265 } 2266 2267 /* differentiate between 'no record found' and any other error */ 2268 static int dasd_eckd_analysis_evaluation(struct dasd_ccw_req *init_cqr) 2269 { 2270 char *sense; 2271 if (init_cqr->status == DASD_CQR_DONE) 2272 return INIT_CQR_OK; 2273 else if (init_cqr->status == DASD_CQR_NEED_ERP || 2274 init_cqr->status == DASD_CQR_FAILED) { 2275 sense = dasd_get_sense(&init_cqr->irb); 2276 if (sense && (sense[1] & SNS1_NO_REC_FOUND)) 2277 return INIT_CQR_UNFORMATTED; 2278 else 2279 return INIT_CQR_ERROR; 2280 } else 2281 return INIT_CQR_ERROR; 2282 } 2283 2284 /* 2285 * This is the callback function for the init_analysis cqr. It saves 2286 * the status of the initial analysis ccw before it frees it and kicks 2287 * the device to continue the startup sequence. 
This will call
2288 * dasd_eckd_do_analysis again (if the device has not been marked
2289 * for deletion in the meantime).
2290 */
2291 static void dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr,
2292 void *data)
2293 {
2294 struct dasd_device *device = init_cqr->startdev;
2295 struct dasd_eckd_private *private = device->private;
2296
2297 private->init_cqr_status = dasd_eckd_analysis_evaluation(init_cqr);
2298 dasd_sfree_request(init_cqr, device);
2299 dasd_kick_device(device);
2300 }
2301
2302 static int dasd_eckd_start_analysis(struct dasd_block *block)
2303 {
2304 struct dasd_ccw_req *init_cqr;
2305
2306 init_cqr = dasd_eckd_analysis_ccw(block->base);
2307 if (IS_ERR(init_cqr))
2308 return PTR_ERR(init_cqr);
2309 init_cqr->callback = dasd_eckd_analysis_callback;
2310 init_cqr->callback_data = NULL;
2311 init_cqr->expires = 5*HZ;
2312 /* first try without ERP, so we can later handle unformatted
2313 * devices as a special case
2314 */
2315 clear_bit(DASD_CQR_FLAGS_USE_ERP, &init_cqr->flags);
2316 init_cqr->retries = 0;
2317 dasd_add_request_head(init_cqr);
2318 return -EAGAIN;
2319 }
2320
2321 static int dasd_eckd_end_analysis(struct dasd_block *block)
2322 {
2323 struct dasd_device *device = block->base;
2324 struct dasd_eckd_private *private = device->private;
2325 struct eckd_count *count_area;
2326 unsigned int sb, blk_per_trk;
2327 int status, i;
2328 struct dasd_ccw_req *init_cqr;
2329
2330 status = private->init_cqr_status;
2331 private->init_cqr_status = -1;
2332 if (status == INIT_CQR_ERROR) {
2333 /* try again, this time with full ERP */
2334 init_cqr = dasd_eckd_analysis_ccw(device);
2335 dasd_sleep_on(init_cqr);
2336 status = dasd_eckd_analysis_evaluation(init_cqr);
2337 dasd_sfree_request(init_cqr, device);
2338 }
2339
2340 if (device->features & DASD_FEATURE_USERAW) {
2341 block->bp_block = DASD_RAW_BLOCKSIZE;
2342 blk_per_trk = DASD_RAW_BLOCK_PER_TRACK;
2343 block->s2b_shift = 3;
2344 goto raw;
2345 }
2346
2347 if (status == INIT_CQR_UNFORMATTED) {
2348 dev_warn(&device->cdev->dev, "The DASD is not formatted\n");
2349 return -EMEDIUMTYPE;
2350 } else if (status == INIT_CQR_ERROR) {
2351 dev_err(&device->cdev->dev,
2352 "Detecting the DASD disk layout failed because "
2353 "of an I/O error\n");
2354 return -EIO;
2355 }
2356
2357 private->uses_cdl = 1;
2358 /* Check Track 0 for Compatible Disk Layout */
2359 count_area = NULL;
2360 for (i = 0; i < 3; i++) {
2361 if (private->count_area[i].kl != 4 ||
2362 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4 ||
2363 private->count_area[i].cyl != 0 ||
2364 private->count_area[i].head != count_area_head[i] ||
2365 private->count_area[i].record != count_area_rec[i]) {
2366 private->uses_cdl = 0;
2367 break;
2368 }
2369 }
2370 if (i == 3)
2371 count_area = &private->count_area[3];
2372
2373 if (private->uses_cdl == 0) {
2374 for (i = 0; i < 5; i++) {
2375 if ((private->count_area[i].kl != 0) ||
2376 (private->count_area[i].dl !=
2377 private->count_area[0].dl) ||
2378 private->count_area[i].cyl != 0 ||
2379 private->count_area[i].head != count_area_head[i] ||
2380 private->count_area[i].record != count_area_rec[i])
2381 break;
2382 }
2383 if (i == 5)
2384 count_area = &private->count_area[0];
2385 } else {
2386 if (private->count_area[3].record == 1)
2387 dev_warn(&device->cdev->dev,
2388 "Track 0 has no records following the VTOC\n");
2389 }
2390
2391 if (count_area != NULL && count_area->kl == 0) {
2392 /* we found nothing violating our disk layout */
2393 if (dasd_check_blocksize(count_area->dl) == 0)
2394
block->bp_block = count_area->dl;
2395 }
2396 if (block->bp_block == 0) {
2397 dev_warn(&device->cdev->dev,
2398 "The disk layout of the DASD is not supported\n");
2399 return -EMEDIUMTYPE;
2400 }
2401 block->s2b_shift = 0; /* bits to shift 512 to get a block */
2402 for (sb = 512; sb < block->bp_block; sb = sb << 1)
2403 block->s2b_shift++;
2404
2405 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
2406
2407 raw:
2408 block->blocks = ((unsigned long) private->real_cyl *
2409 private->rdc_data.trk_per_cyl *
2410 blk_per_trk);
2411
2412 dev_info(&device->cdev->dev,
2413 "DASD with %u KB/block, %lu KB total size, %u KB/track, "
2414 "%s\n", (block->bp_block >> 10),
2415 (((unsigned long) private->real_cyl *
2416 private->rdc_data.trk_per_cyl *
2417 blk_per_trk * (block->bp_block >> 9)) >> 1),
2418 ((blk_per_trk * block->bp_block) >> 10),
2419 private->uses_cdl ?
2420 "compatible disk layout" : "linux disk layout");
2421
2422 return 0;
2423 }
2424
2425 static int dasd_eckd_do_analysis(struct dasd_block *block)
2426 {
2427 struct dasd_eckd_private *private = block->base->private;
2428
2429 if (private->init_cqr_status < 0)
2430 return dasd_eckd_start_analysis(block);
2431 else
2432 return dasd_eckd_end_analysis(block);
2433 }
2434
2435 static int dasd_eckd_basic_to_ready(struct dasd_device *device)
2436 {
2437 return dasd_alias_add_device(device);
2438 };
2439
2440 static int dasd_eckd_online_to_ready(struct dasd_device *device)
2441 {
2442 if (cancel_work_sync(&device->reload_device))
2443 dasd_put_device(device);
2444 if (cancel_work_sync(&device->kick_validate))
2445 dasd_put_device(device);
2446
2447 return 0;
2448 };
2449
2450 static int dasd_eckd_basic_to_known(struct dasd_device *device)
2451 {
2452 return dasd_alias_remove_device(device);
2453 };
2454
2455 static int
2456 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2457 {
2458 struct dasd_eckd_private *private = block->base->private;
2459
2460 if (dasd_check_blocksize(block->bp_block) == 0) {
2461 geo->sectors = recs_per_track(&private->rdc_data,
2462 0, block->bp_block);
2463 }
2464 geo->cylinders = private->rdc_data.no_cyl;
2465 geo->heads = private->rdc_data.trk_per_cyl;
2466 return 0;
2467 }
2468
2469 /*
2470 * Build the TCW request for the format check
2471 */
2472 static struct dasd_ccw_req *
2473 dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
2474 int enable_pav, struct eckd_count *fmt_buffer,
2475 int rpt)
2476 {
2477 struct dasd_eckd_private *start_priv;
2478 struct dasd_device *startdev = NULL;
2479 struct tidaw *last_tidaw = NULL;
2480 struct dasd_ccw_req *cqr;
2481 struct itcw *itcw;
2482 int itcw_size;
2483 int count;
2484 int rc;
2485 int i;
2486
2487 if (enable_pav)
2488 startdev = dasd_alias_get_start_dev(base);
2489
2490 if (!startdev)
2491 startdev = base;
2492
2493 start_priv = startdev->private;
2494
2495 count = rpt * (fdata->stop_unit - fdata->start_unit + 1);
2496
2497 /*
2498 * we're adding 'count' tidaws to the itcw;
2499 * calculate the corresponding itcw_size 2500 */ 2501 itcw_size = itcw_calc_size(0, count, 0); 2502 2503 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev); 2504 if (IS_ERR(cqr)) 2505 return cqr; 2506 2507 start_priv->count++; 2508 2509 itcw = itcw_init(cqr->data, itcw_size, ITCW_OP_READ, 0, count, 0); 2510 if (IS_ERR(itcw)) { 2511 rc = -EINVAL; 2512 goto out_err; 2513 } 2514 2515 cqr->cpaddr = itcw_get_tcw(itcw); 2516 rc = prepare_itcw(itcw, fdata->start_unit, fdata->stop_unit, 2517 DASD_ECKD_CCW_READ_COUNT_MT, base, startdev, 0, count, 2518 sizeof(struct eckd_count), 2519 count * sizeof(struct eckd_count), 0, rpt); 2520 if (rc) 2521 goto out_err; 2522 2523 for (i = 0; i < count; i++) { 2524 last_tidaw = itcw_add_tidaw(itcw, 0, fmt_buffer++, 2525 sizeof(struct eckd_count)); 2526 if (IS_ERR(last_tidaw)) { 2527 rc = -EINVAL; 2528 goto out_err; 2529 } 2530 } 2531 2532 last_tidaw->flags |= TIDAW_FLAGS_LAST; 2533 itcw_finalize(itcw); 2534 2535 cqr->cpmode = 1; 2536 cqr->startdev = startdev; 2537 cqr->memdev = startdev; 2538 cqr->basedev = base; 2539 cqr->retries = startdev->default_retries; 2540 cqr->expires = startdev->default_expires * HZ; 2541 cqr->buildclk = get_tod_clock(); 2542 cqr->status = DASD_CQR_FILLED; 2543 /* Set flags to suppress output for expected errors */ 2544 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 2545 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 2546 2547 return cqr; 2548 2549 out_err: 2550 dasd_sfree_request(cqr, startdev); 2551 2552 return ERR_PTR(rc); 2553 } 2554 2555 /* 2556 * Build the CCW request for the format check 2557 */ 2558 static struct dasd_ccw_req * 2559 dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata, 2560 int enable_pav, struct eckd_count *fmt_buffer, int rpt) 2561 { 2562 struct dasd_eckd_private *start_priv; 2563 struct dasd_eckd_private *base_priv; 2564 struct dasd_device *startdev = NULL; 2565 struct dasd_ccw_req *cqr; 2566 struct ccw1 *ccw; 2567 void *data; 2568 int cplength, datasize; 2569 int use_prefix; 2570 int count; 2571 int i; 2572 2573 if (enable_pav) 2574 startdev = dasd_alias_get_start_dev(base); 2575 2576 if (!startdev) 2577 startdev = base; 2578 2579 start_priv = startdev->private; 2580 base_priv = base->private; 2581 2582 count = rpt * (fdata->stop_unit - fdata->start_unit + 1); 2583 2584 use_prefix = base_priv->features.feature[8] & 0x01; 2585 2586 if (use_prefix) { 2587 cplength = 1; 2588 datasize = sizeof(struct PFX_eckd_data); 2589 } else { 2590 cplength = 2; 2591 datasize = sizeof(struct DE_eckd_data) + 2592 sizeof(struct LO_eckd_data); 2593 } 2594 cplength += count; 2595 2596 cqr = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2597 if (IS_ERR(cqr)) 2598 return cqr; 2599 2600 start_priv->count++; 2601 data = cqr->data; 2602 ccw = cqr->cpaddr; 2603 2604 if (use_prefix) { 2605 prefix_LRE(ccw++, data, fdata->start_unit, fdata->stop_unit, 2606 DASD_ECKD_CCW_READ_COUNT, base, startdev, 1, 0, 2607 count, 0, 0); 2608 } else { 2609 define_extent(ccw++, data, fdata->start_unit, fdata->stop_unit, 2610 DASD_ECKD_CCW_READ_COUNT, startdev, 0); 2611 2612 data += sizeof(struct DE_eckd_data); 2613 ccw[-1].flags |= CCW_FLAG_CC; 2614 2615 locate_record(ccw++, data, fdata->start_unit, 0, count, 2616 DASD_ECKD_CCW_READ_COUNT, base, 0); 2617 } 2618 2619 for (i = 0; i < count; i++) { 2620 ccw[-1].flags |= CCW_FLAG_CC; 2621 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT; 2622 ccw->flags = CCW_FLAG_SLI; 2623 ccw->count = 8; 2624 ccw->cda = (__u32)(addr_t) fmt_buffer; 2625 ccw++; 2626 
fmt_buffer++; 2627 } 2628 2629 cqr->startdev = startdev; 2630 cqr->memdev = startdev; 2631 cqr->basedev = base; 2632 cqr->retries = DASD_RETRIES; 2633 cqr->expires = startdev->default_expires * HZ; 2634 cqr->buildclk = get_tod_clock(); 2635 cqr->status = DASD_CQR_FILLED; 2636 /* Set flags to suppress output for expected errors */ 2637 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 2638 2639 return cqr; 2640 } 2641 2642 static struct dasd_ccw_req * 2643 dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev, 2644 struct format_data_t *fdata, int enable_pav) 2645 { 2646 struct dasd_eckd_private *base_priv; 2647 struct dasd_eckd_private *start_priv; 2648 struct dasd_ccw_req *fcp; 2649 struct eckd_count *ect; 2650 struct ch_t address; 2651 struct ccw1 *ccw; 2652 void *data; 2653 int rpt; 2654 int cplength, datasize; 2655 int i, j; 2656 int intensity = 0; 2657 int r0_perm; 2658 int nr_tracks; 2659 int use_prefix; 2660 2661 if (enable_pav) 2662 startdev = dasd_alias_get_start_dev(base); 2663 2664 if (!startdev) 2665 startdev = base; 2666 2667 start_priv = startdev->private; 2668 base_priv = base->private; 2669 2670 rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); 2671 2672 nr_tracks = fdata->stop_unit - fdata->start_unit + 1; 2673 2674 /* 2675 * fdata->intensity is a bit string that tells us what to do: 2676 * Bit 0: write record zero 2677 * Bit 1: write home address, currently not supported 2678 * Bit 2: invalidate tracks 2679 * Bit 3: use OS/390 compatible disk layout (cdl) 2680 * Bit 4: do not allow storage subsystem to modify record zero 2681 * Only some bit combinations do make sense. 2682 */ 2683 if (fdata->intensity & 0x10) { 2684 r0_perm = 0; 2685 intensity = fdata->intensity & ~0x10; 2686 } else { 2687 r0_perm = 1; 2688 intensity = fdata->intensity; 2689 } 2690 2691 use_prefix = base_priv->features.feature[8] & 0x01; 2692 2693 switch (intensity) { 2694 case 0x00: /* Normal format */ 2695 case 0x08: /* Normal format, use cdl. */ 2696 cplength = 2 + (rpt*nr_tracks); 2697 if (use_prefix) 2698 datasize = sizeof(struct PFX_eckd_data) + 2699 sizeof(struct LO_eckd_data) + 2700 rpt * nr_tracks * sizeof(struct eckd_count); 2701 else 2702 datasize = sizeof(struct DE_eckd_data) + 2703 sizeof(struct LO_eckd_data) + 2704 rpt * nr_tracks * sizeof(struct eckd_count); 2705 break; 2706 case 0x01: /* Write record zero and format track. */ 2707 case 0x09: /* Write record zero and format track, use cdl. */ 2708 cplength = 2 + rpt * nr_tracks; 2709 if (use_prefix) 2710 datasize = sizeof(struct PFX_eckd_data) + 2711 sizeof(struct LO_eckd_data) + 2712 sizeof(struct eckd_count) + 2713 rpt * nr_tracks * sizeof(struct eckd_count); 2714 else 2715 datasize = sizeof(struct DE_eckd_data) + 2716 sizeof(struct LO_eckd_data) + 2717 sizeof(struct eckd_count) + 2718 rpt * nr_tracks * sizeof(struct eckd_count); 2719 break; 2720 case 0x04: /* Invalidate track. */ 2721 case 0x0c: /* Invalidate track, use cdl. 
*/ 2722 cplength = 3; 2723 if (use_prefix) 2724 datasize = sizeof(struct PFX_eckd_data) + 2725 sizeof(struct LO_eckd_data) + 2726 sizeof(struct eckd_count); 2727 else 2728 datasize = sizeof(struct DE_eckd_data) + 2729 sizeof(struct LO_eckd_data) + 2730 sizeof(struct eckd_count); 2731 break; 2732 default: 2733 dev_warn(&startdev->cdev->dev, 2734 "An I/O control call used incorrect flags 0x%x\n", 2735 fdata->intensity); 2736 return ERR_PTR(-EINVAL); 2737 } 2738 2739 fcp = dasd_fmalloc_request(DASD_ECKD_MAGIC, cplength, datasize, startdev); 2740 if (IS_ERR(fcp)) 2741 return fcp; 2742 2743 start_priv->count++; 2744 data = fcp->data; 2745 ccw = fcp->cpaddr; 2746 2747 switch (intensity & ~0x08) { 2748 case 0x00: /* Normal format. */ 2749 if (use_prefix) { 2750 prefix(ccw++, (struct PFX_eckd_data *) data, 2751 fdata->start_unit, fdata->stop_unit, 2752 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2753 /* grant subsystem permission to format R0 */ 2754 if (r0_perm) 2755 ((struct PFX_eckd_data *)data) 2756 ->define_extent.ga_extended |= 0x04; 2757 data += sizeof(struct PFX_eckd_data); 2758 } else { 2759 define_extent(ccw++, (struct DE_eckd_data *) data, 2760 fdata->start_unit, fdata->stop_unit, 2761 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2762 /* grant subsystem permission to format R0 */ 2763 if (r0_perm) 2764 ((struct DE_eckd_data *) data) 2765 ->ga_extended |= 0x04; 2766 data += sizeof(struct DE_eckd_data); 2767 } 2768 ccw[-1].flags |= CCW_FLAG_CC; 2769 locate_record(ccw++, (struct LO_eckd_data *) data, 2770 fdata->start_unit, 0, rpt*nr_tracks, 2771 DASD_ECKD_CCW_WRITE_CKD, base, 2772 fdata->blksize); 2773 data += sizeof(struct LO_eckd_data); 2774 break; 2775 case 0x01: /* Write record zero + format track. */ 2776 if (use_prefix) { 2777 prefix(ccw++, (struct PFX_eckd_data *) data, 2778 fdata->start_unit, fdata->stop_unit, 2779 DASD_ECKD_CCW_WRITE_RECORD_ZERO, 2780 base, startdev); 2781 data += sizeof(struct PFX_eckd_data); 2782 } else { 2783 define_extent(ccw++, (struct DE_eckd_data *) data, 2784 fdata->start_unit, fdata->stop_unit, 2785 DASD_ECKD_CCW_WRITE_RECORD_ZERO, startdev, 0); 2786 data += sizeof(struct DE_eckd_data); 2787 } 2788 ccw[-1].flags |= CCW_FLAG_CC; 2789 locate_record(ccw++, (struct LO_eckd_data *) data, 2790 fdata->start_unit, 0, rpt * nr_tracks + 1, 2791 DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, 2792 base->block->bp_block); 2793 data += sizeof(struct LO_eckd_data); 2794 break; 2795 case 0x04: /* Invalidate track. 
*/ 2796 if (use_prefix) { 2797 prefix(ccw++, (struct PFX_eckd_data *) data, 2798 fdata->start_unit, fdata->stop_unit, 2799 DASD_ECKD_CCW_WRITE_CKD, base, startdev); 2800 data += sizeof(struct PFX_eckd_data); 2801 } else { 2802 define_extent(ccw++, (struct DE_eckd_data *) data, 2803 fdata->start_unit, fdata->stop_unit, 2804 DASD_ECKD_CCW_WRITE_CKD, startdev, 0); 2805 data += sizeof(struct DE_eckd_data); 2806 } 2807 ccw[-1].flags |= CCW_FLAG_CC; 2808 locate_record(ccw++, (struct LO_eckd_data *) data, 2809 fdata->start_unit, 0, 1, 2810 DASD_ECKD_CCW_WRITE_CKD, base, 8); 2811 data += sizeof(struct LO_eckd_data); 2812 break; 2813 } 2814 2815 for (j = 0; j < nr_tracks; j++) { 2816 /* calculate cylinder and head for the current track */ 2817 set_ch_t(&address, 2818 (fdata->start_unit + j) / 2819 base_priv->rdc_data.trk_per_cyl, 2820 (fdata->start_unit + j) % 2821 base_priv->rdc_data.trk_per_cyl); 2822 if (intensity & 0x01) { /* write record zero */ 2823 ect = (struct eckd_count *) data; 2824 data += sizeof(struct eckd_count); 2825 ect->cyl = address.cyl; 2826 ect->head = address.head; 2827 ect->record = 0; 2828 ect->kl = 0; 2829 ect->dl = 8; 2830 ccw[-1].flags |= CCW_FLAG_CC; 2831 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; 2832 ccw->flags = CCW_FLAG_SLI; 2833 ccw->count = 8; 2834 ccw->cda = (__u32)(addr_t) ect; 2835 ccw++; 2836 } 2837 if ((intensity & ~0x08) & 0x04) { /* erase track */ 2838 ect = (struct eckd_count *) data; 2839 data += sizeof(struct eckd_count); 2840 ect->cyl = address.cyl; 2841 ect->head = address.head; 2842 ect->record = 1; 2843 ect->kl = 0; 2844 ect->dl = 0; 2845 ccw[-1].flags |= CCW_FLAG_CC; 2846 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; 2847 ccw->flags = CCW_FLAG_SLI; 2848 ccw->count = 8; 2849 ccw->cda = (__u32)(addr_t) ect; 2850 } else { /* write remaining records */ 2851 for (i = 0; i < rpt; i++) { 2852 ect = (struct eckd_count *) data; 2853 data += sizeof(struct eckd_count); 2854 ect->cyl = address.cyl; 2855 ect->head = address.head; 2856 ect->record = i + 1; 2857 ect->kl = 0; 2858 ect->dl = fdata->blksize; 2859 /* 2860 * Check for special tracks 0-1 2861 * when formatting CDL 2862 */ 2863 if ((intensity & 0x08) && 2864 address.cyl == 0 && address.head == 0) { 2865 if (i < 3) { 2866 ect->kl = 4; 2867 ect->dl = sizes_trk0[i] - 4; 2868 } 2869 } 2870 if ((intensity & 0x08) && 2871 address.cyl == 0 && address.head == 1) { 2872 ect->kl = 44; 2873 ect->dl = LABEL_SIZE - 44; 2874 } 2875 ccw[-1].flags |= CCW_FLAG_CC; 2876 if (i != 0 || j == 0) 2877 ccw->cmd_code = 2878 DASD_ECKD_CCW_WRITE_CKD; 2879 else 2880 ccw->cmd_code = 2881 DASD_ECKD_CCW_WRITE_CKD_MT; 2882 ccw->flags = CCW_FLAG_SLI; 2883 ccw->count = 8; 2884 ccw->cda = (__u32)(addr_t) ect; 2885 ccw++; 2886 } 2887 } 2888 } 2889 2890 fcp->startdev = startdev; 2891 fcp->memdev = startdev; 2892 fcp->basedev = base; 2893 fcp->retries = 256; 2894 fcp->expires = startdev->default_expires * HZ; 2895 fcp->buildclk = get_tod_clock(); 2896 fcp->status = DASD_CQR_FILLED; 2897 2898 return fcp; 2899 } 2900 2901 /* 2902 * Wrapper function to build a CCW request depending on input data 2903 */ 2904 static struct dasd_ccw_req * 2905 dasd_eckd_format_build_ccw_req(struct dasd_device *base, 2906 struct format_data_t *fdata, int enable_pav, 2907 int tpm, struct eckd_count *fmt_buffer, int rpt) 2908 { 2909 struct dasd_ccw_req *ccw_req; 2910 2911 if (!fmt_buffer) { 2912 ccw_req = dasd_eckd_build_format(base, NULL, fdata, enable_pav); 2913 } else { 2914 if (tpm) 2915 ccw_req = dasd_eckd_build_check_tcw(base, fdata, 2916 enable_pav, 2917 
fmt_buffer, rpt);
2918 else
2919 ccw_req = dasd_eckd_build_check(base, fdata, enable_pav,
2920 fmt_buffer, rpt);
2921 }
2922
2923 return ccw_req;
2924 }
2925
2926 /*
2927 * Sanity checks on format_data
2928 */
2929 static int dasd_eckd_format_sanity_checks(struct dasd_device *base,
2930 struct format_data_t *fdata)
2931 {
2932 struct dasd_eckd_private *private = base->private;
2933
2934 if (fdata->start_unit >=
2935 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2936 dev_warn(&base->cdev->dev,
2937 "Start track number %u used in formatting is too big\n",
2938 fdata->start_unit);
2939 return -EINVAL;
2940 }
2941 if (fdata->stop_unit >=
2942 (private->real_cyl * private->rdc_data.trk_per_cyl)) {
2943 dev_warn(&base->cdev->dev,
2944 "Stop track number %u used in formatting is too big\n",
2945 fdata->stop_unit);
2946 return -EINVAL;
2947 }
2948 if (fdata->start_unit > fdata->stop_unit) {
2949 dev_warn(&base->cdev->dev,
2950 "Start track %u used in formatting exceeds end track\n",
2951 fdata->start_unit);
2952 return -EINVAL;
2953 }
2954 if (dasd_check_blocksize(fdata->blksize) != 0) {
2955 dev_warn(&base->cdev->dev,
2956 "The DASD cannot be formatted with block size %u\n",
2957 fdata->blksize);
2958 return -EINVAL;
2959 }
2960 return 0;
2961 }
2962
2963 /*
2964 * This function will process format_data originally coming from an IOCTL
2965 */
2966 static int dasd_eckd_format_process_data(struct dasd_device *base,
2967 struct format_data_t *fdata,
2968 int enable_pav, int tpm,
2969 struct eckd_count *fmt_buffer, int rpt,
2970 struct irb *irb)
2971 {
2972 struct dasd_eckd_private *private = base->private;
2973 struct dasd_ccw_req *cqr, *n;
2974 struct list_head format_queue;
2975 struct dasd_device *device;
2976 char *sense = NULL;
2977 int old_start, old_stop, format_step;
2978 int step, retry;
2979 int rc;
2980
2981 rc = dasd_eckd_format_sanity_checks(base, fdata);
2982 if (rc)
2983 return rc;
2984
2985 INIT_LIST_HEAD(&format_queue);
2986
2987 old_start = fdata->start_unit;
2988 old_stop = fdata->stop_unit;
2989
2990 if (!tpm && fmt_buffer != NULL) {
2991 /* Command Mode / Format Check */
2992 format_step = 1;
2993 } else if (tpm && fmt_buffer != NULL) {
2994 /* Transport Mode / Format Check */
2995 format_step = DASD_CQR_MAX_CCW / rpt;
2996 } else {
2997 /* Normal Formatting */
2998 format_step = DASD_CQR_MAX_CCW /
2999 recs_per_track(&private->rdc_data, 0, fdata->blksize);
3000 }
3001
3002 do {
3003 retry = 0;
3004 while (fdata->start_unit <= old_stop) {
3005 step = fdata->stop_unit - fdata->start_unit + 1;
3006 if (step > format_step) {
3007 fdata->stop_unit =
3008 fdata->start_unit + format_step - 1;
3009 }
3010
3011 cqr = dasd_eckd_format_build_ccw_req(base, fdata,
3012 enable_pav, tpm,
3013 fmt_buffer, rpt);
3014 if (IS_ERR(cqr)) {
3015 rc = PTR_ERR(cqr);
3016 if (rc == -ENOMEM) {
3017 if (list_empty(&format_queue))
3018 goto out;
3019 /*
3020 * not enough memory available; submit
3021 * the requests built so far and retry
3022 * the remainder after they have finished
3023 */
3024 retry = 1;
3025 break;
3026 }
3027 goto out_err;
3028 }
3029 list_add_tail(&cqr->blocklist, &format_queue);
3030
3031 if (fmt_buffer) {
3032 step = fdata->stop_unit - fdata->start_unit + 1;
3033 fmt_buffer += rpt * step;
3034 }
3035 fdata->start_unit = fdata->stop_unit + 1;
3036 fdata->stop_unit = old_stop;
3037 }
3038
3039 rc = dasd_sleep_on_queue(&format_queue);
3040
3041 out_err:
3042 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
3043 device = cqr->startdev;
3044 private = device->private;
3045
3046 if
(cqr->status == DASD_CQR_FAILED) { 3047 /* 3048 * Only get sense data if called by format 3049 * check 3050 */ 3051 if (fmt_buffer && irb) { 3052 sense = dasd_get_sense(&cqr->irb); 3053 memcpy(irb, &cqr->irb, sizeof(*irb)); 3054 } 3055 rc = -EIO; 3056 } 3057 list_del_init(&cqr->blocklist); 3058 dasd_ffree_request(cqr, device); 3059 private->count--; 3060 } 3061 3062 if (rc && rc != -EIO) 3063 goto out; 3064 if (rc == -EIO) { 3065 /* 3066 * In case fewer than the expected records are on the 3067 * track, we will most likely get a 'No Record Found' 3068 * error (in command mode) or a 'File Protected' error 3069 * (in transport mode). Those particular cases shouldn't 3070 * pass the -EIO to the IOCTL, therefore reset the rc 3071 * and continue. 3072 */ 3073 if (sense && 3074 (sense[1] & SNS1_NO_REC_FOUND || 3075 sense[1] & SNS1_FILE_PROTECTED)) 3076 retry = 1; 3077 else 3078 goto out; 3079 } 3080 3081 } while (retry); 3082 3083 out: 3084 fdata->start_unit = old_start; 3085 fdata->stop_unit = old_stop; 3086 3087 return rc; 3088 } 3089 3090 static int dasd_eckd_format_device(struct dasd_device *base, 3091 struct format_data_t *fdata, int enable_pav) 3092 { 3093 return dasd_eckd_format_process_data(base, fdata, enable_pav, 0, NULL, 3094 0, NULL); 3095 } 3096 3097 static bool test_and_set_format_track(struct dasd_format_entry *to_format, 3098 struct dasd_block *block) 3099 { 3100 struct dasd_format_entry *format; 3101 unsigned long flags; 3102 bool rc = false; 3103 3104 spin_lock_irqsave(&block->format_lock, flags); 3105 list_for_each_entry(format, &block->format_list, list) { 3106 if (format->track == to_format->track) { 3107 rc = true; 3108 goto out; 3109 } 3110 } 3111 list_add_tail(&to_format->list, &block->format_list); 3112 3113 out: 3114 spin_unlock_irqrestore(&block->format_lock, flags); 3115 return rc; 3116 } 3117 3118 static void clear_format_track(struct dasd_format_entry *format, 3119 struct dasd_block *block) 3120 { 3121 unsigned long flags; 3122 3123 spin_lock_irqsave(&block->format_lock, flags); 3124 list_del_init(&format->list); 3125 spin_unlock_irqrestore(&block->format_lock, flags); 3126 } 3127 3128 /* 3129 * Callback function to free ESE format requests. 
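* It removes the now-formatted track from the block's format_list and
* frees the format request again.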
3130 */
3131 static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
3132 {
3133 struct dasd_device *device = cqr->startdev;
3134 struct dasd_eckd_private *private = device->private;
3135 struct dasd_format_entry *format = data;
3136
3137 clear_format_track(format, cqr->basedev->block);
3138 private->count--;
3139 dasd_ffree_request(cqr, device);
3140 }
3141
3142 static struct dasd_ccw_req *
3143 dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
3144 struct irb *irb)
3145 {
3146 struct dasd_eckd_private *private;
3147 struct dasd_format_entry *format;
3148 struct format_data_t fdata;
3149 unsigned int recs_per_trk;
3150 struct dasd_ccw_req *fcqr;
3151 struct dasd_device *base;
3152 struct dasd_block *block;
3153 unsigned int blksize;
3154 struct request *req;
3155 sector_t first_trk;
3156 sector_t last_trk;
3157 sector_t curr_trk;
3158 int rc;
3159
3160 req = cqr->callback_data;
3161 block = cqr->block;
3162 base = block->base;
3163 private = base->private;
3164 blksize = block->bp_block;
3165 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3166 format = &startdev->format_entry;
3167
3168 first_trk = blk_rq_pos(req) >> block->s2b_shift;
3169 sector_div(first_trk, recs_per_trk);
3170 last_trk =
3171 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3172 sector_div(last_trk, recs_per_trk);
3173 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3174 if (rc)
3175 return ERR_PTR(rc);
3176
3177 if (curr_trk < first_trk || curr_trk > last_trk) {
3178 DBF_DEV_EVENT(DBF_WARNING, startdev,
3179 "ESE error track %llu not within range %llu - %llu\n",
3180 curr_trk, first_trk, last_trk);
3181 return ERR_PTR(-EINVAL);
3182 }
3183 format->track = curr_trk;
3184 /* check whether the track is already being formatted by another thread */
3185 if (test_and_set_format_track(format, block))
3186 return ERR_PTR(-EEXIST);
3187
3188 fdata.start_unit = curr_trk;
3189 fdata.stop_unit = curr_trk;
3190 fdata.blksize = blksize;
3191 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
3192
3193 rc = dasd_eckd_format_sanity_checks(base, &fdata);
3194 if (rc)
3195 return ERR_PTR(-EINVAL);
3196
3197 /*
3198 * We're building the request with PAV disabled as we're reusing
3199 * the former startdev.
3200 */
3201 fcqr = dasd_eckd_build_format(base, startdev, &fdata, 0);
3202 if (IS_ERR(fcqr))
3203 return fcqr;
3204
3205 fcqr->callback = dasd_eckd_ese_format_cb;
3206 fcqr->callback_data = (void *) format;
3207
3208 return fcqr;
3209 }
3210
3211 /*
3212 * When data is read from an unformatted area of an ESE volume, this function
3213 * returns zeroed data and thereby mimics a read of an all-zero area.
3214 *
3215 * The first unformatted track is the one that got the NRF error; its address
3216 * is encoded in the sense data.
3217 *
3218 * All tracks before have returned valid data and should not be touched.
3219 * All tracks after the unformatted track might be formatted or not. Since this
3220 * is currently not known, remember the amount of processed data and return the
3221 * remainder of the request to the block layer in __dasd_cleanup_cqr().
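*
* Illustrative example (hypothetical numbers): with 12 records per track, a
* request for blocks 30-59 starts on track 2. If track 3 raises the NRF error,
* the six blocks 30-35 on track 2 keep their data (skip_block = 3 * 12 - 30),
* blocks 36-47 on track 3 are zeroed, and processing stops at end_blk = 48;
* proc_bytes then covers the 18 handled blocks and blocks 48-59 are handed
* back to the block layer.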
3222 */
3223 static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
3224 {
3225 struct dasd_eckd_private *private;
3226 sector_t first_trk, last_trk;
3227 sector_t first_blk, last_blk;
3228 unsigned int blksize, off;
3229 unsigned int recs_per_trk;
3230 struct dasd_device *base;
3231 struct req_iterator iter;
3232 struct dasd_block *block;
3233 unsigned int skip_block;
3234 unsigned int blk_count;
3235 struct request *req;
3236 struct bio_vec bv;
3237 sector_t curr_trk;
3238 sector_t end_blk;
3239 char *dst;
3240 int rc;
3241
3242 req = (struct request *) cqr->callback_data;
3243 base = cqr->block->base;
3244 blksize = base->block->bp_block;
3245 block = cqr->block;
3246 private = base->private;
3247 skip_block = 0;
3248 blk_count = 0;
3249
3250 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
3251 first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
3252 sector_div(first_trk, recs_per_trk);
3253 last_trk = last_blk =
3254 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
3255 sector_div(last_trk, recs_per_trk);
3256 rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
3257 if (rc)
3258 return rc;
3259
3260 /* sanity check if the current track from sense data is valid */
3261 if (curr_trk < first_trk || curr_trk > last_trk) {
3262 DBF_DEV_EVENT(DBF_WARNING, base,
3263 "ESE error track %llu not within range %llu - %llu\n",
3264 curr_trk, first_trk, last_trk);
3265 return -EINVAL;
3266 }
3267
3268 /*
3269 * if a track other than the first got the NRF error, we have to skip
3270 * over the valid blocks that precede it
3271 */
3272 if (curr_trk != first_trk)
3273 skip_block = curr_trk * recs_per_trk - first_blk;
3274
3275 /* we have no information beyond the current track */
3276 end_blk = (curr_trk + 1) * recs_per_trk;
3277
3278 rq_for_each_segment(bv, req, iter) {
3279 dst = bvec_virt(&bv);
3280 for (off = 0; off < bv.bv_len; off += blksize) {
3281 if (first_blk + blk_count >= end_blk) {
3282 cqr->proc_bytes = blk_count * blksize;
3283 return 0;
3284 }
3285 if (dst && !skip_block) {
3286 dst += off;
3287 memset(dst, 0, blksize);
3288 } else {
3289 skip_block--;
3290 }
3291 blk_count++;
3292 }
3293 }
3294 return 0;
3295 }
3296
3297 /*
3298 * Helper function to count consecutive records of a single track.
3299 */
3300 static int dasd_eckd_count_records(struct eckd_count *fmt_buffer, int start,
3301 int max)
3302 {
3303 int head;
3304 int i;
3305
3306 head = fmt_buffer[start].head;
3307
3308 /*
3309 * There are 3 conditions where we stop counting:
3310 * - if data reoccurs (same head and record may reoccur), which may
3311 * happen due to the way DASD_ECKD_CCW_READ_COUNT works
3312 * - when the head changes, because we're iterating over several tracks
3313 * then (DASD_ECKD_CCW_READ_COUNT_MT)
3314 * - when we've reached the end of sensible data in the buffer (the
3315 * record will be 0 then)
3316 */
3317 for (i = start; i < max; i++) {
3318 if (i > start) {
3319 if ((fmt_buffer[i].head == head &&
3320 fmt_buffer[i].record == 1) ||
3321 fmt_buffer[i].head != head ||
3322 fmt_buffer[i].record == 0)
3323 break;
3324 }
3325 }
3326
3327 return i - start;
3328 }
3329
3330 /*
3331 * Evaluate a given range of tracks. Data like number of records, blocksize,
3332 * record ids, and key length are compared with expected data.
3333 *
3334 * If a mismatch occurs, the corresponding error bit is set, as well as
3335 * additional information, depending on the error.
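* For example, a record whose data length does not match the expected
* blocksize sets DASD_FMT_ERR_BLKSIZE; cdata->unit, ->rec, ->blksize, and
* ->key_length then identify the offending record.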
3336 */ 3337 static void dasd_eckd_format_evaluate_tracks(struct eckd_count *fmt_buffer, 3338 struct format_check_t *cdata, 3339 int rpt_max, int rpt_exp, 3340 int trk_per_cyl, int tpm) 3341 { 3342 struct ch_t geo; 3343 int max_entries; 3344 int count = 0; 3345 int trkcount; 3346 int blksize; 3347 int pos = 0; 3348 int i, j; 3349 int kl; 3350 3351 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1; 3352 max_entries = trkcount * rpt_max; 3353 3354 for (i = cdata->expect.start_unit; i <= cdata->expect.stop_unit; i++) { 3355 /* Calculate the correct next starting position in the buffer */ 3356 if (tpm) { 3357 while (fmt_buffer[pos].record == 0 && 3358 fmt_buffer[pos].dl == 0) { 3359 if (pos++ > max_entries) 3360 break; 3361 } 3362 } else { 3363 if (i != cdata->expect.start_unit) 3364 pos += rpt_max - count; 3365 } 3366 3367 /* Calculate the expected geo values for the current track */ 3368 set_ch_t(&geo, i / trk_per_cyl, i % trk_per_cyl); 3369 3370 /* Count and check number of records */ 3371 count = dasd_eckd_count_records(fmt_buffer, pos, pos + rpt_max); 3372 3373 if (count < rpt_exp) { 3374 cdata->result = DASD_FMT_ERR_TOO_FEW_RECORDS; 3375 break; 3376 } 3377 if (count > rpt_exp) { 3378 cdata->result = DASD_FMT_ERR_TOO_MANY_RECORDS; 3379 break; 3380 } 3381 3382 for (j = 0; j < count; j++, pos++) { 3383 blksize = cdata->expect.blksize; 3384 kl = 0; 3385 3386 /* 3387 * Set special values when checking CDL formatted 3388 * devices. 3389 */ 3390 if ((cdata->expect.intensity & 0x08) && 3391 geo.cyl == 0 && geo.head == 0) { 3392 if (j < 3) { 3393 blksize = sizes_trk0[j] - 4; 3394 kl = 4; 3395 } 3396 } 3397 if ((cdata->expect.intensity & 0x08) && 3398 geo.cyl == 0 && geo.head == 1) { 3399 blksize = LABEL_SIZE - 44; 3400 kl = 44; 3401 } 3402 3403 /* Check blocksize */ 3404 if (fmt_buffer[pos].dl != blksize) { 3405 cdata->result = DASD_FMT_ERR_BLKSIZE; 3406 goto out; 3407 } 3408 /* Check if key length is 0 */ 3409 if (fmt_buffer[pos].kl != kl) { 3410 cdata->result = DASD_FMT_ERR_KEY_LENGTH; 3411 goto out; 3412 } 3413 /* Check if record_id is correct */ 3414 if (fmt_buffer[pos].cyl != geo.cyl || 3415 fmt_buffer[pos].head != geo.head || 3416 fmt_buffer[pos].record != (j + 1)) { 3417 cdata->result = DASD_FMT_ERR_RECORD_ID; 3418 goto out; 3419 } 3420 } 3421 } 3422 3423 out: 3424 /* 3425 * In case of no errors, we need to decrease by one 3426 * to get the correct positions. 3427 */ 3428 if (!cdata->result) { 3429 i--; 3430 pos--; 3431 } 3432 3433 cdata->unit = i; 3434 cdata->num_records = count; 3435 cdata->rec = fmt_buffer[pos].record; 3436 cdata->blksize = fmt_buffer[pos].dl; 3437 cdata->key_length = fmt_buffer[pos].kl; 3438 } 3439 3440 /* 3441 * Check the format of a range of tracks of a DASD. 
3442 */
3443 static int dasd_eckd_check_device_format(struct dasd_device *base,
3444 struct format_check_t *cdata,
3445 int enable_pav)
3446 {
3447 struct dasd_eckd_private *private = base->private;
3448 struct eckd_count *fmt_buffer;
3449 struct irb irb;
3450 int rpt_max, rpt_exp;
3451 int fmt_buffer_size;
3452 int trk_per_cyl;
3453 int trkcount;
3454 int tpm = 0;
3455 int rc;
3456
3457 trk_per_cyl = private->rdc_data.trk_per_cyl;
3458
3459 /* Get the maximum and the expected number of records per track */
3460 rpt_max = recs_per_track(&private->rdc_data, 0, 512) + 1;
3461 rpt_exp = recs_per_track(&private->rdc_data, 0, cdata->expect.blksize);
3462
3463 trkcount = cdata->expect.stop_unit - cdata->expect.start_unit + 1;
3464 fmt_buffer_size = trkcount * rpt_max * sizeof(struct eckd_count);
3465
3466 fmt_buffer = kzalloc(fmt_buffer_size, GFP_KERNEL | GFP_DMA);
3467 if (!fmt_buffer)
3468 return -ENOMEM;
3469
3470 /*
3471 * A certain FICON feature subset is needed to operate in transport
3472 * mode. Additionally, the support for transport mode is implicitly
3473 * checked by comparing the buffer size with fcx_max_data. As long as
3474 * the buffer size is smaller, we can operate in transport mode and
3475 * process multiple tracks at once. Otherwise, only one track at a
3476 * time is processed, using command mode.
3477 */
3478 if ((private->features.feature[40] & 0x04) &&
3479 fmt_buffer_size <= private->fcx_max_data)
3480 tpm = 1;
3481
3482 rc = dasd_eckd_format_process_data(base, &cdata->expect, enable_pav,
3483 tpm, fmt_buffer, rpt_max, &irb);
3484 if (rc && rc != -EIO)
3485 goto out;
3486 if (rc == -EIO) {
3487 /*
3488 * If our first attempt with transport mode enabled comes back
3489 * with an incorrect length error, we're going to retry the
3490 * check with command mode.
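* (A channel status of 0x40 in the SCSW is the incorrect-length
* indication.)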
3491 */ 3492 if (tpm && scsw_cstat(&irb.scsw) == 0x40) { 3493 tpm = 0; 3494 rc = dasd_eckd_format_process_data(base, &cdata->expect, 3495 enable_pav, tpm, 3496 fmt_buffer, rpt_max, 3497 &irb); 3498 if (rc) 3499 goto out; 3500 } else { 3501 goto out; 3502 } 3503 } 3504 3505 dasd_eckd_format_evaluate_tracks(fmt_buffer, cdata, rpt_max, rpt_exp, 3506 trk_per_cyl, tpm); 3507 3508 out: 3509 kfree(fmt_buffer); 3510 3511 return rc; 3512 } 3513 3514 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) 3515 { 3516 if (cqr->retries < 0) { 3517 cqr->status = DASD_CQR_FAILED; 3518 return; 3519 } 3520 cqr->status = DASD_CQR_FILLED; 3521 if (cqr->block && (cqr->startdev != cqr->block->base)) { 3522 dasd_eckd_reset_ccw_to_base_io(cqr); 3523 cqr->startdev = cqr->block->base; 3524 cqr->lpm = dasd_path_get_opm(cqr->block->base); 3525 } 3526 }; 3527 3528 static dasd_erp_fn_t 3529 dasd_eckd_erp_action(struct dasd_ccw_req * cqr) 3530 { 3531 struct dasd_device *device = (struct dasd_device *) cqr->startdev; 3532 struct ccw_device *cdev = device->cdev; 3533 3534 switch (cdev->id.cu_type) { 3535 case 0x3990: 3536 case 0x2105: 3537 case 0x2107: 3538 case 0x1750: 3539 return dasd_3990_erp_action; 3540 case 0x9343: 3541 case 0x3880: 3542 default: 3543 return dasd_default_erp_action; 3544 } 3545 } 3546 3547 static dasd_erp_fn_t 3548 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr) 3549 { 3550 return dasd_default_erp_postaction; 3551 } 3552 3553 static void dasd_eckd_check_for_device_change(struct dasd_device *device, 3554 struct dasd_ccw_req *cqr, 3555 struct irb *irb) 3556 { 3557 char mask; 3558 char *sense = NULL; 3559 struct dasd_eckd_private *private = device->private; 3560 3561 /* first of all check for state change pending interrupt */ 3562 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP; 3563 if ((scsw_dstat(&irb->scsw) & mask) == mask) { 3564 /* 3565 * for alias only, not in offline processing 3566 * and only if not suspended 3567 */ 3568 if (!device->block && private->lcu && 3569 device->state == DASD_STATE_ONLINE && 3570 !test_bit(DASD_FLAG_OFFLINE, &device->flags) && 3571 !test_bit(DASD_FLAG_SUSPENDED, &device->flags)) { 3572 /* schedule worker to reload device */ 3573 dasd_reload_device(device); 3574 } 3575 dasd_generic_handle_state_change(device); 3576 return; 3577 } 3578 3579 sense = dasd_get_sense(irb); 3580 if (!sense) 3581 return; 3582 3583 /* summary unit check */ 3584 if ((sense[27] & DASD_SENSE_BIT_0) && (sense[7] == 0x0D) && 3585 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK)) { 3586 if (test_and_set_bit(DASD_FLAG_SUC, &device->flags)) { 3587 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3588 "eckd suc: device already notified"); 3589 return; 3590 } 3591 sense = dasd_get_sense(irb); 3592 if (!sense) { 3593 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3594 "eckd suc: no reason code available"); 3595 clear_bit(DASD_FLAG_SUC, &device->flags); 3596 return; 3597 3598 } 3599 private->suc_reason = sense[8]; 3600 DBF_DEV_EVENT(DBF_NOTICE, device, "%s %x", 3601 "eckd handle summary unit check: reason", 3602 private->suc_reason); 3603 dasd_get_device(device); 3604 if (!schedule_work(&device->suc_work)) 3605 dasd_put_device(device); 3606 3607 return; 3608 } 3609 3610 /* service information message SIM */ 3611 if (!cqr && !(sense[27] & DASD_SENSE_BIT_0) && 3612 ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) { 3613 dasd_3990_erp_handle_sim(device, sense); 3614 return; 3615 } 3616 3617 /* loss of device reservation is handled via base devices only 3618 * as alias devices may 
be used with several bases
3619 */
3620 if (device->block && (sense[27] & DASD_SENSE_BIT_0) &&
3621 (sense[7] == 0x3F) &&
3622 (scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
3623 test_bit(DASD_FLAG_IS_RESERVED, &device->flags)) {
3624 if (device->features & DASD_FEATURE_FAILONSLCK)
3625 set_bit(DASD_FLAG_LOCK_STOLEN, &device->flags);
3626 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags);
3627 dev_err(&device->cdev->dev,
3628 "The device reservation was lost\n");
3629 }
3630 }
3631
3632 static int dasd_eckd_ras_sanity_checks(struct dasd_device *device,
3633 unsigned int first_trk,
3634 unsigned int last_trk)
3635 {
3636 struct dasd_eckd_private *private = device->private;
3637 unsigned int trks_per_vol;
3638 int rc = 0;
3639
3640 trks_per_vol = private->real_cyl * private->rdc_data.trk_per_cyl;
3641
3642 if (first_trk >= trks_per_vol) {
3643 dev_warn(&device->cdev->dev,
3644 "Start track number %u used in the space release command is too big\n",
3645 first_trk);
3646 rc = -EINVAL;
3647 } else if (last_trk >= trks_per_vol) {
3648 dev_warn(&device->cdev->dev,
3649 "Stop track number %u used in the space release command is too big\n",
3650 last_trk);
3651 rc = -EINVAL;
3652 } else if (first_trk > last_trk) {
3653 dev_warn(&device->cdev->dev,
3654 "Start track %u used in the space release command exceeds the end track\n",
3655 first_trk);
3656 rc = -EINVAL;
3657 }
3658 return rc;
3659 }
3660
3661 /*
3662 * Helper function to count the number of extents involved in a given range,
3663 * taking extent alignment into account. E.g. with 16 tracks per extent, tracks 5-40 span three extents: 5-15 (partial), 16-31 (full), and 32-40 (partial).
3664 */
3665 static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
3666 {
3667 int cur_pos = 0;
3668 int count = 0;
3669 int tmp;
3670
3671 if (from == to)
3672 return 1;
3673
3674 /* Count first partial extent */
3675 if (from % trks_per_ext != 0) {
3676 tmp = from + trks_per_ext - (from % trks_per_ext) - 1;
3677 if (tmp > to)
3678 tmp = to;
3679 cur_pos = tmp - from + 1;
3680 count++;
3681 }
3682 /* Count full extents */
3683 if (to - (from + cur_pos) + 1 >= trks_per_ext) {
3684 tmp = to - ((to - trks_per_ext + 1) % trks_per_ext);
3685 count += (tmp - (from + cur_pos) + 1) / trks_per_ext;
3686 cur_pos = tmp;
3687 }
3688 /* Count last partial extent */
3689 if (cur_pos < to)
3690 count++;
3691
3692 return count;
3693 }
3694
3695 /*
3696 * Release allocated space for a given range or an entire volume.
3697 */
3698 static struct dasd_ccw_req *
3699 dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
3700 struct request *req, unsigned int first_trk,
3701 unsigned int last_trk, int by_extent)
3702 {
3703 struct dasd_eckd_private *private = device->private;
3704 struct dasd_dso_ras_ext_range *ras_range;
3705 struct dasd_rssd_features *features;
3706 struct dasd_dso_ras_data *ras_data;
3707 u16 heads, beg_head, end_head;
3708 int cur_to_trk, cur_from_trk;
3709 struct dasd_ccw_req *cqr;
3710 u32 beg_cyl, end_cyl;
3711 struct ccw1 *ccw;
3712 int trks_per_ext;
3713 size_t ras_size;
3714 size_t size;
3715 int nr_exts;
3716 void *rq;
3717 int i;
3718
3719 if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
3720 return ERR_PTR(-EINVAL);
3721
3722 rq = req ?
blk_mq_rq_to_pdu(req) : NULL; 3723 3724 features = &private->features; 3725 3726 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3727 nr_exts = 0; 3728 if (by_extent) 3729 nr_exts = count_exts(first_trk, last_trk, trks_per_ext); 3730 ras_size = sizeof(*ras_data); 3731 size = ras_size + (nr_exts * sizeof(*ras_range)); 3732 3733 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, size, device, rq); 3734 if (IS_ERR(cqr)) { 3735 DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", 3736 "Could not allocate RAS request"); 3737 return cqr; 3738 } 3739 3740 ras_data = cqr->data; 3741 memset(ras_data, 0, size); 3742 3743 ras_data->order = DSO_ORDER_RAS; 3744 ras_data->flags.vol_type = 0; /* CKD volume */ 3745 /* Release specified extents or entire volume */ 3746 ras_data->op_flags.by_extent = by_extent; 3747 /* 3748 * This bit guarantees initialisation of tracks within an extent that is 3749 * not fully specified, but is only supported with a certain feature 3750 * subset. 3751 */ 3752 ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01); 3753 ras_data->lss = private->ned->ID; 3754 ras_data->dev_addr = private->ned->unit_addr; 3755 ras_data->nr_exts = nr_exts; 3756 3757 if (by_extent) { 3758 heads = private->rdc_data.trk_per_cyl; 3759 cur_from_trk = first_trk; 3760 cur_to_trk = first_trk + trks_per_ext - 3761 (first_trk % trks_per_ext) - 1; 3762 if (cur_to_trk > last_trk) 3763 cur_to_trk = last_trk; 3764 ras_range = (struct dasd_dso_ras_ext_range *)(cqr->data + ras_size); 3765 3766 for (i = 0; i < nr_exts; i++) { 3767 beg_cyl = cur_from_trk / heads; 3768 beg_head = cur_from_trk % heads; 3769 end_cyl = cur_to_trk / heads; 3770 end_head = cur_to_trk % heads; 3771 3772 set_ch_t(&ras_range->beg_ext, beg_cyl, beg_head); 3773 set_ch_t(&ras_range->end_ext, end_cyl, end_head); 3774 3775 cur_from_trk = cur_to_trk + 1; 3776 cur_to_trk = cur_from_trk + trks_per_ext - 1; 3777 if (cur_to_trk > last_trk) 3778 cur_to_trk = last_trk; 3779 ras_range++; 3780 } 3781 } 3782 3783 ccw = cqr->cpaddr; 3784 ccw->cda = (__u32)(addr_t)cqr->data; 3785 ccw->cmd_code = DASD_ECKD_CCW_DSO; 3786 ccw->count = size; 3787 3788 cqr->startdev = device; 3789 cqr->memdev = device; 3790 cqr->block = block; 3791 cqr->retries = 256; 3792 cqr->expires = device->default_expires * HZ; 3793 cqr->buildclk = get_tod_clock(); 3794 cqr->status = DASD_CQR_FILLED; 3795 3796 return cqr; 3797 } 3798 3799 static int dasd_eckd_release_space_full(struct dasd_device *device) 3800 { 3801 struct dasd_ccw_req *cqr; 3802 int rc; 3803 3804 cqr = dasd_eckd_dso_ras(device, NULL, NULL, 0, 0, 0); 3805 if (IS_ERR(cqr)) 3806 return PTR_ERR(cqr); 3807 3808 rc = dasd_sleep_on_interruptible(cqr); 3809 3810 dasd_sfree_request(cqr, cqr->memdev); 3811 3812 return rc; 3813 } 3814 3815 static int dasd_eckd_release_space_trks(struct dasd_device *device, 3816 unsigned int from, unsigned int to) 3817 { 3818 struct dasd_eckd_private *private = device->private; 3819 struct dasd_block *block = device->block; 3820 struct dasd_ccw_req *cqr, *n; 3821 struct list_head ras_queue; 3822 unsigned int device_exts; 3823 int trks_per_ext; 3824 int stop, step; 3825 int cur_pos; 3826 int rc = 0; 3827 int retry; 3828 3829 INIT_LIST_HEAD(&ras_queue); 3830 3831 device_exts = private->real_cyl / dasd_eckd_ext_size(device); 3832 trks_per_ext = dasd_eckd_ext_size(device) * private->rdc_data.trk_per_cyl; 3833 3834 /* Make sure device limits are not exceeded */ 3835 step = trks_per_ext * min(device_exts, DASD_ECKD_RAS_EXTS_MAX); 3836 cur_pos = from; 3837 3838 do { 
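/*
 * Process the range in extent-aligned chunks of at most
 * DASD_ECKD_RAS_EXTS_MAX extents each; on -ENOMEM, submit what has
 * been built so far and retry the remainder afterwards.
 */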
3839 retry = 0; 3840 while (cur_pos < to) { 3841 stop = cur_pos + step - 3842 ((cur_pos + step) % trks_per_ext) - 1; 3843 if (stop > to) 3844 stop = to; 3845 3846 cqr = dasd_eckd_dso_ras(device, NULL, NULL, cur_pos, stop, 1); 3847 if (IS_ERR(cqr)) { 3848 rc = PTR_ERR(cqr); 3849 if (rc == -ENOMEM) { 3850 if (list_empty(&ras_queue)) 3851 goto out; 3852 retry = 1; 3853 break; 3854 } 3855 goto err_out; 3856 } 3857 3858 spin_lock_irq(&block->queue_lock); 3859 list_add_tail(&cqr->blocklist, &ras_queue); 3860 spin_unlock_irq(&block->queue_lock); 3861 cur_pos = stop + 1; 3862 } 3863 3864 rc = dasd_sleep_on_queue_interruptible(&ras_queue); 3865 3866 err_out: 3867 list_for_each_entry_safe(cqr, n, &ras_queue, blocklist) { 3868 device = cqr->startdev; 3869 private = device->private; 3870 3871 spin_lock_irq(&block->queue_lock); 3872 list_del_init(&cqr->blocklist); 3873 spin_unlock_irq(&block->queue_lock); 3874 dasd_sfree_request(cqr, device); 3875 private->count--; 3876 } 3877 } while (retry); 3878 3879 out: 3880 return rc; 3881 } 3882 3883 static int dasd_eckd_release_space(struct dasd_device *device, 3884 struct format_data_t *rdata) 3885 { 3886 if (rdata->intensity & DASD_FMT_INT_ESE_FULL) 3887 return dasd_eckd_release_space_full(device); 3888 else if (rdata->intensity == 0) 3889 return dasd_eckd_release_space_trks(device, rdata->start_unit, 3890 rdata->stop_unit); 3891 else 3892 return -EINVAL; 3893 } 3894 3895 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single( 3896 struct dasd_device *startdev, 3897 struct dasd_block *block, 3898 struct request *req, 3899 sector_t first_rec, 3900 sector_t last_rec, 3901 sector_t first_trk, 3902 sector_t last_trk, 3903 unsigned int first_offs, 3904 unsigned int last_offs, 3905 unsigned int blk_per_trk, 3906 unsigned int blksize) 3907 { 3908 struct dasd_eckd_private *private; 3909 unsigned long *idaws; 3910 struct LO_eckd_data *LO_data; 3911 struct dasd_ccw_req *cqr; 3912 struct ccw1 *ccw; 3913 struct req_iterator iter; 3914 struct bio_vec bv; 3915 char *dst; 3916 unsigned int off; 3917 int count, cidaw, cplength, datasize; 3918 sector_t recid; 3919 unsigned char cmd, rcmd; 3920 int use_prefix; 3921 struct dasd_device *basedev; 3922 3923 basedev = block->base; 3924 private = basedev->private; 3925 if (rq_data_dir(req) == READ) 3926 cmd = DASD_ECKD_CCW_READ_MT; 3927 else if (rq_data_dir(req) == WRITE) 3928 cmd = DASD_ECKD_CCW_WRITE_MT; 3929 else 3930 return ERR_PTR(-EINVAL); 3931 3932 /* Check struct bio and count the number of blocks for the request. */ 3933 count = 0; 3934 cidaw = 0; 3935 rq_for_each_segment(bv, req, iter) { 3936 if (bv.bv_len & (blksize - 1)) 3937 /* Eckd can only do full blocks. */ 3938 return ERR_PTR(-EINVAL); 3939 count += bv.bv_len >> (block->s2b_shift + 9); 3940 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) 3941 cidaw += bv.bv_len >> (block->s2b_shift + 9); 3942 } 3943 /* Paranoia. 
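The block count derived from the bio segments must match the range first_rec..last_rec computed by the caller.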
*/ 3944 if (count != last_rec - first_rec + 1) 3945 return ERR_PTR(-EINVAL); 3946 3947 /* use the prefix command if available */ 3948 use_prefix = private->features.feature[8] & 0x01; 3949 if (use_prefix) { 3950 /* 1x prefix + number of blocks */ 3951 cplength = 2 + count; 3952 /* 1x prefix + cidaws*sizeof(long) */ 3953 datasize = sizeof(struct PFX_eckd_data) + 3954 sizeof(struct LO_eckd_data) + 3955 cidaw * sizeof(unsigned long); 3956 } else { 3957 /* 1x define extent + 1x locate record + number of blocks */ 3958 cplength = 2 + count; 3959 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */ 3960 datasize = sizeof(struct DE_eckd_data) + 3961 sizeof(struct LO_eckd_data) + 3962 cidaw * sizeof(unsigned long); 3963 } 3964 /* Find out the number of additional locate record ccws for cdl. */ 3965 if (private->uses_cdl && first_rec < 2*blk_per_trk) { 3966 if (last_rec >= 2*blk_per_trk) 3967 count = 2*blk_per_trk - first_rec; 3968 cplength += count; 3969 datasize += count*sizeof(struct LO_eckd_data); 3970 } 3971 /* Allocate the ccw request. */ 3972 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 3973 startdev, blk_mq_rq_to_pdu(req)); 3974 if (IS_ERR(cqr)) 3975 return cqr; 3976 ccw = cqr->cpaddr; 3977 /* First ccw is define extent or prefix. */ 3978 if (use_prefix) { 3979 if (prefix(ccw++, cqr->data, first_trk, 3980 last_trk, cmd, basedev, startdev) == -EAGAIN) { 3981 /* Clock not in sync and XRC is enabled. 3982 * Try again later. 3983 */ 3984 dasd_sfree_request(cqr, startdev); 3985 return ERR_PTR(-EAGAIN); 3986 } 3987 idaws = (unsigned long *) (cqr->data + 3988 sizeof(struct PFX_eckd_data)); 3989 } else { 3990 if (define_extent(ccw++, cqr->data, first_trk, 3991 last_trk, cmd, basedev, 0) == -EAGAIN) { 3992 /* Clock not in sync and XRC is enabled. 3993 * Try again later. 3994 */ 3995 dasd_sfree_request(cqr, startdev); 3996 return ERR_PTR(-EAGAIN); 3997 } 3998 idaws = (unsigned long *) (cqr->data + 3999 sizeof(struct DE_eckd_data)); 4000 } 4001 /* Build locate_record+read/write/ccws. */ 4002 LO_data = (struct LO_eckd_data *) (idaws + cidaw); 4003 recid = first_rec; 4004 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) { 4005 /* Only standard blocks so there is just one locate record. */ 4006 ccw[-1].flags |= CCW_FLAG_CC; 4007 locate_record(ccw++, LO_data++, first_trk, first_offs + 1, 4008 last_rec - recid + 1, cmd, basedev, blksize); 4009 } 4010 rq_for_each_segment(bv, req, iter) { 4011 dst = bvec_virt(&bv); 4012 if (dasd_page_cache) { 4013 char *copy = kmem_cache_alloc(dasd_page_cache, 4014 GFP_DMA | __GFP_NOWARN); 4015 if (copy && rq_data_dir(req) == WRITE) 4016 memcpy(copy + bv.bv_offset, dst, bv.bv_len); 4017 if (copy) 4018 dst = copy + bv.bv_offset; 4019 } 4020 for (off = 0; off < bv.bv_len; off += blksize) { 4021 sector_t trkid = recid; 4022 unsigned int recoffs = sector_div(trkid, blk_per_trk); 4023 rcmd = cmd; 4024 count = blksize; 4025 /* Locate record for cdl special block ? */ 4026 if (private->uses_cdl && recid < 2*blk_per_trk) { 4027 if (dasd_eckd_cdl_special(blk_per_trk, recid)){ 4028 rcmd |= 0x8; 4029 count = dasd_eckd_cdl_reclen(recid); 4030 if (count < blksize && 4031 rq_data_dir(req) == READ) 4032 memset(dst + count, 0xe5, 4033 blksize - count); 4034 } 4035 ccw[-1].flags |= CCW_FLAG_CC; 4036 locate_record(ccw++, LO_data++, 4037 trkid, recoffs + 1, 4038 1, rcmd, basedev, count); 4039 } 4040 /* Locate record for standard blocks ? 
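* (recid == 2*blk_per_trk means the CDL area of the first two tracks is finished; a single locate record then covers all remaining standard blocks of the request)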
*/ 4041 if (private->uses_cdl && recid == 2*blk_per_trk) { 4042 ccw[-1].flags |= CCW_FLAG_CC; 4043 locate_record(ccw++, LO_data++, 4044 trkid, recoffs + 1, 4045 last_rec - recid + 1, 4046 cmd, basedev, count); 4047 } 4048 /* Read/write ccw. */ 4049 ccw[-1].flags |= CCW_FLAG_CC; 4050 ccw->cmd_code = rcmd; 4051 ccw->count = count; 4052 if (idal_is_needed(dst, blksize)) { 4053 ccw->cda = (__u32)(addr_t) idaws; 4054 ccw->flags = CCW_FLAG_IDA; 4055 idaws = idal_create_words(idaws, dst, blksize); 4056 } else { 4057 ccw->cda = (__u32)(addr_t) dst; 4058 ccw->flags = 0; 4059 } 4060 ccw++; 4061 dst += blksize; 4062 recid++; 4063 } 4064 } 4065 if (blk_noretry_request(req) || 4066 block->base->features & DASD_FEATURE_FAILFAST) 4067 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4068 cqr->startdev = startdev; 4069 cqr->memdev = startdev; 4070 cqr->block = block; 4071 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4072 cqr->lpm = dasd_path_get_ppm(startdev); 4073 cqr->retries = startdev->default_retries; 4074 cqr->buildclk = get_tod_clock(); 4075 cqr->status = DASD_CQR_FILLED; 4076 4077 /* Set flags to suppress output for expected errors */ 4078 if (dasd_eckd_is_ese(basedev)) { 4079 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4080 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4081 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4082 } 4083 4084 return cqr; 4085 } 4086 4087 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 4088 struct dasd_device *startdev, 4089 struct dasd_block *block, 4090 struct request *req, 4091 sector_t first_rec, 4092 sector_t last_rec, 4093 sector_t first_trk, 4094 sector_t last_trk, 4095 unsigned int first_offs, 4096 unsigned int last_offs, 4097 unsigned int blk_per_trk, 4098 unsigned int blksize) 4099 { 4100 unsigned long *idaws; 4101 struct dasd_ccw_req *cqr; 4102 struct ccw1 *ccw; 4103 struct req_iterator iter; 4104 struct bio_vec bv; 4105 char *dst, *idaw_dst; 4106 unsigned int cidaw, cplength, datasize; 4107 unsigned int tlf; 4108 sector_t recid; 4109 unsigned char cmd; 4110 struct dasd_device *basedev; 4111 unsigned int trkcount, count, count_to_trk_end; 4112 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 4113 unsigned char new_track, end_idaw; 4114 sector_t trkid; 4115 unsigned int recoffs; 4116 4117 basedev = block->base; 4118 if (rq_data_dir(req) == READ) 4119 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4120 else if (rq_data_dir(req) == WRITE) 4121 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4122 else 4123 return ERR_PTR(-EINVAL); 4124 4125 /* Track based I/O needs IDAWs for each page, and not just for 4126 * 64 bit addresses. We need additional idals for pages 4127 * that get filled from two tracks, so we use the number 4128 * of records as upper limit. 4129 */ 4130 cidaw = last_rec - first_rec + 1; 4131 trkcount = last_trk - first_trk + 1; 4132 4133 /* 1x prefix + one read/write ccw per track */ 4134 cplength = 1 + trkcount; 4135 4136 datasize = sizeof(struct PFX_eckd_data) + cidaw * sizeof(unsigned long); 4137 4138 /* Allocate the ccw request. 
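* (for illustration: a two-block request with no CDL special blocks and no IDALs needed, i.e. cidaw = 0, uses cplength = 2 + 2 = 4 CCWs and a data area holding just the prefix or define extent/locate record pair plus one locate record)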
*/ 4139 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, 4140 startdev, blk_mq_rq_to_pdu(req)); 4141 if (IS_ERR(cqr)) 4142 return cqr; 4143 ccw = cqr->cpaddr; 4144 /* transfer length factor: how many bytes to read from the last track */ 4145 if (first_trk == last_trk) 4146 tlf = last_offs - first_offs + 1; 4147 else 4148 tlf = last_offs + 1; 4149 tlf *= blksize; 4150 4151 if (prefix_LRE(ccw++, cqr->data, first_trk, 4152 last_trk, cmd, basedev, startdev, 4153 1 /* format */, first_offs + 1, 4154 trkcount, blksize, 4155 tlf) == -EAGAIN) { 4156 /* Clock not in sync and XRC is enabled. 4157 * Try again later. 4158 */ 4159 dasd_sfree_request(cqr, startdev); 4160 return ERR_PTR(-EAGAIN); 4161 } 4162 4163 /* 4164 * The translation of a request into ccw programs must meet the 4165 * following conditions: 4166 * - all idaws but the first and the last must address full pages 4167 * (or 2K blocks on 31-bit) 4168 * - the scope of a ccw and its idal ends with the track boundaries 4169 */ 4170 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 4171 recid = first_rec; 4172 new_track = 1; 4173 end_idaw = 0; 4174 len_to_track_end = 0; 4175 idaw_dst = NULL; 4176 idaw_len = 0; 4177 rq_for_each_segment(bv, req, iter) { 4178 dst = bvec_virt(&bv); 4179 seg_len = bv.bv_len; 4180 while (seg_len) { 4181 if (new_track) { 4182 trkid = recid; 4183 recoffs = sector_div(trkid, blk_per_trk); 4184 count_to_trk_end = blk_per_trk - recoffs; 4185 count = min((last_rec - recid + 1), 4186 (sector_t)count_to_trk_end); 4187 len_to_track_end = count * blksize; 4188 ccw[-1].flags |= CCW_FLAG_CC; 4189 ccw->cmd_code = cmd; 4190 ccw->count = len_to_track_end; 4191 ccw->cda = (__u32)(addr_t)idaws; 4192 ccw->flags = CCW_FLAG_IDA; 4193 ccw++; 4194 recid += count; 4195 new_track = 0; 4196 /* first idaw for a ccw may start anywhere */ 4197 if (!idaw_dst) 4198 idaw_dst = dst; 4199 } 4200 /* If we start a new idaw, we must make sure that it 4201 * starts on an IDA_BLOCK_SIZE boundary.
* If we continue an idaw, we must make sure that the 4203 * current segment begins where the so far accumulated 4204 * idaw ends. 4205 */ 4206 if (!idaw_dst) { 4207 if (__pa(dst) & (IDA_BLOCK_SIZE-1)) { 4208 dasd_sfree_request(cqr, startdev); 4209 return ERR_PTR(-ERANGE); 4210 } else 4211 idaw_dst = dst; 4212 } 4213 if ((idaw_dst + idaw_len) != dst) { 4214 dasd_sfree_request(cqr, startdev); 4215 return ERR_PTR(-ERANGE); 4216 } 4217 part_len = min(seg_len, len_to_track_end); 4218 seg_len -= part_len; 4219 dst += part_len; 4220 idaw_len += part_len; 4221 len_to_track_end -= part_len; 4222 /* collected memory area ends on an IDA_BLOCK border, 4223 * -> create an idaw 4224 * idal_create_words will handle cases where idaw_len 4225 * is larger than IDA_BLOCK_SIZE 4226 */ 4227 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1))) 4228 end_idaw = 1; 4229 /* We also need to end the idaw at track end */ 4230 if (!len_to_track_end) { 4231 new_track = 1; 4232 end_idaw = 1; 4233 } 4234 if (end_idaw) { 4235 idaws = idal_create_words(idaws, idaw_dst, 4236 idaw_len); 4237 idaw_dst = NULL; 4238 idaw_len = 0; 4239 end_idaw = 0; 4240 } 4241 } 4242 } 4243 4244 if (blk_noretry_request(req) || 4245 block->base->features & DASD_FEATURE_FAILFAST) 4246 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4247 cqr->startdev = startdev; 4248 cqr->memdev = startdev; 4249 cqr->block = block; 4250 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4251 cqr->lpm = dasd_path_get_ppm(startdev); 4252 cqr->retries = startdev->default_retries; 4253 cqr->buildclk = get_tod_clock(); 4254 cqr->status = DASD_CQR_FILLED; 4255 4256 /* Set flags to suppress output for expected errors */ 4257 if (dasd_eckd_is_ese(basedev)) 4258 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4259 4260 return cqr; 4261 } 4262 4263 static int prepare_itcw(struct itcw *itcw, 4264 unsigned int trk, unsigned int totrk, int cmd, 4265 struct dasd_device *basedev, 4266 struct dasd_device *startdev, 4267 unsigned int rec_on_trk, int count, 4268 unsigned int blksize, 4269 unsigned int total_data_size, 4270 unsigned int tlf, 4271 unsigned int blk_per_trk) 4272 { 4273 struct PFX_eckd_data pfxdata; 4274 struct dasd_eckd_private *basepriv, *startpriv; 4275 struct DE_eckd_data *dedata; 4276 struct LRE_eckd_data *lredata; 4277 struct dcw *dcw; 4278 4279 u32 begcyl, endcyl; 4280 u16 heads, beghead, endhead; 4281 u8 pfx_cmd; 4282 4283 int rc = 0; 4284 int sector = 0; 4285 int dn, d; 4286 4287 4288 /* setup prefix data */ 4289 basepriv = basedev->private; 4290 startpriv = startdev->private; 4291 dedata = &pfxdata.define_extent; 4292 lredata = &pfxdata.locate_record; 4293 4294 memset(&pfxdata, 0, sizeof(pfxdata)); 4295 pfxdata.format = 1; /* PFX with LRE */ 4296 pfxdata.base_address = basepriv->ned->unit_addr; 4297 pfxdata.base_lss = basepriv->ned->ID; 4298 pfxdata.validity.define_extent = 1; 4299 4300 /* private uid is kept up to date, conf_data may be outdated */ 4301 if (startpriv->uid.type == UA_BASE_PAV_ALIAS) 4302 pfxdata.validity.verify_base = 1; 4303 4304 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) { 4305 pfxdata.validity.verify_base = 1; 4306 pfxdata.validity.hyper_pav = 1; 4307 } 4308 4309 switch (cmd) { 4310 case DASD_ECKD_CCW_READ_TRACK_DATA: 4311 dedata->mask.perm = 0x1; 4312 dedata->attributes.operation = basepriv->attrib.operation; 4313 dedata->blk_size = blksize; 4314 dedata->ga_extended |= 0x42; 4315 lredata->operation.orientation = 0x0; 4316 lredata->operation.operation = 0x0C; 4317 lredata->auxiliary.check_bytes = 0x01; 4318
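/* the read case skips the write timestamp handling used below and issues the read variant of the prefix command */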
pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4319 break; 4320 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 4321 dedata->mask.perm = 0x02; 4322 dedata->attributes.operation = basepriv->attrib.operation; 4323 dedata->blk_size = blksize; 4324 rc = set_timestamp(NULL, dedata, basedev); 4325 dedata->ga_extended |= 0x42; 4326 lredata->operation.orientation = 0x0; 4327 lredata->operation.operation = 0x3F; 4328 lredata->extended_operation = 0x23; 4329 lredata->auxiliary.check_bytes = 0x2; 4330 /* 4331 * If XRC is supported the System Time Stamp is set. The 4332 * validity of the time stamp must be reflected in the prefix 4333 * data as well. 4334 */ 4335 if (dedata->ga_extended & 0x08 && dedata->ga_extended & 0x02) 4336 pfxdata.validity.time_stamp = 1; /* 'Time Stamp Valid' */ 4337 pfx_cmd = DASD_ECKD_CCW_PFX; 4338 break; 4339 case DASD_ECKD_CCW_READ_COUNT_MT: 4340 dedata->mask.perm = 0x1; 4341 dedata->attributes.operation = DASD_BYPASS_CACHE; 4342 dedata->ga_extended |= 0x42; 4343 dedata->blk_size = blksize; 4344 lredata->operation.orientation = 0x2; 4345 lredata->operation.operation = 0x16; 4346 lredata->auxiliary.check_bytes = 0x01; 4347 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 4348 break; 4349 default: 4350 DBF_DEV_EVENT(DBF_ERR, basedev, 4351 "prepare itcw, unknown opcode 0x%x", cmd); 4352 BUG(); 4353 break; 4354 } 4355 if (rc) 4356 return rc; 4357 4358 dedata->attributes.mode = 0x3; /* ECKD */ 4359 4360 heads = basepriv->rdc_data.trk_per_cyl; 4361 begcyl = trk / heads; 4362 beghead = trk % heads; 4363 endcyl = totrk / heads; 4364 endhead = totrk % heads; 4365 4366 /* check for sequential prestage - enhance cylinder range */ 4367 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 4368 dedata->attributes.operation == DASD_SEQ_ACCESS) { 4369 4370 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 4371 endcyl += basepriv->attrib.nr_cyl; 4372 else 4373 endcyl = (basepriv->real_cyl - 1); 4374 } 4375 4376 set_ch_t(&dedata->beg_ext, begcyl, beghead); 4377 set_ch_t(&dedata->end_ext, endcyl, endhead); 4378 4379 dedata->ep_format = 0x20; /* records per track is valid */ 4380 dedata->ep_rec_per_track = blk_per_trk; 4381 4382 if (rec_on_trk) { 4383 switch (basepriv->rdc_data.dev_type) { 4384 case 0x3390: 4385 dn = ceil_quot(blksize + 6, 232); 4386 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 4387 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 4388 break; 4389 case 0x3380: 4390 d = 7 + ceil_quot(blksize + 12, 32); 4391 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 4392 break; 4393 } 4394 } 4395 4396 if (cmd == DASD_ECKD_CCW_READ_COUNT_MT) { 4397 lredata->auxiliary.length_valid = 0; 4398 lredata->auxiliary.length_scope = 0; 4399 lredata->sector = 0xff; 4400 } else { 4401 lredata->auxiliary.length_valid = 1; 4402 lredata->auxiliary.length_scope = 1; 4403 lredata->sector = sector; 4404 } 4405 lredata->auxiliary.imbedded_ccw_valid = 1; 4406 lredata->length = tlf; 4407 lredata->imbedded_ccw = cmd; 4408 lredata->count = count; 4409 set_ch_t(&lredata->seek_addr, begcyl, beghead); 4410 lredata->search_arg.cyl = lredata->seek_addr.cyl; 4411 lredata->search_arg.head = lredata->seek_addr.head; 4412 lredata->search_arg.record = rec_on_trk; 4413 4414 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 4415 &pfxdata, sizeof(pfxdata), total_data_size); 4416 return PTR_ERR_OR_ZERO(dcw); 4417 } 4418 4419 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 4420 struct dasd_device *startdev, 4421 struct dasd_block *block, 4422 struct request *req, 4423 sector_t first_rec, 4424 sector_t last_rec, 4425 sector_t first_trk, 4426 sector_t 
last_trk, 4427 unsigned int first_offs, 4428 unsigned int last_offs, 4429 unsigned int blk_per_trk, 4430 unsigned int blksize) 4431 { 4432 struct dasd_ccw_req *cqr; 4433 struct req_iterator iter; 4434 struct bio_vec bv; 4435 char *dst; 4436 unsigned int trkcount, ctidaw; 4437 unsigned char cmd; 4438 struct dasd_device *basedev; 4439 unsigned int tlf; 4440 struct itcw *itcw; 4441 struct tidaw *last_tidaw = NULL; 4442 int itcw_op; 4443 size_t itcw_size; 4444 u8 tidaw_flags; 4445 unsigned int seg_len, part_len, len_to_track_end; 4446 unsigned char new_track; 4447 sector_t recid, trkid; 4448 unsigned int offs; 4449 unsigned int count, count_to_trk_end; 4450 int ret; 4451 4452 basedev = block->base; 4453 if (rq_data_dir(req) == READ) { 4454 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 4455 itcw_op = ITCW_OP_READ; 4456 } else if (rq_data_dir(req) == WRITE) { 4457 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 4458 itcw_op = ITCW_OP_WRITE; 4459 } else 4460 return ERR_PTR(-EINVAL); 4461 4462 /* Track-based I/O needs to address all memory via TIDAWs, 4463 * not just for 64 bit addresses. This allows us to map 4464 * each segment directly to one tidaw. 4465 * In the case of write requests, additional tidaws may 4466 * be needed when a segment crosses a track boundary. 4467 */ 4468 trkcount = last_trk - first_trk + 1; 4469 ctidaw = 0; 4470 rq_for_each_segment(bv, req, iter) { 4471 ++ctidaw; 4472 } 4473 if (rq_data_dir(req) == WRITE) 4474 ctidaw += (last_trk - first_trk); 4475 4476 /* Allocate the ccw request. */ 4477 itcw_size = itcw_calc_size(0, ctidaw, 0); 4478 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev, 4479 blk_mq_rq_to_pdu(req)); 4480 if (IS_ERR(cqr)) 4481 return cqr; 4482 4483 /* transfer length factor: how many bytes to read from the last track */ 4484 if (first_trk == last_trk) 4485 tlf = last_offs - first_offs + 1; 4486 else 4487 tlf = last_offs + 1; 4488 tlf *= blksize; 4489 4490 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 4491 if (IS_ERR(itcw)) { 4492 ret = -EINVAL; 4493 goto out_error; 4494 } 4495 cqr->cpaddr = itcw_get_tcw(itcw); 4496 if (prepare_itcw(itcw, first_trk, last_trk, 4497 cmd, basedev, startdev, 4498 first_offs + 1, 4499 trkcount, blksize, 4500 (last_rec - first_rec + 1) * blksize, 4501 tlf, blk_per_trk) == -EAGAIN) { 4502 /* Clock not in sync and XRC is enabled. 4503 * Try again later. 4504 */ 4505 ret = -EAGAIN; 4506 goto out_error; 4507 } 4508 len_to_track_end = 0; 4509 /* 4510 * A tidaw can address 4k of memory, but must not cross page boundaries. 4511 * We can let the block layer handle this by setting 4512 * blk_queue_segment_boundary to page boundaries and 4513 * blk_max_segment_size to page size when setting up the request queue. 4514 * For write requests, a TIDAW must not cross track boundaries, because 4515 * we have to set the CBC flag on the last tidaw for each track.
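* For illustration (assumed numbers): a write covering 8 bio segments across 3 tracks reserves ctidaw = 8 + 2 above, because each of the two track crossings may force one extra tidaw.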
4516 */ 4517 if (rq_data_dir(req) == WRITE) { 4518 new_track = 1; 4519 recid = first_rec; 4520 rq_for_each_segment(bv, req, iter) { 4521 dst = bvec_virt(&bv); 4522 seg_len = bv.bv_len; 4523 while (seg_len) { 4524 if (new_track) { 4525 trkid = recid; 4526 offs = sector_div(trkid, blk_per_trk); 4527 count_to_trk_end = blk_per_trk - offs; 4528 count = min((last_rec - recid + 1), 4529 (sector_t)count_to_trk_end); 4530 len_to_track_end = count * blksize; 4531 recid += count; 4532 new_track = 0; 4533 } 4534 part_len = min(seg_len, len_to_track_end); 4535 seg_len -= part_len; 4536 len_to_track_end -= part_len; 4537 /* We need to end the tidaw at track end */ 4538 if (!len_to_track_end) { 4539 new_track = 1; 4540 tidaw_flags = TIDAW_FLAGS_INSERT_CBC; 4541 } else 4542 tidaw_flags = 0; 4543 last_tidaw = itcw_add_tidaw(itcw, tidaw_flags, 4544 dst, part_len); 4545 if (IS_ERR(last_tidaw)) { 4546 ret = -EINVAL; 4547 goto out_error; 4548 } 4549 dst += part_len; 4550 } 4551 } 4552 } else { 4553 rq_for_each_segment(bv, req, iter) { 4554 dst = bvec_virt(&bv); 4555 last_tidaw = itcw_add_tidaw(itcw, 0x00, 4556 dst, bv.bv_len); 4557 if (IS_ERR(last_tidaw)) { 4558 ret = -EINVAL; 4559 goto out_error; 4560 } 4561 } 4562 } 4563 last_tidaw->flags |= TIDAW_FLAGS_LAST; 4564 last_tidaw->flags &= ~TIDAW_FLAGS_INSERT_CBC; 4565 itcw_finalize(itcw); 4566 4567 if (blk_noretry_request(req) || 4568 block->base->features & DASD_FEATURE_FAILFAST) 4569 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4570 cqr->cpmode = 1; 4571 cqr->startdev = startdev; 4572 cqr->memdev = startdev; 4573 cqr->block = block; 4574 cqr->expires = startdev->default_expires * HZ; /* default 5 minutes */ 4575 cqr->lpm = dasd_path_get_ppm(startdev); 4576 cqr->retries = startdev->default_retries; 4577 cqr->buildclk = get_tod_clock(); 4578 cqr->status = DASD_CQR_FILLED; 4579 4580 /* Set flags to suppress output for expected errors */ 4581 if (dasd_eckd_is_ese(basedev)) { 4582 set_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 4583 set_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags); 4584 set_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 4585 } 4586 4587 return cqr; 4588 out_error: 4589 dasd_sfree_request(cqr, startdev); 4590 return ERR_PTR(ret); 4591 } 4592 4593 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 4594 struct dasd_block *block, 4595 struct request *req) 4596 { 4597 int cmdrtd, cmdwtd; 4598 int use_prefix; 4599 int fcx_multitrack; 4600 struct dasd_eckd_private *private; 4601 struct dasd_device *basedev; 4602 sector_t first_rec, last_rec; 4603 sector_t first_trk, last_trk; 4604 unsigned int first_offs, last_offs; 4605 unsigned int blk_per_trk, blksize; 4606 int cdlspecial; 4607 unsigned int data_size; 4608 struct dasd_ccw_req *cqr; 4609 4610 basedev = block->base; 4611 private = basedev->private; 4612 4613 /* Calculate number of blocks/records per track. */ 4614 blksize = block->bp_block; 4615 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4616 if (blk_per_trk == 0) 4617 return ERR_PTR(-EINVAL); 4618 /* Calculate record id of first and last block. 
*/ 4619 first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 4620 first_offs = sector_div(first_trk, blk_per_trk); 4621 last_rec = last_trk = 4622 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 4623 last_offs = sector_div(last_trk, blk_per_trk); 4624 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 4625 4626 fcx_multitrack = private->features.feature[40] & 0x20; 4627 data_size = blk_rq_bytes(req); 4628 if (data_size % blksize) 4629 return ERR_PTR(-EINVAL); 4630 /* tpm write requests add CBC data on each track boundary */ 4631 if (rq_data_dir(req) == WRITE) 4632 data_size += (last_trk - first_trk) * 4; 4633 4634 /* is read track data and write track data in command mode supported? */ 4635 cmdrtd = private->features.feature[9] & 0x20; 4636 cmdwtd = private->features.feature[12] & 0x40; 4637 use_prefix = private->features.feature[8] & 0x01; 4638 4639 cqr = NULL; 4640 if (cdlspecial || dasd_page_cache) { 4641 /* do nothing, just fall through to the cmd mode single case */ 4642 } else if ((data_size <= private->fcx_max_data) 4643 && (fcx_multitrack || (first_trk == last_trk))) { 4644 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 4645 first_rec, last_rec, 4646 first_trk, last_trk, 4647 first_offs, last_offs, 4648 blk_per_trk, blksize); 4649 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 4650 (PTR_ERR(cqr) != -ENOMEM)) 4651 cqr = NULL; 4652 } else if (use_prefix && 4653 (((rq_data_dir(req) == READ) && cmdrtd) || 4654 ((rq_data_dir(req) == WRITE) && cmdwtd))) { 4655 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req, 4656 first_rec, last_rec, 4657 first_trk, last_trk, 4658 first_offs, last_offs, 4659 blk_per_trk, blksize); 4660 if (IS_ERR(cqr) && (PTR_ERR(cqr) != -EAGAIN) && 4661 (PTR_ERR(cqr) != -ENOMEM)) 4662 cqr = NULL; 4663 } 4664 if (!cqr) 4665 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req, 4666 first_rec, last_rec, 4667 first_trk, last_trk, 4668 first_offs, last_offs, 4669 blk_per_trk, blksize); 4670 return cqr; 4671 } 4672 4673 static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev, 4674 struct dasd_block *block, 4675 struct request *req) 4676 { 4677 sector_t start_padding_sectors, end_sector_offset, end_padding_sectors; 4678 unsigned int seg_len, len_to_track_end; 4679 unsigned int cidaw, cplength, datasize; 4680 sector_t first_trk, last_trk, sectors; 4681 struct dasd_eckd_private *base_priv; 4682 struct dasd_device *basedev; 4683 struct req_iterator iter; 4684 struct dasd_ccw_req *cqr; 4685 unsigned int first_offs; 4686 unsigned int trkcount; 4687 unsigned long *idaws; 4688 unsigned int size; 4689 unsigned char cmd; 4690 struct bio_vec bv; 4691 struct ccw1 *ccw; 4692 int use_prefix; 4693 void *data; 4694 char *dst; 4695 4696 /* 4697 * raw track access needs to be a multiple of 64k and on a 64k boundary 4698 * For read requests we can fix an incorrect alignment by padding 4699 * the request with dummy pages.
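* Example with illustrative numbers: a read starting at sector 100 for 200 sectors has start_padding_sectors = 100 % 128 = 100 and end_padding_sectors = (128 - 300 % 128) % 128 = 84; both gaps are filled from rawpadpage below, while a misaligned write is rejected.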
4700 */ 4701 start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK; 4702 end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) % 4703 DASD_RAW_SECTORS_PER_TRACK; 4704 end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) % 4705 DASD_RAW_SECTORS_PER_TRACK; 4706 basedev = block->base; 4707 if ((start_padding_sectors || end_padding_sectors) && 4708 (rq_data_dir(req) == WRITE)) { 4709 DBF_DEV_EVENT(DBF_ERR, basedev, 4710 "raw write not track aligned (%llu,%llu) req %p", 4711 start_padding_sectors, end_padding_sectors, req); 4712 return ERR_PTR(-EINVAL); 4713 } 4714 4715 first_trk = blk_rq_pos(req) / DASD_RAW_SECTORS_PER_TRACK; 4716 last_trk = (blk_rq_pos(req) + blk_rq_sectors(req) - 1) / 4717 DASD_RAW_SECTORS_PER_TRACK; 4718 trkcount = last_trk - first_trk + 1; 4719 first_offs = 0; 4720 4721 if (rq_data_dir(req) == READ) 4722 cmd = DASD_ECKD_CCW_READ_TRACK; 4723 else if (rq_data_dir(req) == WRITE) 4724 cmd = DASD_ECKD_CCW_WRITE_FULL_TRACK; 4725 else 4726 return ERR_PTR(-EINVAL); 4727 4728 /* 4729 * Raw track based I/O needs IDAWs for each page, 4730 * and not just for 64 bit addresses. 4731 */ 4732 cidaw = trkcount * DASD_RAW_BLOCK_PER_TRACK; 4733 4734 /* 4735 * struct PFX_eckd_data and struct LRE_eckd_data can have up to 2 bytes 4736 * of extended parameter. This is needed for write full track. 4737 */ 4738 base_priv = basedev->private; 4739 use_prefix = base_priv->features.feature[8] & 0x01; 4740 if (use_prefix) { 4741 cplength = 1 + trkcount; 4742 size = sizeof(struct PFX_eckd_data) + 2; 4743 } else { 4744 cplength = 2 + trkcount; 4745 size = sizeof(struct DE_eckd_data) + 4746 sizeof(struct LRE_eckd_data) + 2; 4747 } 4748 size = ALIGN(size, 8); 4749 4750 datasize = size + cidaw * sizeof(unsigned long); 4751 4752 /* Allocate the ccw request. 
*/ 4753 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, 4754 datasize, startdev, blk_mq_rq_to_pdu(req)); 4755 if (IS_ERR(cqr)) 4756 return cqr; 4757 4758 ccw = cqr->cpaddr; 4759 data = cqr->data; 4760 4761 if (use_prefix) { 4762 prefix_LRE(ccw++, data, first_trk, last_trk, cmd, basedev, 4763 startdev, 1, first_offs + 1, trkcount, 0, 0); 4764 } else { 4765 define_extent(ccw++, data, first_trk, last_trk, cmd, basedev, 0); 4766 ccw[-1].flags |= CCW_FLAG_CC; 4767 4768 data += sizeof(struct DE_eckd_data); 4769 locate_record_ext(ccw++, data, first_trk, first_offs + 1, 4770 trkcount, cmd, basedev, 0, 0); 4771 } 4772 4773 idaws = (unsigned long *)(cqr->data + size); 4774 len_to_track_end = 0; 4775 if (start_padding_sectors) { 4776 ccw[-1].flags |= CCW_FLAG_CC; 4777 ccw->cmd_code = cmd; 4778 /* maximum 3390 track size */ 4779 ccw->count = 57326; 4780 /* 64k maps to one track */ 4781 len_to_track_end = 65536 - start_padding_sectors * 512; 4782 ccw->cda = (__u32)(addr_t)idaws; 4783 ccw->flags |= CCW_FLAG_IDA; 4784 ccw->flags |= CCW_FLAG_SLI; 4785 ccw++; 4786 for (sectors = 0; sectors < start_padding_sectors; sectors += 8) 4787 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4788 } 4789 rq_for_each_segment(bv, req, iter) { 4790 dst = bvec_virt(&bv); 4791 seg_len = bv.bv_len; 4792 if (cmd == DASD_ECKD_CCW_READ_TRACK) 4793 memset(dst, 0, seg_len); 4794 if (!len_to_track_end) { 4795 ccw[-1].flags |= CCW_FLAG_CC; 4796 ccw->cmd_code = cmd; 4797 /* maximum 3390 track size */ 4798 ccw->count = 57326; 4799 /* 64k maps to one track */ 4800 len_to_track_end = 65536; 4801 ccw->cda = (__u32)(addr_t)idaws; 4802 ccw->flags |= CCW_FLAG_IDA; 4803 ccw->flags |= CCW_FLAG_SLI; 4804 ccw++; 4805 } 4806 len_to_track_end -= seg_len; 4807 idaws = idal_create_words(idaws, dst, seg_len); 4808 } 4809 for (sectors = 0; sectors < end_padding_sectors; sectors += 8) 4810 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 4811 if (blk_noretry_request(req) || 4812 block->base->features & DASD_FEATURE_FAILFAST) 4813 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 4814 cqr->startdev = startdev; 4815 cqr->memdev = startdev; 4816 cqr->block = block; 4817 cqr->expires = startdev->default_expires * HZ; 4818 cqr->lpm = dasd_path_get_ppm(startdev); 4819 cqr->retries = startdev->default_retries; 4820 cqr->buildclk = get_tod_clock(); 4821 cqr->status = DASD_CQR_FILLED; 4822 4823 return cqr; 4824 } 4825 4826 4827 static int 4828 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 4829 { 4830 struct dasd_eckd_private *private; 4831 struct ccw1 *ccw; 4832 struct req_iterator iter; 4833 struct bio_vec bv; 4834 char *dst, *cda; 4835 unsigned int blksize, blk_per_trk, off; 4836 sector_t recid; 4837 int status; 4838 4839 if (!dasd_page_cache) 4840 goto out; 4841 private = cqr->block->base->private; 4842 blksize = cqr->block->bp_block; 4843 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 4844 recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 4845 ccw = cqr->cpaddr; 4846 /* Skip over define extent & locate record. */ 4847 ccw++; 4848 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 4849 ccw++; 4850 rq_for_each_segment(bv, req, iter) { 4851 dst = bvec_virt(&bv); 4852 for (off = 0; off < bv.bv_len; off += blksize) { 4853 /* Skip locate record.
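* (the build path emitted one locate record CCW per block inside the CDL area of the first two tracks, so those CCWs must be stepped over here as well)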
*/ 4854 if (private->uses_cdl && recid <= 2*blk_per_trk) 4855 ccw++; 4856 if (dst) { 4857 if (ccw->flags & CCW_FLAG_IDA) 4858 cda = *((char **)((addr_t) ccw->cda)); 4859 else 4860 cda = (char *)((addr_t) ccw->cda); 4861 if (dst != cda) { 4862 if (rq_data_dir(req) == READ) 4863 memcpy(dst, cda, bv.bv_len); 4864 kmem_cache_free(dasd_page_cache, 4865 (void *)((addr_t)cda & PAGE_MASK)); 4866 } 4867 dst = NULL; 4868 } 4869 ccw++; 4870 recid++; 4871 } 4872 } 4873 out: 4874 status = cqr->status == DASD_CQR_DONE; 4875 dasd_sfree_request(cqr, cqr->memdev); 4876 return status; 4877 } 4878 4879 /* 4880 * Modify ccw/tcw in cqr so it can be started on a base device. 4881 * 4882 * Note that this is not enough to restart the cqr! 4883 * Either reset cqr->startdev as well (summary unit check handling) 4884 * or restart via separate cqr (as in ERP handling). 4885 */ 4886 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 4887 { 4888 struct ccw1 *ccw; 4889 struct PFX_eckd_data *pfxdata; 4890 struct tcw *tcw; 4891 struct tccb *tccb; 4892 struct dcw *dcw; 4893 4894 if (cqr->cpmode == 1) { 4895 tcw = cqr->cpaddr; 4896 tccb = tcw_get_tccb(tcw); 4897 dcw = (struct dcw *)&tccb->tca[0]; 4898 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 4899 pfxdata->validity.verify_base = 0; 4900 pfxdata->validity.hyper_pav = 0; 4901 } else { 4902 ccw = cqr->cpaddr; 4903 pfxdata = cqr->data; 4904 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 4905 pfxdata->validity.verify_base = 0; 4906 pfxdata->validity.hyper_pav = 0; 4907 } 4908 } 4909 } 4910 4911 #define DASD_ECKD_CHANQ_MAX_SIZE 4 4912 4913 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 4914 struct dasd_block *block, 4915 struct request *req) 4916 { 4917 struct dasd_eckd_private *private; 4918 struct dasd_device *startdev; 4919 unsigned long flags; 4920 struct dasd_ccw_req *cqr; 4921 4922 startdev = dasd_alias_get_start_dev(base); 4923 if (!startdev) 4924 startdev = base; 4925 private = startdev->private; 4926 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 4927 return ERR_PTR(-EBUSY); 4928 4929 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 4930 private->count++; 4931 if ((base->features & DASD_FEATURE_USERAW)) 4932 cqr = dasd_eckd_build_cp_raw(startdev, block, req); 4933 else 4934 cqr = dasd_eckd_build_cp(startdev, block, req); 4935 if (IS_ERR(cqr)) 4936 private->count--; 4937 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 4938 return cqr; 4939 } 4940 4941 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 4942 struct request *req) 4943 { 4944 struct dasd_eckd_private *private; 4945 unsigned long flags; 4946 4947 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 4948 private = cqr->memdev->private; 4949 private->count--; 4950 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 4951 return dasd_eckd_free_cp(cqr, req); 4952 } 4953 4954 static int 4955 dasd_eckd_fill_info(struct dasd_device * device, 4956 struct dasd_information2_t * info) 4957 { 4958 struct dasd_eckd_private *private = device->private; 4959 4960 info->label_block = 2; 4961 info->FBA_layout = private->uses_cdl ? 0 : 1; 4962 info->format = private->uses_cdl ? 
DASD_FORMAT_CDL : DASD_FORMAT_LDL; 4963 info->characteristics_size = sizeof(private->rdc_data); 4964 memcpy(info->characteristics, &private->rdc_data, 4965 sizeof(private->rdc_data)); 4966 info->confdata_size = min((unsigned long)private->conf_len, 4967 sizeof(info->configuration_data)); 4968 memcpy(info->configuration_data, private->conf_data, 4969 info->confdata_size); 4970 return 0; 4971 } 4972 4973 /* 4974 * SECTION: ioctl functions for eckd devices. 4975 */ 4976 4977 /* 4978 * Release device ioctl. 4979 * Builds a channel program to release a previously reserved 4980 * (see dasd_eckd_reserve) device. 4981 */ 4982 static int 4983 dasd_eckd_release(struct dasd_device *device) 4984 { 4985 struct dasd_ccw_req *cqr; 4986 int rc; 4987 struct ccw1 *ccw; 4988 int useglobal; 4989 4990 if (!capable(CAP_SYS_ADMIN)) 4991 return -EACCES; 4992 4993 useglobal = 0; 4994 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 4995 if (IS_ERR(cqr)) { 4996 mutex_lock(&dasd_reserve_mutex); 4997 useglobal = 1; 4998 cqr = &dasd_reserve_req->cqr; 4999 memset(cqr, 0, sizeof(*cqr)); 5000 memset(&dasd_reserve_req->ccw, 0, 5001 sizeof(dasd_reserve_req->ccw)); 5002 cqr->cpaddr = &dasd_reserve_req->ccw; 5003 cqr->data = &dasd_reserve_req->data; 5004 cqr->magic = DASD_ECKD_MAGIC; 5005 } 5006 ccw = cqr->cpaddr; 5007 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 5008 ccw->flags |= CCW_FLAG_SLI; 5009 ccw->count = 32; 5010 ccw->cda = (__u32)(addr_t) cqr->data; 5011 cqr->startdev = device; 5012 cqr->memdev = device; 5013 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5014 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5015 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5016 cqr->expires = 2 * HZ; 5017 cqr->buildclk = get_tod_clock(); 5018 cqr->status = DASD_CQR_FILLED; 5019 5020 rc = dasd_sleep_on_immediatly(cqr); 5021 if (!rc) 5022 clear_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5023 5024 if (useglobal) 5025 mutex_unlock(&dasd_reserve_mutex); 5026 else 5027 dasd_sfree_request(cqr, cqr->memdev); 5028 return rc; 5029 } 5030 5031 /* 5032 * Reserve device ioctl. 5033 * Options are set to 'synchronous wait for interrupt' and 5034 * 'timeout the request'. This leads to a terminate IO if 5035 * the interrupt is outstanding for a certain time.
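* (cqr->expires is set to 2 * HZ below, so a reserve that gets no interrupt within roughly two seconds is terminated rather than blocking the device indefinitely)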
5036 */ 5037 static int 5038 dasd_eckd_reserve(struct dasd_device *device) 5039 { 5040 struct dasd_ccw_req *cqr; 5041 int rc; 5042 struct ccw1 *ccw; 5043 int useglobal; 5044 5045 if (!capable(CAP_SYS_ADMIN)) 5046 return -EACCES; 5047 5048 useglobal = 0; 5049 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5050 if (IS_ERR(cqr)) { 5051 mutex_lock(&dasd_reserve_mutex); 5052 useglobal = 1; 5053 cqr = &dasd_reserve_req->cqr; 5054 memset(cqr, 0, sizeof(*cqr)); 5055 memset(&dasd_reserve_req->ccw, 0, 5056 sizeof(dasd_reserve_req->ccw)); 5057 cqr->cpaddr = &dasd_reserve_req->ccw; 5058 cqr->data = &dasd_reserve_req->data; 5059 cqr->magic = DASD_ECKD_MAGIC; 5060 } 5061 ccw = cqr->cpaddr; 5062 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 5063 ccw->flags |= CCW_FLAG_SLI; 5064 ccw->count = 32; 5065 ccw->cda = (__u32)(addr_t) cqr->data; 5066 cqr->startdev = device; 5067 cqr->memdev = device; 5068 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5069 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5070 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5071 cqr->expires = 2 * HZ; 5072 cqr->buildclk = get_tod_clock(); 5073 cqr->status = DASD_CQR_FILLED; 5074 5075 rc = dasd_sleep_on_immediatly(cqr); 5076 if (!rc) 5077 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5078 5079 if (useglobal) 5080 mutex_unlock(&dasd_reserve_mutex); 5081 else 5082 dasd_sfree_request(cqr, cqr->memdev); 5083 return rc; 5084 } 5085 5086 /* 5087 * Steal lock ioctl - unconditional reserve device. 5088 * Buils a channel programm to break a device's reservation. 5089 * (unconditional reserve) 5090 */ 5091 static int 5092 dasd_eckd_steal_lock(struct dasd_device *device) 5093 { 5094 struct dasd_ccw_req *cqr; 5095 int rc; 5096 struct ccw1 *ccw; 5097 int useglobal; 5098 5099 if (!capable(CAP_SYS_ADMIN)) 5100 return -EACCES; 5101 5102 useglobal = 0; 5103 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL); 5104 if (IS_ERR(cqr)) { 5105 mutex_lock(&dasd_reserve_mutex); 5106 useglobal = 1; 5107 cqr = &dasd_reserve_req->cqr; 5108 memset(cqr, 0, sizeof(*cqr)); 5109 memset(&dasd_reserve_req->ccw, 0, 5110 sizeof(dasd_reserve_req->ccw)); 5111 cqr->cpaddr = &dasd_reserve_req->ccw; 5112 cqr->data = &dasd_reserve_req->data; 5113 cqr->magic = DASD_ECKD_MAGIC; 5114 } 5115 ccw = cqr->cpaddr; 5116 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 5117 ccw->flags |= CCW_FLAG_SLI; 5118 ccw->count = 32; 5119 ccw->cda = (__u32)(addr_t) cqr->data; 5120 cqr->startdev = device; 5121 cqr->memdev = device; 5122 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5123 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5124 cqr->retries = 2; /* set retry counter to enable basic ERP */ 5125 cqr->expires = 2 * HZ; 5126 cqr->buildclk = get_tod_clock(); 5127 cqr->status = DASD_CQR_FILLED; 5128 5129 rc = dasd_sleep_on_immediatly(cqr); 5130 if (!rc) 5131 set_bit(DASD_FLAG_IS_RESERVED, &device->flags); 5132 5133 if (useglobal) 5134 mutex_unlock(&dasd_reserve_mutex); 5135 else 5136 dasd_sfree_request(cqr, cqr->memdev); 5137 return rc; 5138 } 5139 5140 /* 5141 * SNID - Sense Path Group ID 5142 * This ioctl may be used in situations where I/O is stalled due to 5143 * a reserve, so if the normal dasd_smalloc_request fails, we use the 5144 * preallocated dasd_reserve_req. 
5145 */ 5146 static int dasd_eckd_snid(struct dasd_device *device, 5147 void __user *argp) 5148 { 5149 struct dasd_ccw_req *cqr; 5150 int rc; 5151 struct ccw1 *ccw; 5152 int useglobal; 5153 struct dasd_snid_ioctl_data usrparm; 5154 5155 if (!capable(CAP_SYS_ADMIN)) 5156 return -EACCES; 5157 5158 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5159 return -EFAULT; 5160 5161 useglobal = 0; 5162 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 5163 sizeof(struct dasd_snid_data), device, 5164 NULL); 5165 if (IS_ERR(cqr)) { 5166 mutex_lock(&dasd_reserve_mutex); 5167 useglobal = 1; 5168 cqr = &dasd_reserve_req->cqr; 5169 memset(cqr, 0, sizeof(*cqr)); 5170 memset(&dasd_reserve_req->ccw, 0, 5171 sizeof(dasd_reserve_req->ccw)); 5172 cqr->cpaddr = &dasd_reserve_req->ccw; 5173 cqr->data = &dasd_reserve_req->data; 5174 cqr->magic = DASD_ECKD_MAGIC; 5175 } 5176 ccw = cqr->cpaddr; 5177 ccw->cmd_code = DASD_ECKD_CCW_SNID; 5178 ccw->flags |= CCW_FLAG_SLI; 5179 ccw->count = 12; 5180 ccw->cda = (__u32)(addr_t) cqr->data; 5181 cqr->startdev = device; 5182 cqr->memdev = device; 5183 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5184 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 5185 set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags); 5186 cqr->retries = 5; 5187 cqr->expires = 10 * HZ; 5188 cqr->buildclk = get_tod_clock(); 5189 cqr->status = DASD_CQR_FILLED; 5190 cqr->lpm = usrparm.path_mask; 5191 5192 rc = dasd_sleep_on_immediatly(cqr); 5193 /* verify that I/O processing didn't modify the path mask */ 5194 if (!rc && usrparm.path_mask && (cqr->lpm != usrparm.path_mask)) 5195 rc = -EIO; 5196 if (!rc) { 5197 usrparm.data = *((struct dasd_snid_data *)cqr->data); 5198 if (copy_to_user(argp, &usrparm, sizeof(usrparm))) 5199 rc = -EFAULT; 5200 } 5201 5202 if (useglobal) 5203 mutex_unlock(&dasd_reserve_mutex); 5204 else 5205 dasd_sfree_request(cqr, cqr->memdev); 5206 return rc; 5207 } 5208 5209 /* 5210 * Read performance statistics 5211 */ 5212 static int 5213 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 5214 { 5215 struct dasd_psf_prssd_data *prssdp; 5216 struct dasd_rssd_perf_stats_t *stats; 5217 struct dasd_ccw_req *cqr; 5218 struct ccw1 *ccw; 5219 int rc; 5220 5221 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, 5222 (sizeof(struct dasd_psf_prssd_data) + 5223 sizeof(struct dasd_rssd_perf_stats_t)), 5224 device, NULL); 5225 if (IS_ERR(cqr)) { 5226 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5227 "Could not allocate initialization request"); 5228 return PTR_ERR(cqr); 5229 } 5230 cqr->startdev = device; 5231 cqr->memdev = device; 5232 cqr->retries = 0; 5233 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 5234 cqr->expires = 10 * HZ; 5235 5236 /* Prepare for Read Subsystem Data */ 5237 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5238 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 5239 prssdp->order = PSF_ORDER_PRSSD; 5240 prssdp->suborder = 0x01; /* Performance Statistics */ 5241 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 5242 5243 ccw = cqr->cpaddr; 5244 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5245 ccw->count = sizeof(struct dasd_psf_prssd_data); 5246 ccw->flags |= CCW_FLAG_CC; 5247 ccw->cda = (__u32)(addr_t) prssdp; 5248 5249 /* Read Subsystem Data - Performance Statistics */ 5250 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5251 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 5252 5253 ccw++; 5254 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5255 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 5256 ccw->cda = 
(__u32)(addr_t) stats; 5257 5258 cqr->buildclk = get_tod_clock(); 5259 cqr->status = DASD_CQR_FILLED; 5260 rc = dasd_sleep_on(cqr); 5261 if (rc == 0) { 5262 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 5263 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 5264 if (copy_to_user(argp, stats, 5265 sizeof(struct dasd_rssd_perf_stats_t))) 5266 rc = -EFAULT; 5267 } 5268 dasd_sfree_request(cqr, cqr->memdev); 5269 return rc; 5270 } 5271 5272 /* 5273 * Get attributes (cache operations) 5274 * Returns the cache attributes used in Define Extent (DE). 5275 */ 5276 static int 5277 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) 5278 { 5279 struct dasd_eckd_private *private = device->private; 5280 struct attrib_data_t attrib = private->attrib; 5281 int rc; 5282 5283 if (!capable(CAP_SYS_ADMIN)) 5284 return -EACCES; 5285 if (!argp) 5286 return -EINVAL; 5287 5288 rc = 0; 5289 if (copy_to_user(argp, (long *) &attrib, 5290 sizeof(struct attrib_data_t))) 5291 rc = -EFAULT; 5292 5293 return rc; 5294 } 5295 5296 /* 5297 * Set attributes (cache operations) 5298 * Stores the attributes for cache operation to be used in Define Extent (DE). 5299 */ 5300 static int 5301 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) 5302 { 5303 struct dasd_eckd_private *private = device->private; 5304 struct attrib_data_t attrib; 5305 5306 if (!capable(CAP_SYS_ADMIN)) 5307 return -EACCES; 5308 if (!argp) 5309 return -EINVAL; 5310 5311 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) 5312 return -EFAULT; 5313 private->attrib = attrib; 5314 5315 dev_info(&device->cdev->dev, 5316 "The DASD cache mode was set to %x (%i cylinder prestage)\n", 5317 private->attrib.operation, private->attrib.nr_cyl); 5318 return 0; 5319 } 5320 5321 /* 5322 * Issue syscall I/O to EMC Symmetrix array. 5323 * CCWs are PSF and RSSD 5324 */ 5325 static int dasd_symm_io(struct dasd_device *device, void __user *argp) 5326 { 5327 struct dasd_symmio_parms usrparm; 5328 char *psf_data, *rssd_result; 5329 struct dasd_ccw_req *cqr; 5330 struct ccw1 *ccw; 5331 char psf0, psf1; 5332 int rc; 5333 5334 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO)) 5335 return -EACCES; 5336 psf0 = psf1 = 0; 5337 5338 /* Copy parms from caller */ 5339 rc = -EFAULT; 5340 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 5341 goto out; 5342 if (is_compat_task()) { 5343 /* Make sure pointers are sane even on 31 bit.
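* (a 31-bit caller cannot address memory above 2 GiB, so pointers with a non-zero high word are rejected below and bit 31 is masked off before use)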
*/ 5344 rc = -EINVAL; 5345 if ((usrparm.psf_data >> 32) != 0) 5346 goto out; 5347 if ((usrparm.rssd_result >> 32) != 0) 5348 goto out; 5349 usrparm.psf_data &= 0x7fffffffULL; 5350 usrparm.rssd_result &= 0x7fffffffULL; 5351 } 5352 /* at least 2 bytes are accessed and should be allocated */ 5353 if (usrparm.psf_data_len < 2) { 5354 DBF_DEV_EVENT(DBF_WARNING, device, 5355 "Symmetrix ioctl invalid data length %d", 5356 usrparm.psf_data_len); 5357 rc = -EINVAL; 5358 goto out; 5359 } 5360 /* alloc I/O data area */ 5361 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 5362 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 5363 if (!psf_data || !rssd_result) { 5364 rc = -ENOMEM; 5365 goto out_free; 5366 } 5367 5368 /* get syscall header from user space */ 5369 rc = -EFAULT; 5370 if (copy_from_user(psf_data, 5371 (void __user *)(unsigned long) usrparm.psf_data, 5372 usrparm.psf_data_len)) 5373 goto out_free; 5374 psf0 = psf_data[0]; 5375 psf1 = psf_data[1]; 5376 5377 /* setup CCWs for PSF + RSSD */ 5378 cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL); 5379 if (IS_ERR(cqr)) { 5380 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5381 "Could not allocate initialization request"); 5382 rc = PTR_ERR(cqr); 5383 goto out_free; 5384 } 5385 5386 cqr->startdev = device; 5387 cqr->memdev = device; 5388 cqr->retries = 3; 5389 cqr->expires = 10 * HZ; 5390 cqr->buildclk = get_tod_clock(); 5391 cqr->status = DASD_CQR_FILLED; 5392 5393 /* Build the ccws */ 5394 ccw = cqr->cpaddr; 5395 5396 /* PSF ccw */ 5397 ccw->cmd_code = DASD_ECKD_CCW_PSF; 5398 ccw->count = usrparm.psf_data_len; 5399 ccw->flags |= CCW_FLAG_CC; 5400 ccw->cda = (__u32)(addr_t) psf_data; 5401 5402 ccw++; 5403 5404 /* RSSD ccw */ 5405 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 5406 ccw->count = usrparm.rssd_result_len; 5407 ccw->flags = CCW_FLAG_SLI ; 5408 ccw->cda = (__u32)(addr_t) rssd_result; 5409 5410 rc = dasd_sleep_on(cqr); 5411 if (rc) 5412 goto out_sfree; 5413 5414 rc = -EFAULT; 5415 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 5416 rssd_result, usrparm.rssd_result_len)) 5417 goto out_sfree; 5418 rc = 0; 5419 5420 out_sfree: 5421 dasd_sfree_request(cqr, cqr->memdev); 5422 out_free: 5423 kfree(rssd_result); 5424 kfree(psf_data); 5425 out: 5426 DBF_DEV_EVENT(DBF_WARNING, device, 5427 "Symmetrix ioctl (0x%02x 0x%02x): rc=%d", 5428 (int) psf0, (int) psf1, rc); 5429 return rc; 5430 } 5431 5432 static int 5433 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 5434 { 5435 struct dasd_device *device = block->base; 5436 5437 switch (cmd) { 5438 case BIODASDGATTR: 5439 return dasd_eckd_get_attrib(device, argp); 5440 case BIODASDSATTR: 5441 return dasd_eckd_set_attrib(device, argp); 5442 case BIODASDPSRD: 5443 return dasd_eckd_performance(device, argp); 5444 case BIODASDRLSE: 5445 return dasd_eckd_release(device); 5446 case BIODASDRSRV: 5447 return dasd_eckd_reserve(device); 5448 case BIODASDSLCK: 5449 return dasd_eckd_steal_lock(device); 5450 case BIODASDSNID: 5451 return dasd_eckd_snid(device, argp); 5452 case BIODASDSYMMIO: 5453 return dasd_symm_io(device, argp); 5454 default: 5455 return -ENOTTY; 5456 } 5457 } 5458 5459 /* 5460 * Dump the range of CCWs into 'page' buffer 5461 * and return number of printed chars. 
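* (at most 32 data bytes are dumped per CCW, so together with the 1024-byte printk buffer limit noted below the output stays bounded)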
5462 */ 5463 static int 5464 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 5465 { 5466 int len, count; 5467 char *datap; 5468 5469 len = 0; 5470 while (from <= to) { 5471 len += sprintf(page + len, PRINTK_HEADER 5472 " CCW %p: %08X %08X DAT:", 5473 from, ((int *) from)[0], ((int *) from)[1]); 5474 5475 /* get pointer to data (consider IDALs) */ 5476 if (from->flags & CCW_FLAG_IDA) 5477 datap = (char *) *((addr_t *) (addr_t) from->cda); 5478 else 5479 datap = (char *) ((addr_t) from->cda); 5480 5481 /* dump data (max 32 bytes) */ 5482 for (count = 0; count < from->count && count < 32; count++) { 5483 if (count % 8 == 0) len += sprintf(page + len, " "); 5484 if (count % 4 == 0) len += sprintf(page + len, " "); 5485 len += sprintf(page + len, "%02x", datap[count]); 5486 } 5487 len += sprintf(page + len, "\n"); 5488 from++; 5489 } 5490 return len; 5491 } 5492 5493 static void 5494 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb, 5495 char *reason) 5496 { 5497 u64 *sense; 5498 u64 *stat; 5499 5500 sense = (u64 *) dasd_get_sense(irb); 5501 stat = (u64 *) &irb->scsw; 5502 if (sense) { 5503 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : " 5504 "%016llx %016llx %016llx %016llx", 5505 reason, *stat, *((u32 *) (stat + 1)), 5506 sense[0], sense[1], sense[2], sense[3]); 5507 } else { 5508 DBF_DEV_EVENT(DBF_EMERG, device, "%s: %016llx %08x : %s", 5509 reason, *stat, *((u32 *) (stat + 1)), 5510 "NO VALID SENSE"); 5511 } 5512 } 5513 5514 /* 5515 * Print sense data and related channel program. 5516 * Parts are printed because printk buffer is only 1024 bytes. 5517 */ 5518 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 5519 struct dasd_ccw_req *req, struct irb *irb) 5520 { 5521 char *page; 5522 struct ccw1 *first, *last, *fail, *from, *to; 5523 int len, sl, sct; 5524 5525 page = (char *) get_zeroed_page(GFP_ATOMIC); 5526 if (page == NULL) { 5527 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 5528 "No memory to dump sense data\n"); 5529 return; 5530 } 5531 /* dump the sense data */ 5532 len = sprintf(page, PRINTK_HEADER 5533 " I/O status report for device %s:\n", 5534 dev_name(&device->cdev->dev)); 5535 len += sprintf(page + len, PRINTK_HEADER 5536 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5537 "CS:%02X RC:%d\n", 5538 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5539 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5540 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5541 req ? req->intrc : 0); 5542 len += sprintf(page + len, PRINTK_HEADER 5543 " device %s: Failing CCW: %p\n", 5544 dev_name(&device->cdev->dev), 5545 (void *) (addr_t) irb->scsw.cmd.cpa); 5546 if (irb->esw.esw0.erw.cons) { 5547 for (sl = 0; sl < 4; sl++) { 5548 len += sprintf(page + len, PRINTK_HEADER 5549 " Sense(hex) %2d-%2d:", 5550 (8 * sl), ((8 * sl) + 7)); 5551 5552 for (sct = 0; sct < 8; sct++) { 5553 len += sprintf(page + len, " %02x", 5554 irb->ecw[8 * sl + sct]); 5555 } 5556 len += sprintf(page + len, "\n"); 5557 } 5558 5559 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 5560 /* 24 Byte Sense Data */ 5561 sprintf(page + len, PRINTK_HEADER 5562 " 24 Byte: %x MSG %x, " 5563 "%s MSGb to SYSOP\n", 5564 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 5565 irb->ecw[1] & 0x10 ? 
"" : "no"); 5566 } else { 5567 /* 32 Byte Sense Data */ 5568 sprintf(page + len, PRINTK_HEADER 5569 " 32 Byte: Format: %x " 5570 "Exception class %x\n", 5571 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 5572 } 5573 } else { 5574 sprintf(page + len, PRINTK_HEADER 5575 " SORRY - NO VALID SENSE AVAILABLE\n"); 5576 } 5577 printk(KERN_ERR "%s", page); 5578 5579 if (req) { 5580 /* req == NULL for unsolicited interrupts */ 5581 /* dump the Channel Program (max 140 Bytes per line) */ 5582 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 5583 first = req->cpaddr; 5584 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 5585 to = min(first + 6, last); 5586 len = sprintf(page, PRINTK_HEADER 5587 " Related CP in req: %p\n", req); 5588 dasd_eckd_dump_ccw_range(first, to, page + len); 5589 printk(KERN_ERR "%s", page); 5590 5591 /* print failing CCW area (maximum 4) */ 5592 /* scsw->cda is either valid or zero */ 5593 len = 0; 5594 from = ++to; 5595 fail = (struct ccw1 *)(addr_t) 5596 irb->scsw.cmd.cpa; /* failing CCW */ 5597 if (from < fail - 2) { 5598 from = fail - 2; /* there is a gap - print header */ 5599 len += sprintf(page, PRINTK_HEADER "......\n"); 5600 } 5601 to = min(fail + 1, last); 5602 len += dasd_eckd_dump_ccw_range(from, to, page + len); 5603 5604 /* print last CCWs (maximum 2) */ 5605 from = max(from, ++to); 5606 if (from < last - 1) { 5607 from = last - 1; /* there is a gap - print header */ 5608 len += sprintf(page + len, PRINTK_HEADER "......\n"); 5609 } 5610 len += dasd_eckd_dump_ccw_range(from, last, page + len); 5611 if (len > 0) 5612 printk(KERN_ERR "%s", page); 5613 } 5614 free_page((unsigned long) page); 5615 } 5616 5617 5618 /* 5619 * Print sense data from a tcw. 5620 */ 5621 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 5622 struct dasd_ccw_req *req, struct irb *irb) 5623 { 5624 char *page; 5625 int len, sl, sct, residual; 5626 struct tsb *tsb; 5627 u8 *sense, *rcq; 5628 5629 page = (char *) get_zeroed_page(GFP_ATOMIC); 5630 if (page == NULL) { 5631 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 5632 "No memory to dump sense data"); 5633 return; 5634 } 5635 /* dump the sense data */ 5636 len = sprintf(page, PRINTK_HEADER 5637 " I/O status report for device %s:\n", 5638 dev_name(&device->cdev->dev)); 5639 len += sprintf(page + len, PRINTK_HEADER 5640 " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X " 5641 "CS:%02X fcxs:%02X schxs:%02X RC:%d\n", 5642 req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw), 5643 scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw), 5644 scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw), 5645 irb->scsw.tm.fcxs, 5646 (irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq, 5647 req ? 
req->intrc : 0); 5648 len += sprintf(page + len, PRINTK_HEADER 5649 " device %s: Failing TCW: %p\n", 5650 dev_name(&device->cdev->dev), 5651 (void *) (addr_t) irb->scsw.tm.tcw); 5652 5653 tsb = NULL; 5654 sense = NULL; 5655 if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01)) 5656 tsb = tcw_get_tsb( 5657 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 5658 5659 if (tsb) { 5660 len += sprintf(page + len, PRINTK_HEADER 5661 " tsb->length %d\n", tsb->length); 5662 len += sprintf(page + len, PRINTK_HEADER 5663 " tsb->flags %x\n", tsb->flags); 5664 len += sprintf(page + len, PRINTK_HEADER 5665 " tsb->dcw_offset %d\n", tsb->dcw_offset); 5666 len += sprintf(page + len, PRINTK_HEADER 5667 " tsb->count %d\n", tsb->count); 5668 residual = tsb->count - 28; 5669 len += sprintf(page + len, PRINTK_HEADER 5670 " residual %d\n", residual); 5671 5672 switch (tsb->flags & 0x07) { 5673 case 1: /* tsa_iostat */ 5674 len += sprintf(page + len, PRINTK_HEADER 5675 " tsb->tsa.iostat.dev_time %d\n", 5676 tsb->tsa.iostat.dev_time); 5677 len += sprintf(page + len, PRINTK_HEADER 5678 " tsb->tsa.iostat.def_time %d\n", 5679 tsb->tsa.iostat.def_time); 5680 len += sprintf(page + len, PRINTK_HEADER 5681 " tsb->tsa.iostat.queue_time %d\n", 5682 tsb->tsa.iostat.queue_time); 5683 len += sprintf(page + len, PRINTK_HEADER 5684 " tsb->tsa.iostat.dev_busy_time %d\n", 5685 tsb->tsa.iostat.dev_busy_time); 5686 len += sprintf(page + len, PRINTK_HEADER 5687 " tsb->tsa.iostat.dev_act_time %d\n", 5688 tsb->tsa.iostat.dev_act_time); 5689 sense = tsb->tsa.iostat.sense; 5690 break; 5691 case 2: /* ts_ddpc */ 5692 len += sprintf(page + len, PRINTK_HEADER 5693 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 5694 for (sl = 0; sl < 2; sl++) { 5695 len += sprintf(page + len, PRINTK_HEADER 5696 " tsb->tsa.ddpc.rcq %2d-%2d: ", 5697 (8 * sl), ((8 * sl) + 7)); 5698 rcq = tsb->tsa.ddpc.rcq; 5699 for (sct = 0; sct < 8; sct++) { 5700 len += sprintf(page + len, " %02x", 5701 rcq[8 * sl + sct]); 5702 } 5703 len += sprintf(page + len, "\n"); 5704 } 5705 sense = tsb->tsa.ddpc.sense; 5706 break; 5707 case 3: /* tsa_intrg */ 5708 len += sprintf(page + len, PRINTK_HEADER 5709 " tsb->tsa.intrg.: not supported yet\n"); 5710 break; 5711 } 5712 5713 if (sense) { 5714 for (sl = 0; sl < 4; sl++) { 5715 len += sprintf(page + len, PRINTK_HEADER 5716 " Sense(hex) %2d-%2d:", 5717 (8 * sl), ((8 * sl) + 7)); 5718 for (sct = 0; sct < 8; sct++) { 5719 len += sprintf(page + len, " %02x", 5720 sense[8 * sl + sct]); 5721 } 5722 len += sprintf(page + len, "\n"); 5723 } 5724 5725 if (sense[27] & DASD_SENSE_BIT_0) { 5726 /* 24 Byte Sense Data */ 5727 sprintf(page + len, PRINTK_HEADER 5728 " 24 Byte: %x MSG %x, " 5729 "%s MSGb to SYSOP\n", 5730 sense[7] >> 4, sense[7] & 0x0f, 5731 sense[1] & 0x10 ? 
"" : "no"); 5732 } else { 5733 /* 32 Byte Sense Data */ 5734 sprintf(page + len, PRINTK_HEADER 5735 " 32 Byte: Format: %x " 5736 "Exception class %x\n", 5737 sense[6] & 0x0f, sense[22] >> 4); 5738 } 5739 } else { 5740 sprintf(page + len, PRINTK_HEADER 5741 " SORRY - NO VALID SENSE AVAILABLE\n"); 5742 } 5743 } else { 5744 sprintf(page + len, PRINTK_HEADER 5745 " SORRY - NO TSB DATA AVAILABLE\n"); 5746 } 5747 printk(KERN_ERR "%s", page); 5748 free_page((unsigned long) page); 5749 } 5750 5751 static void dasd_eckd_dump_sense(struct dasd_device *device, 5752 struct dasd_ccw_req *req, struct irb *irb) 5753 { 5754 u8 *sense = dasd_get_sense(irb); 5755 5756 if (scsw_is_tm(&irb->scsw)) { 5757 /* 5758 * In some cases the 'File Protected' or 'Incorrect Length' 5759 * error might be expected and log messages shouldn't be written 5760 * then. Check if the according suppress bit is set. 5761 */ 5762 if (sense && (sense[1] & SNS1_FILE_PROTECTED) && 5763 test_bit(DASD_CQR_SUPPRESS_FP, &req->flags)) 5764 return; 5765 if (scsw_cstat(&irb->scsw) == 0x40 && 5766 test_bit(DASD_CQR_SUPPRESS_IL, &req->flags)) 5767 return; 5768 5769 dasd_eckd_dump_sense_tcw(device, req, irb); 5770 } else { 5771 /* 5772 * In some cases the 'Command Reject' or 'No Record Found' 5773 * error might be expected and log messages shouldn't be 5774 * written then. Check if the according suppress bit is set. 5775 */ 5776 if (sense && sense[0] & SNS0_CMD_REJECT && 5777 test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) 5778 return; 5779 5780 if (sense && sense[1] & SNS1_NO_REC_FOUND && 5781 test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) 5782 return; 5783 5784 dasd_eckd_dump_sense_ccw(device, req, irb); 5785 } 5786 } 5787 5788 static int dasd_eckd_reload_device(struct dasd_device *device) 5789 { 5790 struct dasd_eckd_private *private = device->private; 5791 int rc, old_base; 5792 char print_uid[60]; 5793 struct dasd_uid uid; 5794 unsigned long flags; 5795 5796 /* 5797 * remove device from alias handling to prevent new requests 5798 * from being scheduled on the wrong alias device 5799 */ 5800 dasd_alias_remove_device(device); 5801 5802 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 5803 old_base = private->uid.base_unit_addr; 5804 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 5805 5806 /* Read Configuration Data */ 5807 rc = dasd_eckd_read_conf(device); 5808 if (rc) 5809 goto out_err; 5810 5811 rc = dasd_eckd_generate_uid(device); 5812 if (rc) 5813 goto out_err; 5814 /* 5815 * update unit address configuration and 5816 * add device to alias management 5817 */ 5818 dasd_alias_update_add_device(device); 5819 5820 dasd_eckd_get_uid(device, &uid); 5821 5822 if (old_base != uid.base_unit_addr) { 5823 if (strlen(uid.vduit) > 0) 5824 snprintf(print_uid, sizeof(print_uid), 5825 "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial, 5826 uid.ssid, uid.base_unit_addr, uid.vduit); 5827 else 5828 snprintf(print_uid, sizeof(print_uid), 5829 "%s.%s.%04x.%02x", uid.vendor, uid.serial, 5830 uid.ssid, uid.base_unit_addr); 5831 5832 dev_info(&device->cdev->dev, 5833 "An Alias device was reassigned to a new base device " 5834 "with UID: %s\n", print_uid); 5835 } 5836 return 0; 5837 5838 out_err: 5839 return -1; 5840 } 5841 5842 static int dasd_eckd_read_message_buffer(struct dasd_device *device, 5843 struct dasd_rssd_messages *messages, 5844 __u8 lpum) 5845 { 5846 struct dasd_rssd_messages *message_buf; 5847 struct dasd_psf_prssd_data *prssdp; 5848 struct dasd_ccw_req *cqr; 5849 struct ccw1 *ccw; 5850 int rc; 5851 5852 cqr = 

static int dasd_eckd_read_message_buffer(struct dasd_device *device,
					 struct dasd_rssd_messages *messages,
					 __u8 lpum)
{
	struct dasd_rssd_messages *message_buf;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_messages)),
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate read message buffer request");
		return PTR_ERR(cqr);
	}

	cqr->lpm = lpum;
retry:
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10 * HZ;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);
	/*
	 * dasd_sleep_on_immediatly does not do complex error recovery,
	 * so clear the erp flag and set the retry counter to do basic erp
	 */
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 256;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x03;	/* Message Buffer */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - message buffer */
	message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
	memset(message_buf, 0, sizeof(struct dasd_rssd_messages));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_messages);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) message_buf;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on_immediatly(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		message_buf = (struct dasd_rssd_messages *)
			(prssdp + 1);
		memcpy(messages, message_buf,
		       sizeof(struct dasd_rssd_messages));
	} else if (cqr->lpm) {
		/*
		 * on z/VM we might not be able to do I/O on the requested
		 * path, but instead we get the required information on any
		 * path, so retry with an open path mask
		 */
		cqr->lpm = 0;
		goto retry;
	} else
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading messages failed with rc=%d\n",
				rc);
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

static int dasd_eckd_query_host_access(struct dasd_device *device,
				       struct dasd_psf_query_host_access *data)
{
	struct dasd_eckd_private *private = device->private;
	struct dasd_psf_query_host_access *host_access;
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	/* not available for HYPER PAV alias devices */
	if (!device->block && private->lcu->pav == HYPER_PAV)
		return -EOPNOTSUPP;

	/* may not be supported by the storage server */
	if (!(private->features.feature[14] & 0x80))
		return -EOPNOTSUPP;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
				   sizeof(struct dasd_psf_prssd_data) + 1,
				   device, NULL);
	if (IS_ERR(cqr)) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate query host access request");
		return PTR_ERR(cqr);
	}
	host_access = kzalloc(sizeof(*host_access), GFP_KERNEL | GFP_DMA);
	if (!host_access) {
		dasd_sfree_request(cqr, device);
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate host_access buffer");
		return -ENOMEM;
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = PSF_SUBORDER_QHA;	/* query host access */
	/* LSS and Volume that will be queried */
	prssdp->lss = private->ned->ID;
	prssdp->volume = private->ned->unit_addr;
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - query host access */
	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_psf_query_host_access);
	ccw->flags |= CCW_FLAG_SLI;
	ccw->cda = (__u32)(addr_t) host_access;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	/* the command might not be supported, suppress the error message */
	__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);
	rc = dasd_sleep_on_interruptible(cqr);
	if (rc == 0) {
		*data = *host_access;
	} else {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
				"Reading host access data failed with rc=%d\n",
				rc);
		rc = -EOPNOTSUPP;
	}

	dasd_sfree_request(cqr, cqr->memdev);
	kfree(host_access);
	return rc;
}

/*
 * return number of grouped devices
 */
static int dasd_eckd_host_access_count(struct dasd_device *device)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	int count = 0;
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		if (entry->status_flags & DASD_ECKD_PG_GROUPED)
			count++;
	}

	kfree(access);
	return count;
}

/*
 * write host access information to a sequential file
 */
static int dasd_hosts_print(struct dasd_device *device, struct seq_file *m)
{
	struct dasd_psf_query_host_access *access;
	struct dasd_ckd_path_group_entry *entry;
	struct dasd_ckd_host_information *info;
	char sysplex[9] = "";
	int rc, i;

	access = kzalloc(sizeof(*access), GFP_NOIO);
	if (!access) {
		DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
				"Could not allocate access buffer");
		return -ENOMEM;
	}
	rc = dasd_eckd_query_host_access(device, access);
	if (rc) {
		kfree(access);
		return rc;
	}

	info = (struct dasd_ckd_host_information *)
		access->host_access_information;
	for (i = 0; i < info->entry_count; i++) {
		entry = (struct dasd_ckd_path_group_entry *)
			(info->entry + i * info->entry_size);
		/* PGID */
		seq_printf(m, "pgid %*phN\n", 11, entry->pgid);
		/* FLAGS */
		seq_printf(m, "status_flags %02x\n", entry->status_flags);
		/* SYSPLEX NAME */
		memcpy(&sysplex, &entry->sysplex_name, sizeof(sysplex) - 1);
		EBCASC(sysplex, sizeof(sysplex));
		seq_printf(m, "sysplex_name %8s\n", sysplex);
		/* SUPPORTED CYLINDER */
		seq_printf(m, "supported_cylinder %d\n", entry->cylinder);
		/* TIMESTAMP */
		seq_printf(m, "timestamp %lu\n", (unsigned long)
			   entry->timestamp);
	}
	kfree(access);

	return 0;
}

/*
 * Perform Subsystem Function - CUIR response
 */
static int
dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
			    __u32 message_id, __u8 lpum)
{
	struct dasd_psf_cuir_response *psf_cuir;
	int pos = pathmask_to_pos(lpum);
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */,
				   sizeof(struct dasd_psf_cuir_response),
				   device, NULL);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-CUIR request");
		return PTR_ERR(cqr);
	}

	psf_cuir = (struct dasd_psf_cuir_response *)cqr->data;
	psf_cuir->order = PSF_ORDER_CUIR_RESPONSE;
	psf_cuir->cc = response;
	psf_cuir->chpid = device->path[pos].chpid;
	psf_cuir->message_id = message_id;
	psf_cuir->cssid = device->path[pos].cssid;
	psf_cuir->ssid = device->path[pos].ssid;
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_cuir;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(struct dasd_psf_cuir_response);

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->retries = 256;
	cqr->expires = 10 * HZ;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	set_bit(DASD_CQR_VERIFY_PATH, &cqr->flags);

	rc = dasd_sleep_on(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Return the configuration data referenced by the record selector if a
 * record selector is specified; otherwise return the conf_data pointer
 * for the path specified by lpum.
 */
static struct dasd_conf_data *dasd_eckd_get_ref_conf(struct dasd_device *device,
						     __u8 lpum,
						     struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *conf_data;
	int path, pos;

	if (cuir->record_selector == 0)
		goto out;
	for (path = 0x80, pos = 0; path; path >>= 1, pos++) {
		conf_data = device->path[pos].conf_data;
		if (conf_data->gneq.record_selector ==
		    cuir->record_selector)
			return conf_data;
	}
out:
	return device->path[pathmask_to_pos(lpum)].conf_data;
}
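
/*
 * Illustration of the scope matching done by dasd_eckd_cuir_scope() below
 * (made-up values): a CUIR request with ned_map 0x08 selects neds[4],
 * since pos = 8 - ffs(0x08) = 4, and a neq_map of { 0x00, 0x00, 0x03 }
 * becomes the mask 0x000003, i.e. only GNEQ bytes 31 and 30 are compared
 * against the reference GNEQ.
 */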

/*
 * This function determines the scope of a reconfiguration request by
 * analysing the path and device selection data provided in the CUIR
 * request. Returns a path mask containing the CUIR-affected paths for
 * the given device.
 *
 * If the CUIR request does not contain the required information, return
 * the path mask of the path the attention message for the CUIR request
 * was received on.
 */
static int dasd_eckd_cuir_scope(struct dasd_device *device, __u8 lpum,
				struct dasd_cuir_message *cuir)
{
	struct dasd_conf_data *ref_conf_data;
	unsigned long bitmask = 0, mask = 0;
	struct dasd_conf_data *conf_data;
	unsigned int pos, path;
	char *ref_gneq, *gneq;
	char *ref_ned, *ned;
	int tbcpm = 0;

	/* if the CUIR request does not specify the scope, use the path
	   the attention message was presented on */
	if (!cuir->ned_map ||
	    !(cuir->neq_map[0] | cuir->neq_map[1] | cuir->neq_map[2]))
		return lpum;

	/* get reference conf data */
	ref_conf_data = dasd_eckd_get_ref_conf(device, lpum, cuir);
	/* reference ned is determined by ned_map field */
	pos = 8 - ffs(cuir->ned_map);
	ref_ned = (char *)&ref_conf_data->neds[pos];
	ref_gneq = (char *)&ref_conf_data->gneq;
	/* transfer 24 bit neq_map to mask */
	mask = cuir->neq_map[2];
	mask |= cuir->neq_map[1] << 8;
	mask |= cuir->neq_map[0] << 16;

	for (path = 0; path < 8; path++) {
		/* initialise data per path */
		bitmask = mask;
		conf_data = device->path[path].conf_data;
		pos = 8 - ffs(cuir->ned_map);
		ned = (char *) &conf_data->neds[pos];
		/* compare reference ned and per path ned */
		if (memcmp(ref_ned, ned, sizeof(*ned)) != 0)
			continue;
		gneq = (char *)&conf_data->gneq;
		/* compare reference gneq and per path gneq under the 24 bit
		   mask where mask bit 0 (lsb) corresponds to byte 31 of the
		   gneq and mask bit 23 to byte 8 */
		while (bitmask) {
			pos = ffs(bitmask) - 1;
			if (memcmp(&ref_gneq[31 - pos], &gneq[31 - pos], 1)
			    != 0)
				break;
			clear_bit(pos, &bitmask);
		}
		if (bitmask)
			continue;
		/* device and path match the reference values,
		   add path to CUIR scope */
		tbcpm |= 0x80 >> path;
	}
	return tbcpm;
}

static void dasd_eckd_cuir_notify_user(struct dasd_device *device,
				       unsigned long paths, int action)
{
	int pos;

	while (paths) {
		/* get position of bit in mask */
		pos = 8 - ffs(paths);
		/* get channel path descriptor from this position */
		if (action == CUIR_QUIESCE)
			pr_warn("Service on the storage server caused path %x.%02x to go offline\n",
				device->path[pos].cssid,
				device->path[pos].chpid);
		else if (action == CUIR_RESUME)
			pr_info("Path %x.%02x is back online after service on the storage server\n",
				device->path[pos].cssid,
				device->path[pos].chpid);
		clear_bit(7 - pos, &paths);
	}
}

static int dasd_eckd_cuir_remove_path(struct dasd_device *device, __u8 lpum,
				      struct dasd_cuir_message *cuir)
{
	unsigned long tbcpm;

	tbcpm = dasd_eckd_cuir_scope(device, lpum, cuir);
	/* nothing to do if path is not in use */
	if (!(dasd_path_get_opm(device) & tbcpm))
		return 0;
	if (!(dasd_path_get_opm(device) & ~tbcpm)) {
		/* no path would be left if the CUIR action is taken;
		   return an error */
		return -EINVAL;
	}
	/* remove device from operational path mask */
	dasd_path_remove_opm(device, tbcpm);
	dasd_path_add_cuirpm(device, tbcpm);
	return tbcpm;
}
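
/*
 * Note on path masks: a path mask is an 8 bit value in which the most
 * significant bit (0x80) denotes the path at position 0, so the path at
 * position pos contributes 0x80 >> pos (see pathmask_to_pos() and the
 * tbcpm calculation in dasd_eckd_cuir_scope() above).
 */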

/*
 * walk through all devices and build a path mask to quiesce them;
 * return an error if the last path to a device would be removed
 *
 * if only part of the devices are quiesced and an error occurs,
 * no onlining is necessary; the storage server will notify the
 * already offline devices again
 */
static int dasd_eckd_cuir_quiesce(struct dasd_device *device, __u8 lpum,
				  struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	unsigned long flags;
	int tbcpm;

	/* active devices */
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* inactive devices */
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
		tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
		spin_unlock_irqrestore(get_ccwdev_lock(dev->cdev), flags);
		if (tbcpm < 0)
			goto out_err;
		paths |= tbcpm;
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist, group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			spin_lock_irqsave(get_ccwdev_lock(dev->cdev), flags);
			tbcpm = dasd_eckd_cuir_remove_path(dev, lpum, cuir);
			spin_unlock_irqrestore(
				get_ccwdev_lock(dev->cdev), flags);
			if (tbcpm < 0)
				goto out_err;
			paths |= tbcpm;
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_QUIESCE);
	return 0;
out_err:
	return tbcpm;
}
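
/*
 * Resume does not simply re-add the affected paths to the operational
 * path mask: each path is marked "to be verified" (tbvpm) and the device
 * bottom half is scheduled, so the path verification worker revalidates
 * the path before it is taken back into use.
 */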

static int dasd_eckd_cuir_resume(struct dasd_device *device, __u8 lpum,
				 struct dasd_cuir_message *cuir)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long paths = 0;
	int tbcpm;

	/*
	 * the path may have been added through a generic path event before;
	 * only trigger path verification if the path is not already in use
	 */
	list_for_each_entry_safe(dev, n,
				 &private->lcu->active_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	list_for_each_entry_safe(dev, n,
				 &private->lcu->inactive_devices,
				 alias_list) {
		tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
		paths |= tbcpm;
		if (!(dasd_path_get_opm(dev) & tbcpm)) {
			dasd_path_add_tbvpm(dev, tbcpm);
			dasd_schedule_device_bh(dev);
		}
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n,
					 &pavgroup->baselist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
		list_for_each_entry_safe(dev, n,
					 &pavgroup->aliaslist,
					 alias_list) {
			tbcpm = dasd_eckd_cuir_scope(dev, lpum, cuir);
			paths |= tbcpm;
			if (!(dasd_path_get_opm(dev) & tbcpm)) {
				dasd_path_add_tbvpm(dev, tbcpm);
				dasd_schedule_device_bh(dev);
			}
		}
	}
	/* notify user about all paths affected by CUIR action */
	dasd_eckd_cuir_notify_user(device, paths, CUIR_RESUME);
	return 0;
}

static void dasd_eckd_handle_cuir(struct dasd_device *device, void *messages,
				  __u8 lpum)
{
	struct dasd_cuir_message *cuir = messages;
	int response;

	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR request: %016llx %016llx %016llx %08x",
		      ((u64 *)cuir)[0], ((u64 *)cuir)[1], ((u64 *)cuir)[2],
		      ((u32 *)cuir)[3]);

	if (cuir->code == CUIR_QUIESCE) {
		/* quiesce */
		if (dasd_eckd_cuir_quiesce(device, lpum, cuir))
			response = PSF_CUIR_LAST_PATH;
		else
			response = PSF_CUIR_COMPLETED;
	} else if (cuir->code == CUIR_RESUME) {
		/* resume */
		dasd_eckd_cuir_resume(device, lpum, cuir);
		response = PSF_CUIR_COMPLETED;
	} else
		response = PSF_CUIR_NOT_SUPPORTED;

	dasd_eckd_psf_cuir_response(device, response,
				    cuir->message_id, lpum);
	DBF_DEV_EVENT(DBF_WARNING, device,
		      "CUIR response: %d on message ID %08x", response,
		      cuir->message_id);
	/* to make sure there is no attention left, schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_oos_resume(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;
	struct alias_pav_group *pavgroup, *tempgroup;
	struct dasd_device *dev, *n;
	unsigned long flags;

	spin_lock_irqsave(&private->lcu->lock, flags);
	list_for_each_entry_safe(dev, n, &private->lcu->active_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	list_for_each_entry_safe(dev, n, &private->lcu->inactive_devices,
				 alias_list) {
		if (dev->stopped & DASD_STOPPED_NOSPC)
			dasd_generic_space_avail(dev);
	}
	/* devices in PAV groups */
	list_for_each_entry_safe(pavgroup, tempgroup,
				 &private->lcu->grouplist,
				 group) {
		list_for_each_entry_safe(dev, n, &pavgroup->baselist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
		list_for_each_entry_safe(dev, n, &pavgroup->aliaslist,
					 alias_list) {
			if (dev->stopped & DASD_STOPPED_NOSPC)
				dasd_generic_space_avail(dev);
		}
	}
	spin_unlock_irqrestore(&private->lcu->lock, flags);
}
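
/*
 * Out-of-space (OOS) attention handling: a "warn" message indicates that
 * the extent pool has crossed its warning watermark but space is still
 * available, so devices stopped with DASD_STOPPED_NOSPC are resumed; an
 * "exhaust" message leaves them stopped, and a "relieve" message is only
 * logged. In all cases the cached extent pool information is refreshed.
 */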

static void dasd_eckd_handle_oos(struct dasd_device *device, void *messages,
				 __u8 lpum)
{
	struct dasd_oos_message *oos = messages;

	switch (oos->code) {
	case REPO_WARN:
	case POOL_WARN:
		dev_warn(&device->cdev->dev,
			 "Extent pool usage has reached a critical value\n");
		dasd_eckd_oos_resume(device);
		break;
	case REPO_EXHAUST:
	case POOL_EXHAUST:
		dev_warn(&device->cdev->dev,
			 "Extent pool is exhausted\n");
		break;
	case REPO_RELIEVE:
	case POOL_RELIEVE:
		dev_info(&device->cdev->dev,
			 "Extent pool physical space constraint has been relieved\n");
		break;
	}

	/* In any case, update related data */
	dasd_eckd_read_ext_pool_info(device);

	/* to make sure there is no attention left, schedule work again */
	device->discipline->check_attention(device, lpum);
}

static void dasd_eckd_check_attention_work(struct work_struct *work)
{
	struct check_attention_work_data *data;
	struct dasd_rssd_messages *messages;
	struct dasd_device *device;
	int rc;

	data = container_of(work, struct check_attention_work_data, worker);
	device = data->device;
	messages = kzalloc(sizeof(*messages), GFP_KERNEL);
	if (!messages) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate attention message buffer");
		goto out;
	}
	rc = dasd_eckd_read_message_buffer(device, messages, data->lpum);
	if (rc)
		goto out;

	if (messages->length == ATTENTION_LENGTH_CUIR &&
	    messages->format == ATTENTION_FORMAT_CUIR)
		dasd_eckd_handle_cuir(device, messages, data->lpum);
	if (messages->length == ATTENTION_LENGTH_OOS &&
	    messages->format == ATTENTION_FORMAT_OOS)
		dasd_eckd_handle_oos(device, messages, data->lpum);

out:
	dasd_put_device(device);
	kfree(messages);
	kfree(data);
}

static int dasd_eckd_check_attention(struct dasd_device *device, __u8 lpum)
{
	struct check_attention_work_data *data;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return -ENOMEM;
	INIT_WORK(&data->worker, dasd_eckd_check_attention_work);
	dasd_get_device(device);
	data->device = device;
	data->lpum = lpum;
	schedule_work(&data->worker);
	return 0;
}

static int dasd_eckd_disable_hpf_path(struct dasd_device *device, __u8 lpum)
{
	if (~lpum & dasd_path_get_opm(device)) {
		dasd_path_add_nohpfpm(device, lpum);
		dasd_path_remove_opm(device, lpum);
		dev_err(&device->cdev->dev,
			"Channel path %02X lost HPF functionality and is disabled\n",
			lpum);
		return 1;
	}
	return 0;
}

static void dasd_eckd_disable_hpf_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	dev_err(&device->cdev->dev,
		"High Performance FICON disabled\n");
	private->fcx_max_data = 0;
}

static int dasd_eckd_hpf_enabled(struct dasd_device *device)
{
	struct dasd_eckd_private *private = device->private;

	return private->fcx_max_data ? 1 : 0;
}

static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
				       struct irb *irb)
{
	struct dasd_eckd_private *private = device->private;

	if (!private->fcx_max_data) {
		/* sanity check for no HPF, the error makes no sense */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Trying to disable HPF for a non HPF device");
		return;
	}
	if (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX) {
		dasd_eckd_disable_hpf_device(device);
	} else if (irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX) {
		if (dasd_eckd_disable_hpf_path(device, irb->esw.esw1.lpum))
			return;
		dasd_eckd_disable_hpf_device(device);
		dasd_path_set_tbvpm(device,
				    dasd_path_get_hpfpm(device));
	}
	/*
	 * prevent any new I/O from being started on the device and
	 * schedule a requeue of existing requests
	 */
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
	dasd_schedule_requeue(device);
}
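
/*
 * A worked example of the raw-track sizing below (derived from the in-line
 * comment): with 4 KiB blocks, s2b_shift is 3, so 256 raw blocks become
 * 256 << 3 = 2048 sectors of 512 bytes = 1024 KiB per request; at 64 KiB
 * per raw track that is 16 tracks.
 */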

/*
 * Initialize the block layer request queue.
 */
static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
{
	unsigned int logical_block_size = block->bp_block;
	struct request_queue *q = block->request_queue;
	struct dasd_device *device = block->base;
	int max;

	if (device->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256;
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track, so the max_hw_sectors
		 * are 2048 x 512B = 1024kB = 16 tracks
		 */
		max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
	} else {
		max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
	}
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	q->limits.max_dev_sectors = max;
	blk_queue_logical_block_size(q, logical_block_size);
	blk_queue_max_hw_sectors(q, max);
	blk_queue_max_segments(q, USHRT_MAX);
	/* With page sized segments each segment can be translated into one idaw/tidaw */
	blk_queue_max_segment_size(q, PAGE_SIZE);
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
}

static struct ccw_driver dasd_eckd_driver = {
	.driver = {
		.name	= "dasd-eckd",
		.owner	= THIS_MODULE,
		.dev_groups = dasd_dev_groups,
	},
	.ids	     = dasd_eckd_ids,
	.probe	     = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
	.path_event  = dasd_generic_path_event,
	.shutdown    = dasd_generic_shutdown,
	.uc_handler  = dasd_generic_uc_handler,
	.int_class   = IRQIO_DAS,
};
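
/*
 * The discipline structure is the dispatch table through which the DASD
 * core driver calls into the ECKD-specific code above; every hook set
 * here implements one generic DASD operation for ECKD devices.
 */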

static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.pe_handler = dasd_eckd_pe_handler,
	.basic_to_ready = dasd_eckd_basic_to_ready,
	.online_to_ready = dasd_eckd_online_to_ready,
	.basic_to_known = dasd_eckd_basic_to_known,
	.setup_blk_queue = dasd_eckd_setup_blk_queue,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.check_device_format = dasd_eckd_check_device_format,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.check_for_device_change = dasd_eckd_check_for_device_change,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.dump_sense_dbf = dasd_eckd_dump_sense_dbf,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
	.reload = dasd_eckd_reload_device,
	.get_uid = dasd_eckd_get_uid,
	.kick_validate = dasd_eckd_kick_validate_server,
	.check_attention = dasd_eckd_check_attention,
	.host_access_count = dasd_eckd_host_access_count,
	.hosts_print = dasd_hosts_print,
	.handle_hpf_error = dasd_eckd_handle_hpf_error,
	.disable_hpf = dasd_eckd_disable_hpf_device,
	.hpf_enabled = dasd_eckd_hpf_enabled,
	.reset_path = dasd_eckd_reset_path,
	.is_ese = dasd_eckd_is_ese,
	.space_allocated = dasd_eckd_space_allocated,
	.space_configured = dasd_eckd_space_configured,
	.logical_capacity = dasd_eckd_logical_capacity,
	.release_space = dasd_eckd_release_space,
	.ext_pool_id = dasd_eckd_ext_pool_id,
	.ext_size = dasd_eckd_ext_size,
	.ext_pool_cap_at_warnlevel = dasd_eckd_ext_pool_cap_at_warnlevel,
	.ext_pool_warn_thrshld = dasd_eckd_ext_pool_warn_thrshld,
	.ext_pool_oos = dasd_eckd_ext_pool_oos,
	.ext_pool_exhaust = dasd_eckd_ext_pool_exhaust,
	.ese_format = dasd_eckd_ese_format,
	.ese_read = dasd_eckd_ese_read,
};

static int __init
dasd_eckd_init(void)
{
	int ret;

	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	dasd_reserve_req = kmalloc(sizeof(*dasd_reserve_req),
				   GFP_KERNEL | GFP_DMA);
	if (!dasd_reserve_req)
		return -ENOMEM;
	dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
				    GFP_KERNEL | GFP_DMA);
	if (!dasd_vol_info_req) {
		kfree(dasd_reserve_req);
		return -ENOMEM;
	}
	pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
				    GFP_KERNEL | GFP_DMA);
	if (!pe_handler_worker) {
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	rawpadpage = (void *)__get_free_page(GFP_KERNEL);
	if (!rawpadpage) {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		return -ENOMEM;
	}
	ret = ccw_driver_register(&dasd_eckd_driver);
	if (!ret)
		wait_for_device_probe();
	else {
		kfree(pe_handler_worker);
		kfree(dasd_reserve_req);
		kfree(dasd_vol_info_req);
		free_page((unsigned long)rawpadpage);
	}
	return ret;
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
	kfree(pe_handler_worker);
	kfree(dasd_reserve_req);
	kfree(dasd_vol_info_req);
	free_page((unsigned long)rawpadpage);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);
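
/*
 * Illustrative usage note (an assumption, not part of the driver itself):
 * this code is built as the dasd_eckd_mod module and is typically loaded
 * automatically together with the DASD core; a device can then be brought
 * online with, for example, "chccwdev -e 0.0.1234" from the s390-tools
 * package.
 */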