/*
 * File...........: linux/drivers/s390/block/dasd_eckd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
 *
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/todclk.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe (struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_eckd_probe: could not set ccw-device options "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static struct ccw_driver dasd_eckd_driver = {
	.name        = "dasd-eckd",
	.owner       = THIS_MODULE,
	.ids         = dasd_eckd_ids,
	.probe       = dasd_eckd_probe,
	.remove      = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = dasd_eckd_set_online,
	.notify      = dasd_generic_notify,
};

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}

static int
check_XRC (struct ccw1 *de_ccw,
	   struct DE_eckd_data *data,
	   struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
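	/* Only other errors from get_sync_clock() are passed back to the
	 * caller. */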
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}

static int
define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
	      int totrk, int cmd, struct dasd_device * device)
{
	struct dasd_eckd_private *private;
	struct ch_t geo, beg, end;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC (ccw, data, device);
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	geo.cyl = private->rdc_data.no_cyl;
	geo.head = private->rdc_data.trk_per_cyl;
	beg.cyl = trk / geo.head;
	beg.head = trk % geo.head;
	end.cyl = totrk / geo.head;
	end.head = totrk % geo.head;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (end.cyl + private->attrib.nr_cyl < geo.cyl)
			end.cyl += private->attrib.nr_cyl;
		else
			end.cyl = (geo.cyl - 1);
	}

	data->beg_ext.cyl = beg.cyl;
	data->beg_ext.head = beg.head;
	data->end_ext.cyl = end.cyl;
	data->end_ext.head = end.head;
	return rc;
}

static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
		  int totrk, int cmd, struct dasd_device *basedev,
		  struct dasd_device *startdev)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *data;
	struct ch_t geo, beg, end;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	data = &pfxdata->define_extend;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	/* prefix data */
	pfxdata->format = 0;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extend = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extend data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	geo.cyl = basepriv->rdc_data.no_cyl;
	geo.head = basepriv->rdc_data.trk_per_cyl;
	beg.cyl = trk / geo.head;
	beg.head = trk % geo.head;
	end.cyl = totrk / geo.head;
	end.head = totrk % geo.head;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl)
			end.cyl += basepriv->attrib.nr_cyl;
		else
			end.cyl = (geo.cyl - 1);
	}

	data->beg_ext.cyl = beg.cyl;
	data->beg_ext.head = beg.head;
	data->end_ext.cyl = end.cyl;
	data->end_ext.head = end.head;
	return rc;
}

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
	      int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
	}
	data->seek_addr.cyl = data->search_arg.cyl =
		trk / private->rdc_data.trk_per_cyl;
	data->seek_addr.head = data->search_arg.head =
		trk % private->rdc_data.trk_per_cyl;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant.
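 * The blocks in question are records 1-3 on track 0 and the records on
 * track 1, which carry a key in the compatible disk layout (see
 * dasd_eckd_format_device below).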
 * So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *	ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device,
				  struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	int count;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	return 0;
}

static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
						    void *rcd_buffer,
						    struct ciw *ciw, __u8 lpm)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);

	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate RCD request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = ciw->cmd;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = ciw->count;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}

	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buf[0] = 0xE5;
	rcd_buf[1] = 0xF1;
	rcd_buf[2] = 0x4B;
	rcd_buf[3] = 0xF0;
	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
	if (IS_ERR(cqr)) {
		ret = PTR_ERR(cqr);
		goto out_error;
	}
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = ciw->count;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{

	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};

static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm;
	struct dasd_eckd_private *private;
	struct dasd_eckd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = (struct dasd_eckd_path *) &private->path_data;
	path_data->opm = ccw_device_get_path_mask(device->cdev);
	lpm = 0x80;
	conf_data_saved = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (lpm & path_data->opm) {
			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
						     &conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				MESSAGE(KERN_WARNING,
					"Read configuration data returned "
					"error %d", rc);
				return rc;
			}
			if (conf_data == NULL) {
				MESSAGE(KERN_WARNING, "%s", "No configuration "
					"data retrieved");
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved) {
				kfree(private->conf_data);
				private->conf_data = conf_data;
				private->conf_len = conf_len;
				if (dasd_eckd_identify_conf_parts(private)) {
					private->conf_data = NULL;
					private->conf_len = 0;
					kfree(conf_data);
					continue;
				}
				conf_data_saved++;
			}
			switch (dasd_eckd_path_access(conf_data, conf_len)) {
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			if (conf_data != private->conf_data)
				kfree(conf_data);
		}
	}
	return 0;
}

static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1 /* PSF */ + 1 /* RSSD */ ,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}


/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0x88;
	psf_ssc_data->reserved[0] = 0x88;

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Validate storage server of current device.
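 * The 'validation' consists of sending a PSF-SSC to the storage server,
 * which is what enables PAV for a new LCU. A failing PSF-SSC is only
 * reported; the configuration data is re-read in any case.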
 */
static int dasd_eckd_validate_server(struct dasd_device *device)
{
	int rc;
	struct dasd_eckd_private *private;

	/* Currently PAV is the only reason to 'validate' server on LPAR */
	if (dasd_nopav || MACHINE_IS_VM)
		return 0;

	rc = dasd_eckd_psf_ssc(device);
	/* maybe the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	private = (struct dasd_eckd_private *) device->private;
	DEV_MESSAGE(KERN_INFO, device,
		    "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
		    private->uid.vendor, private->uid.serial,
		    private->uid.ssid, rc);
	/* RE-Read Configuration Data */
	return dasd_eckd_read_conf(device);
}

/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	void *rdc_data;
	int is_known, rc;

	private = (struct dasd_eckd_private *) device->private;
	if (private == NULL) {
		private = kzalloc(sizeof(struct dasd_eckd_private),
				  GFP_KERNEL | GFP_DMA);
		if (private == NULL) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "memory allocation failed for private "
				    "data");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* Generate device unique id and register in devmap */
	rc = dasd_eckd_generate_uid(device, &private->uid);
	if (rc)
		goto out_err1;
	dasd_set_uid(device->cdev, &private->uid);

	if (private->uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "could not allocate dasd block structure");
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	if (!is_known) {
		/* new lcu found */
		rc = dasd_eckd_validate_server(device); /* will switch pav on */
		if (rc)
			goto out_err3;
	}

	/* Read Feature Codes */
	rc = dasd_eckd_read_features(device);
	if (rc)
		goto out_err3;

	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	memset(rdc_data, 0, sizeof(private->rdc_data));
	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
	if (rc) {
		DEV_MESSAGE(KERN_WARNING, device,
			    "Read device characteristics returned "
			    "rc=%d", rc);
		goto out_err3;
	}
	DEV_MESSAGE(KERN_INFO, device,
		    "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
		    private->rdc_data.dev_type,
		    private->rdc_data.dev_model,
		    private->rdc_data.cu_type,
		    private->rdc_data.cu_model.model,
		    private->rdc_data.no_cyl,
		    private->rdc_data.trk_per_cyl,
		    private->rdc_data.sec_per_trk);
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}

static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	kfree(private->conf_data);
	private->conf_data = NULL;
}

static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void
dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
{
	struct dasd_eckd_private *private;
	struct dasd_device *device;

	device = init_cqr->startdev;
	private = (struct dasd_eckd_private *) device->private;
	private->init_cqr_status = init_cqr->status;
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}

static int
dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *init_cqr;

	private = (struct dasd_eckd_private *) block->base->private;
	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}

static int
dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status != DASD_CQR_DONE) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "volume analysis returned unformatted disk");
		return -EMEDIUMTYPE;
	}

	private->uses_cdl = 1;
	/* Calculate number of blocks/records per track. */
	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			DEV_MESSAGE(KERN_WARNING, device, "%s",
				    "Trk 0: no records after VTOC!");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Volume has incompatible disk layout");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	block->blocks = (private->rdc_data.no_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	DEV_MESSAGE(KERN_INFO, device,
		    "(%dkB blks): %dkB at %dkB/trk %s",
		    (block->bp_block >> 10),
		    ((private->rdc_data.no_cyl *
		      private->rdc_data.trk_per_cyl *
		      blk_per_trk * (block->bp_block >> 9)) >> 1),
		    ((blk_per_trk * block->bp_block) >> 10),
		    private->uses_cdl ?
		    "compatible disk layout" : "linux disk layout");

	return 0;
}

static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}

static int dasd_eckd_ready_to_online(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
};

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
};

static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}

static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
			struct format_data_t * fdata)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ccw1 *ccw;
	void *data;
	int rpt, cyl, head;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;
	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
	cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
	head = fdata->start_unit % private->rdc_data.trk_per_cyl;

	/* Sanity checks. */
	if (fdata->start_unit >=
	    (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
		DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
			    fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (fdata->start_unit > fdata->stop_unit) {
		DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
			    fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		DEV_MESSAGE(KERN_WARNING, device,
			    "Invalid blocksize %d...terminating!",
			    fdata->blksize);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 * Only some bit combinations make sense.
	 */
	switch (fdata->intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
			    fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;

	switch (fdata->intensity & ~0x08) {
	case 0x00: /* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (fdata->intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = cyl;
		ect->head = head;
		ect->record = 0;
		ect->kl = 0;
		ect->dl = 8;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((fdata->intensity & ~0x08) & 0x04) {	/* erase track */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = cyl;
		ect->head = head;
		ect->record = 1;
		ect->kl = 0;
		ect->dl = 0;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
	} else {	/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = cyl;
			ect->head = head;
			ect->record = i + 1;
			ect->kl = 0;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((fdata->intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3) {
					ect->kl = 4;
					ect->dl = sizes_trk0[i] - 4;
				}
			}
			if ((fdata->intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->kl = 44;
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->startdev = device;
	fcp->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
	fcp->retries = 5;	/* set retry counter to enable default ERP */
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}

static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
	}
};

static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}

static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}


static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
						   struct irb *irb)
{
	char mask;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.cmd.dstat & mask) == mask) {
		dasd_generic_handle_state_change(device);
		return;
	}

	/* summary unit check */
	if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
	    (irb->ecw[7] == 0x0D)) {
		dasd_alias_handle_summary_unit_check(device, irb);
		return;
	}


	/* service information message SIM */
	if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) &&
	    ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, irb->ecw);
		dasd_schedule_device_bh(device);
		return;
	}

	/* just report other unsolicited interrupts */
	DEV_MESSAGE(KERN_DEBUG, device, "%s",
		    "unsolicited interrupt received");
	device->discipline->dump_sense(device, NULL, irb);
	dasd_schedule_device_bh(device);

	return;
};

static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int blksize, blk_per_trk, off;
	int count, cidaw, cplength, datasize;
	sector_t recid, first_rec, last_rec;
	sector_t first_trk, last_trk;
	unsigned int first_offs, last_offs;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);
	/* Calculate number of blocks/records per track. */
	blksize = block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	/* Calculate record id of first and last block. */
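	/* (req->sector counts 512-byte sectors; shifting by s2b_shift yields
	 *  the block number, and sector_div() splits that into track number
	 *  and record offset on the track.) */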
	first_rec = first_trk = req->sector >> block->s2b_shift;
	first_offs = sector_div(first_trk, blk_per_trk);
	last_rec = last_trk =
		(req->sector + req->nr_sectors - 1) >> block->s2b_shift;
	last_offs = sector_div(last_trk, blk_per_trk);
	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* ECKD can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len >> (block->s2b_shift + 9);
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record + read/write ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
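		/* (With cdl, every block below 2*blk_per_trk gets its own
		 *  locate record ccw in the loop below.) */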
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)) {
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
			/* Locate record for standard blocks ? */
			if (private->uses_cdl && recid == 2*blk_per_trk) {
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      last_rec - recid + 1,
					      cmd, basedev, count);
			}
			/* Read/write ccw. */
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = rcmd;
			ccw->count = count;
			if (idal_is_needed(dst, blksize)) {
				ccw->cda = (__u32)(addr_t) idaws;
				ccw->flags = CCW_FLAG_IDA;
				idaws = idal_create_words(idaws, dst, blksize);
			} else {
				ccw->cda = (__u32)(addr_t) dst;
				ccw->flags = 0;
			}
			ccw++;
			dst += blksize;
			recid++;
		}
	}
	if (req->cmd_flags & REQ_FAILFAST)
		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->startdev = startdev;
	cqr->memdev = startdev;
	cqr->block = block;
	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
	cqr->lpm = private->path_data.ppm;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

static int
dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
{
	struct dasd_eckd_private *private;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst, *cda;
	unsigned int blksize, blk_per_trk, off;
	sector_t recid;
	int status;

	if (!dasd_page_cache)
		goto out;
	private = (struct dasd_eckd_private *) cqr->block->base->private;
	blksize = cqr->block->bp_block;
	blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
	recid = req->sector >> cqr->block->s2b_shift;
	ccw = cqr->cpaddr;
	/* Skip over define extent & locate record. */
	ccw++;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
		ccw++;
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		for (off = 0; off < bv->bv_len; off += blksize) {
			/* Skip locate record. */
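			/* (build_cp added an extra locate record ccw before
			 *  each block below 2*blk_per_trk and before the
			 *  first standard block; skip those here as well.) */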
			if (private->uses_cdl && recid <= 2*blk_per_trk)
				ccw++;
			if (dst) {
				if (ccw->flags & CCW_FLAG_IDA)
					cda = *((char **)((addr_t) ccw->cda));
				else
					cda = (char *)((addr_t) ccw->cda);
				if (dst != cda) {
					if (rq_data_dir(req) == READ)
						memcpy(dst, cda, bv->bv_len);
					kmem_cache_free(dasd_page_cache,
					    (void *)((addr_t)cda & PAGE_MASK));
				}
				dst = NULL;
			}
			ccw++;
			recid++;
		}
	}
out:
	status = cqr->status == DASD_CQR_DONE;
	dasd_sfree_request(cqr, cqr->memdev);
	return status;
}

/*
 * Modify ccw chain in cqr so it can be started on a base device.
 *
 * Note that this is not enough to restart the cqr!
 * Either reset cqr->startdev as well (summary unit check handling)
 * or restart via separate cqr (as in ERP handling).
 */
void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
{
	struct ccw1 *ccw;
	struct PFX_eckd_data *pfxdata;

	ccw = cqr->cpaddr;
	pfxdata = cqr->data;

	if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
		pfxdata->validity.verify_base = 0;
		pfxdata->validity.hyper_pav = 0;
	}
}

#define DASD_ECKD_CHANQ_MAX_SIZE 4

static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
						     struct dasd_block *block,
						     struct request *req)
{
	struct dasd_eckd_private *private;
	struct dasd_device *startdev;
	unsigned long flags;
	struct dasd_ccw_req *cqr;

	startdev = dasd_alias_get_start_dev(base);
	if (!startdev)
		startdev = base;
	private = (struct dasd_eckd_private *) startdev->private;
	if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
		return ERR_PTR(-EBUSY);

	spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
	private->count++;
	cqr = dasd_eckd_build_cp(startdev, block, req);
	if (IS_ERR(cqr))
		private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
	return cqr;
}

static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
				   struct request *req)
{
	struct dasd_eckd_private *private;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
	private = (struct dasd_eckd_private *) cqr->memdev->private;
	private->count--;
	spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
	return dasd_eckd_free_cp(cqr, req);
}

static int
dasd_eckd_fill_info(struct dasd_device * device,
		    struct dasd_information2_t * info)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	info->label_block = 2;
	info->FBA_layout = private->uses_cdl ? 0 : 1;
	info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
	info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
	memcpy(info->characteristics, &private->rdc_data,
	       sizeof(struct dasd_eckd_characteristics));
	info->confdata_size = min((unsigned long)private->conf_len,
				  sizeof(info->configuration_data));
	memcpy(info->configuration_data, private->conf_data,
	       info->confdata_size);
	return 0;
}

/*
 * SECTION: ioctl functions for eckd devices.
 */

/*
 * Release device ioctl.
 * Builds a channel program to release a previously reserved
 * (see dasd_eckd_reserve) device.
 */
static int
dasd_eckd_release(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1, 32, device);
	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
	cqr->cpaddr->flags |= CCW_FLAG_SLI;
	cqr->cpaddr->count = 32;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Reserve device ioctl.
 * Options are set to 'synchronous wait for interrupt' and
 * 'timeout the request'. This leads to a terminate IO if
 * the interrupt is outstanding for a certain time.
 */
static int
dasd_eckd_reserve(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1, 32, device);
	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
	cqr->cpaddr->flags |= CCW_FLAG_SLI;
	cqr->cpaddr->count = 32;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation.
/*
 * Steal lock ioctl - unconditional reserve device.
 * Builds a channel program to break a device's reservation
 * (unconditional reserve).
 */
static int
dasd_eckd_steal_lock(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1, 32, device);
	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
	cqr->cpaddr->flags |= CCW_FLAG_SLI;
	cqr->cpaddr->count = 32;
	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
	cqr->startdev = device;
	cqr->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
	cqr->retries = 2;	/* set retry counter to enable basic ERP */
	cqr->expires = 2 * HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;

	rc = dasd_sleep_on_immediatly(cqr);

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Read performance statistics
 */
static int
dasd_eckd_performance(struct dasd_device *device, void __user *argp)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_perf_stats_t *stats;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;

	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1 /* PSF */ + 1 /* RSSD */,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_perf_stats_t)),
				   device);
	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate initialization request");
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x01;	/* Performance Statistics */
	prssdp->varies[1] = 0x01;	/* Perf Statistics for the Subsystem */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - Performance Statistics */
	stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
	memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
	ccw->cda = (__u32)(addr_t) stats;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
		if (copy_to_user(argp, stats,
				 sizeof(struct dasd_rssd_perf_stats_t)))
			rc = -EFAULT;
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}
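/*
 * Illustrative sketch (not part of the driver): dasd_eckd_performance()
 * copies a struct dasd_rssd_perf_stats_t back to the caller of the
 * BIODASDPSRD ioctl (see dasd_eckd_ioctl() below). A hypothetical
 * user-space reader, assuming the structure and ioctl number are
 * available from the exported DASD header, could fetch the counters
 * like this:
 *
 *	struct dasd_rssd_perf_stats_t stats;
 *
 *	if (ioctl(fd, BIODASDPSRD, &stats) == 0) {
 *		// stats now holds the subsystem performance statistics
 *	}
 */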
/*
 * Get attributes (cache operations)
 * Returns the cache attributes used in Define Extent (DE).
 */
static int
dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib = private->attrib;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	rc = 0;
	if (copy_to_user(argp, (long *) &attrib,
			 sizeof(struct attrib_data_t)))
		rc = -EFAULT;

	return rc;
}

/*
 * Set attributes (cache operations)
 * Stores the attributes for cache operation to be used in Define Extent (DE).
 */
static int
dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
{
	struct dasd_eckd_private *private =
		(struct dasd_eckd_private *)device->private;
	struct attrib_data_t attrib;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;

	if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
		return -EFAULT;
	private->attrib = attrib;

	DEV_MESSAGE(KERN_INFO, device,
		    "cache operation mode set to %x (%i cylinder prestage)",
		    private->attrib.operation, private->attrib.nr_cyl);
	return 0;
}

static int
dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
{
	struct dasd_device *device = block->base;

	switch (cmd) {
	case BIODASDGATTR:
		return dasd_eckd_get_attrib(device, argp);
	case BIODASDSATTR:
		return dasd_eckd_set_attrib(device, argp);
	case BIODASDPSRD:
		return dasd_eckd_performance(device, argp);
	case BIODASDRLSE:
		return dasd_eckd_release(device);
	case BIODASDRSRV:
		return dasd_eckd_reserve(device);
	case BIODASDSLCK:
		return dasd_eckd_steal_lock(device);
	default:
		return -ENOIOCTLCMD;
	}
}
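/*
 * Illustrative sketch (not part of the driver): the cache attribute
 * ioctls above exchange a struct attrib_data_t (operation mode plus the
 * number of cylinders to prestage; field names as used in
 * dasd_eckd_set_attrib()). A hypothetical read-modify-write from user
 * space, where DASD_SEQ_PRESTAGE is assumed to be one of the exported
 * operation constants, might be:
 *
 *	struct attrib_data_t attrib;
 *
 *	ioctl(fd, BIODASDGATTR, &attrib);	// dasd_eckd_get_attrib()
 *	attrib.operation = DASD_SEQ_PRESTAGE;	// assumed constant
 *	attrib.nr_cyl = 2;			// prestage two cylinders
 *	ioctl(fd, BIODASDSATTR, &attrib);	// dasd_eckd_set_attrib()
 */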
/*
 * Dump the range of CCWs into 'page' buffer
 * and return number of printed chars.
 */
static int
dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
{
	int len, count;
	char *datap;

	len = 0;
	while (from <= to) {
		len += sprintf(page + len, KERN_ERR PRINTK_HEADER
			       " CCW %p: %08X %08X DAT:",
			       from, ((int *) from)[0], ((int *) from)[1]);

		/* get pointer to data (consider IDALs) */
		if (from->flags & CCW_FLAG_IDA)
			datap = (char *) *((addr_t *) (addr_t) from->cda);
		else
			datap = (char *) ((addr_t) from->cda);

		/* dump data (max 32 bytes) */
		for (count = 0; count < from->count && count < 32; count++) {
			if (count % 8 == 0) len += sprintf(page + len, " ");
			if (count % 4 == 0) len += sprintf(page + len, " ");
			len += sprintf(page + len, "%02x", datap[count]);
		}
		len += sprintf(page + len, "\n");
		from++;
	}
	return len;
}

/*
 * Print sense data and related channel program.
 * Parts are printed separately because the printk buffer is only 1024 bytes.
 */
static void dasd_eckd_dump_sense(struct dasd_device *device,
				 struct dasd_ccw_req *req, struct irb *irb)
{
	char *page;
	struct ccw1 *first, *last, *fail, *from, *to;
	int len, sl, sct;

	page = (char *) get_zeroed_page(GFP_ATOMIC);
	if (page == NULL) {
		DEV_MESSAGE(KERN_ERR, device, " %s",
			    "No memory to dump sense data");
		return;
	}
	/* dump the sense data */
	len = sprintf(page, KERN_ERR PRINTK_HEADER
		      " I/O status report for device %s:\n",
		      device->cdev->dev.bus_id);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
		       irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
	len += sprintf(page + len, KERN_ERR PRINTK_HEADER
		       " device %s: Failing CCW: %p\n",
		       device->cdev->dev.bus_id,
		       (void *) (addr_t) irb->scsw.cmd.cpa);
	if (irb->esw.esw0.erw.cons) {
		for (sl = 0; sl < 4; sl++) {
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER
				       " Sense(hex) %2d-%2d:",
				       (8 * sl), ((8 * sl) + 7));

			for (sct = 0; sct < 8; sct++) {
				len += sprintf(page + len, " %02x",
					       irb->ecw[8 * sl + sct]);
			}
			len += sprintf(page + len, "\n");
		}

		if (irb->ecw[27] & DASD_SENSE_BIT_0) {
			/* 24 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 24 Byte: %x MSG %x, "
				"%s MSGb to SYSOP\n",
				irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
				irb->ecw[1] & 0x10 ? "" : "no");
		} else {
			/* 32 Byte Sense Data */
			sprintf(page + len, KERN_ERR PRINTK_HEADER
				" 32 Byte: Format: %x "
				"Exception class %x\n",
				irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
		}
	} else {
		sprintf(page + len, KERN_ERR PRINTK_HEADER
			" SORRY - NO VALID SENSE AVAILABLE\n");
	}
	printk("%s", page);

	if (req) {
		/* req == NULL for unsolicited interrupts */
		/* dump the Channel Program (max 140 Bytes per line) */
		/* Count CCW and print first CCWs (maximum 1024 / 140 = 7) */
		first = req->cpaddr;
		for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
		to = min(first + 6, last);
		len = sprintf(page, KERN_ERR PRINTK_HEADER
			      " Related CP in req: %p\n", req);
		dasd_eckd_dump_ccw_range(first, to, page + len);
		printk("%s", page);

		/* print failing CCW area (maximum 4) */
		/* scsw->cda is either valid or zero */
		len = 0;
		from = ++to;
		fail = (struct ccw1 *)(addr_t)
				irb->scsw.cmd.cpa;	/* failing CCW */
		if (from < fail - 2) {
			from = fail - 2;	/* there is a gap - print header */
			len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
		}
		to = min(fail + 1, last);
		len += dasd_eckd_dump_ccw_range(from, to, page + len);

		/* print last CCWs (maximum 2) */
		from = max(from, ++to);
		if (from < last - 1) {
			from = last - 1;	/* there is a gap - print header */
			len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
		}
		len += dasd_eckd_dump_ccw_range(from, last, page + len);
		if (len > 0)
			printk("%s", page);
	}
	free_page((unsigned long) page);
}
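/*
 * Layout of the channel program dump produced by dasd_eckd_dump_sense()
 * above: to stay within 1024-byte printk chunks the CCW chain is dumped
 * in three windows, at most the first 7 CCWs, then up to 4 CCWs around
 * the failing CCW reported in irb->scsw.cmd.cpa, then at most the last
 * 2 CCWs. Lines of "......" mark any gaps between the windows.
 */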
/*
 * max_blocks is dependent on the amount of storage that is available
 * in the static io buffer for each device. Currently each device has
 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
 * addition we have one define extent ccw + 16 bytes of data and one
 * locate record ccw + 16 bytes of data. That makes:
 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
 * We want to fit two requests into the available memory so that we can
 * immediately start the next request when the current one finishes. That
 * makes 249.5 blocks per request; allow a little safety margin and the
 * result is 240.
 */
static struct dasd_discipline dasd_eckd_discipline = {
	.owner = THIS_MODULE,
	.name = "ECKD",
	.ebcname = "ECKD",
	.max_blocks = 240,
	.check_device = dasd_eckd_check_characteristics,
	.uncheck_device = dasd_eckd_uncheck_device,
	.do_analysis = dasd_eckd_do_analysis,
	.ready_to_online = dasd_eckd_ready_to_online,
	.online_to_ready = dasd_eckd_online_to_ready,
	.fill_geometry = dasd_eckd_fill_geometry,
	.start_IO = dasd_start_IO,
	.term_IO = dasd_term_IO,
	.handle_terminated_request = dasd_eckd_handle_terminated_request,
	.format_device = dasd_eckd_format_device,
	.erp_action = dasd_eckd_erp_action,
	.erp_postaction = dasd_eckd_erp_postaction,
	.handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
	.build_cp = dasd_eckd_build_alias_cp,
	.free_cp = dasd_eckd_free_alias_cp,
	.dump_sense = dasd_eckd_dump_sense,
	.fill_info = dasd_eckd_fill_info,
	.ioctl = dasd_eckd_ioctl,
};

static int __init
dasd_eckd_init(void)
{
	ASCEBC(dasd_eckd_discipline.ebcname, 4);
	return ccw_driver_register(&dasd_eckd_driver);
}

static void __exit
dasd_eckd_cleanup(void)
{
	ccw_driver_unregister(&dasd_eckd_driver);
}

module_init(dasd_eckd_init);
module_exit(dasd_eckd_cleanup);