/*
 * File...........: linux/drivers/s390/block/dasd_eckd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
 *
 */

#define KMSG_COMPONENT "dasd"

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdreg.h>	/* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/init.h>

#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
#include <asm/io.h>
#include <asm/todclk.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/itcw.h>

#include "dasd_int.h"
#include "dasd_eckd.h"
#include "../cio/chsc.h"


#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif				/* PRINTK_HEADER */
#define PRINTK_HEADER "dasd(eckd):"

#define ECKD_C0(i) (i->home_bytes)
#define ECKD_F(i) (i->formula)
#define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
		    (i->factors.f_0x02.f1))
#define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
		    (i->factors.f_0x02.f2))
#define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
		    (i->factors.f_0x02.f3))
#define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
#define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
#define ECKD_F6(i) (i->factor6)
#define ECKD_F7(i) (i->factor7)
#define ECKD_F8(i) (i->factor8)

MODULE_LICENSE("GPL");

static struct dasd_discipline dasd_eckd_discipline;

/* The ccw bus type uses this table to find devices that it sends to
 * dasd_eckd_probe */
static struct ccw_device_id dasd_eckd_ids[] = {
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0), .driver_info = 0x1},
	{ CCW_DEVICE_DEVTYPE(0x2105, 0, 0x3390, 0), .driver_info = 0x2},
	{ CCW_DEVICE_DEVTYPE(0x3880, 0, 0x3390, 0), .driver_info = 0x3},
	{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3380, 0), .driver_info = 0x4},
	{ CCW_DEVICE_DEVTYPE(0x2105, 0, 0x3380, 0), .driver_info = 0x5},
	{ CCW_DEVICE_DEVTYPE(0x9343, 0, 0x9345, 0), .driver_info = 0x6},
	{ CCW_DEVICE_DEVTYPE(0x2107, 0, 0x3390, 0), .driver_info = 0x7},
	{ CCW_DEVICE_DEVTYPE(0x2107, 0, 0x3380, 0), .driver_info = 0x8},
	{ CCW_DEVICE_DEVTYPE(0x1750, 0, 0x3390, 0), .driver_info = 0x9},
	{ CCW_DEVICE_DEVTYPE(0x1750, 0, 0x3380, 0), .driver_info = 0xa},
	{ /* end of list */ },
};

MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);

static struct ccw_driver dasd_eckd_driver; /* see below */

/* initial attempt at a probe function. this can be simplified once
 * the other detection code is gone */
static int
dasd_eckd_probe(struct ccw_device *cdev)
{
	int ret;

	/* set ECKD specific ccw-device options */
	ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
	if (ret) {
		DBF_EVENT(DBF_WARNING,
			  "dasd_eckd_probe: could not set ccw-device options "
			  "for %s\n", dev_name(&cdev->dev));
		return ret;
	}
	ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
	return ret;
}

static int
dasd_eckd_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
}

static struct ccw_driver dasd_eckd_driver = {
	.name = "dasd-eckd",
	.owner = THIS_MODULE,
	.ids = dasd_eckd_ids,
	.probe = dasd_eckd_probe,
	.remove = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online = dasd_eckd_set_online,
	.notify = dasd_generic_notify,
};

static const int sizes_trk0[] = { 28, 148, 84 };
#define LABEL_SIZE 140

static inline unsigned int
round_up_multiple(unsigned int no, unsigned int mult)
{
	int rem = no % mult;
	return (rem ? no - rem + mult : no);
}

static inline unsigned int
ceil_quot(unsigned int d1, unsigned int d2)
{
	return (d1 + (d2 - 1)) / d2;
}

static unsigned int
recs_per_track(struct dasd_eckd_characteristics * rdc,
	       unsigned int kl, unsigned int dl)
{
	int dn, kn;

	switch (rdc->dev_type) {
	case 0x3380:
		if (kl)
			return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
				       ceil_quot(dl + 12, 32));
		else
			return 1499 / (15 + ceil_quot(dl + 12, 32));
	case 0x3390:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
				       9 + ceil_quot(dl + 6 * dn, 34));
		} else
			return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
	case 0x9345:
		dn = ceil_quot(dl + 6, 232) + 1;
		if (kl) {
			kn = ceil_quot(kl + 6, 232) + 1;
			return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
				       ceil_quot(dl + 6 * dn, 34));
		} else
			return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
	}
	return 0;
}

static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
{
	geo->cyl = (__u16) cyl;
	geo->head = cyl >> 16;
	geo->head <<= 4;
	geo->head |= head;
}

static int
check_XRC(struct ccw1 *de_ccw,
	  struct DE_eckd_data *data,
	  struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid'   */
	data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */

	rc = get_sync_clock(&data->ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;

	de_ccw->count = sizeof(struct DE_eckd_data);
	de_ccw->flags |= CCW_FLAG_SLI;
	return rc;
}

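/*
 * Build a Define Extent ccw and its parameter block for the track
 * range trk..totrk. Access permissions and cache attributes are
 * derived from the command; for write commands the XRC time stamp
 * is set up when the subsystem supports it.
 */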
static int
define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
	      unsigned int totrk, int cmd, struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	private = (struct dasd_eckd_private *) device->private;

	ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct DE_eckd_data));
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		data->mask.perm = 0x1;
		data->attributes.operation = private->attrib.operation;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->mask.perm = 0x02;
		data->attributes.operation = private->attrib.operation;
		rc = check_XRC(ccw, data, device);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC(ccw, data, device);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->mask.perm = 0x3;
		data->mask.auth = 0x1;
		data->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC(ccw, data, device);
		break;
	default:
		dev_err(&device->cdev->dev,
			"0x%x is not a known command\n", cmd);
		break;
	}

	data->attributes.mode = 0x3;	/* ECKD */

	if ((private->rdc_data.cu_type == 0x2105 ||
	     private->rdc_data.cu_type == 0x2107 ||
	     private->rdc_data.cu_type == 0x1750)
	    && !(private->uses_cdl && trk < 2))
		data->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = private->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
	    data->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + private->attrib.nr_cyl < private->real_cyl)
			endcyl += private->attrib.nr_cyl;
		else
			endcyl = (private->real_cyl - 1);
	}

	set_ch_t(&data->beg_ext, begcyl, beghead);
	set_ch_t(&data->end_ext, endcyl, endhead);
	return rc;
}

static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
			       struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	int rc;

	private = (struct dasd_eckd_private *) device->private;
	if (!private->rdc_data.facilities.XRC_supported)
		return 0;

	/* switch on System Time Stamp - needed for XRC Support */
	pfxdata->define_extent.ga_extended |= 0x08; /* 'Time Stamp Valid'   */
	pfxdata->define_extent.ga_extended |= 0x02; /* 'Extended Parameter' */
	pfxdata->validity.time_stamp = 1;	    /* 'Time Stamp Valid'   */

	rc = get_sync_clock(&pfxdata->define_extent.ep_sys_time);
	/* Ignore return code if sync clock is switched off. */
	if (rc == -ENOSYS || rc == -EACCES)
		rc = 0;
	return rc;
}

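/*
 * Fill the Locate Record Extended parameter block: compute the sector
 * (rotational position) of the first record for 3380/3390 geometries
 * and set operation, count and transfer length according to the
 * command code.
 */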
static void fill_LRE_data(struct LRE_eckd_data *data, unsigned int trk,
			  unsigned int rec_on_trk, int count, int cmd,
			  struct dasd_device *device, unsigned int reclen,
			  unsigned int tlf)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	memset(data, 0, sizeof(*data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	/* note: meaning of count depends on the operation
	 *	 for record based I/O it's the number of records, but for
	 *	 track based I/O it's the number of tracks
	 */
	data->count = count;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;	/* not tlf, as one might think */
		data->operation.operation = 0x3F;
		data->extended_operation = 0x23;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.length_valid = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		data->auxiliary.length_valid = 0x1;
		data->length = tlf;
		data->operation.operation = 0x0C;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.length_valid = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device,
			      "fill LRE unknown opcode 0x%x", cmd);
		BUG();
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

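/*
 * Build a Prefix ccw. Format 0 carries only the Define Extent part,
 * format 1 additionally carries the Locate Record Extended data.
 * basedev describes the physical volume; startdev may be a PAV alias
 * of it, in which case base verification is requested.
 */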
static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		      unsigned int trk, unsigned int totrk, int cmd,
		      struct dasd_device *basedev, struct dasd_device *startdev,
		      unsigned char format, unsigned int rec_on_trk, int count,
		      unsigned int blksize, unsigned int tlf)
{
	struct dasd_eckd_private *basepriv, *startpriv;
	struct DE_eckd_data *dedata;
	struct LRE_eckd_data *lredata;
	u32 begcyl, endcyl;
	u16 heads, beghead, endhead;
	int rc = 0;

	basepriv = (struct dasd_eckd_private *) basedev->private;
	startpriv = (struct dasd_eckd_private *) startdev->private;
	dedata = &pfxdata->define_extent;
	lredata = &pfxdata->locate_record;

	ccw->cmd_code = DASD_ECKD_CCW_PFX;
	ccw->flags = 0;
	ccw->count = sizeof(*pfxdata);
	ccw->cda = (__u32) __pa(pfxdata);

	memset(pfxdata, 0, sizeof(*pfxdata));
	/* prefix data */
	if (format > 1) {
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown format 0x%x", format);
		BUG();
		return -EINVAL;
	}
	pfxdata->format = format;
	pfxdata->base_address = basepriv->ned->unit_addr;
	pfxdata->base_lss = basepriv->ned->ID;
	pfxdata->validity.define_extent = 1;

	/* private uid is kept up to date, conf_data may be outdated */
	if (startpriv->uid.type != UA_BASE_DEVICE) {
		pfxdata->validity.verify_base = 1;
		if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
			pfxdata->validity.hyper_pav = 1;
	}

	/* define extent data (mostly) */
	switch (cmd) {
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
	case DASD_ECKD_CCW_READ_COUNT:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		break;
	case DASD_ECKD_CCW_READ_TRACK_DATA:
		dedata->mask.perm = 0x1;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = 0;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_ERASE:
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		dedata->mask.perm = 0x3;
		dedata->mask.auth = 0x1;
		dedata->attributes.operation = DASD_BYPASS_CACHE;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	case DASD_ECKD_CCW_WRITE_TRACK_DATA:
		dedata->mask.perm = 0x02;
		dedata->attributes.operation = basepriv->attrib.operation;
		dedata->blk_size = blksize;
		rc = check_XRC_on_prefix(pfxdata, basedev);
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, basedev,
			      "PFX LRE unknown opcode 0x%x", cmd);
		BUG();
		return -EINVAL;
	}

	dedata->attributes.mode = 0x3;	/* ECKD */

	if ((basepriv->rdc_data.cu_type == 0x2105 ||
	     basepriv->rdc_data.cu_type == 0x2107 ||
	     basepriv->rdc_data.cu_type == 0x1750)
	    && !(basepriv->uses_cdl && trk < 2))
		dedata->ga_extended |= 0x40; /* Regular Data Format Mode */

	heads = basepriv->rdc_data.trk_per_cyl;
	begcyl = trk / heads;
	beghead = trk % heads;
	endcyl = totrk / heads;
	endhead = totrk % heads;

	/* check for sequential prestage - enhance cylinder range */
	if (dedata->attributes.operation == DASD_SEQ_PRESTAGE ||
	    dedata->attributes.operation == DASD_SEQ_ACCESS) {

		if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl)
			endcyl += basepriv->attrib.nr_cyl;
		else
			endcyl = (basepriv->real_cyl - 1);
	}

	set_ch_t(&dedata->beg_ext, begcyl, beghead);
	set_ch_t(&dedata->end_ext, endcyl, endhead);

	if (format == 1) {
		fill_LRE_data(lredata, trk, rec_on_trk, count, cmd,
			      basedev, blksize, tlf);
	}

	return rc;
}

static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
		  unsigned int trk, unsigned int totrk, int cmd,
		  struct dasd_device *basedev, struct dasd_device *startdev)
{
	return prefix_LRE(ccw, pfxdata, trk, totrk, cmd, basedev, startdev,
			  0, 0, 0, 0, 0);
}

static void
locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
	      unsigned int rec_on_trk, int no_rec, int cmd,
	      struct dasd_device * device, int reclen)
{
	struct dasd_eckd_private *private;
	int sector;
	int dn, d;

	private = (struct dasd_eckd_private *) device->private;

	DBF_DEV_EVENT(DBF_INFO, device,
		      "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
		      trk, rec_on_trk, no_rec, cmd, reclen);

	ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
	ccw->flags = 0;
	ccw->count = 16;
	ccw->cda = (__u32) __pa(data);

	memset(data, 0, sizeof(struct LO_eckd_data));
	sector = 0;
	if (rec_on_trk) {
		switch (private->rdc_data.dev_type) {
		case 0x3390:
			dn = ceil_quot(reclen + 6, 232);
			d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
			sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
			break;
		case 0x3380:
			d = 7 + ceil_quot(reclen + 12, 32);
			sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
			break;
		}
	}
	data->sector = sector;
	data->count = no_rec;
	switch (cmd) {
	case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ_HOME_ADDRESS:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
		data->operation.orientation = 0x1;
		data->operation.operation = 0x03;
		data->count++;
		break;
	case DASD_ECKD_CCW_READ_RECORD_ZERO:
		data->operation.orientation = 0x3;
		data->operation.operation = 0x16;
		data->count++;
		break;
	case DASD_ECKD_CCW_WRITE:
	case DASD_ECKD_CCW_WRITE_MT:
	case DASD_ECKD_CCW_WRITE_KD:
	case DASD_ECKD_CCW_WRITE_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x01;
		break;
	case DASD_ECKD_CCW_WRITE_CKD:
	case DASD_ECKD_CCW_WRITE_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x03;
		break;
	case DASD_ECKD_CCW_READ:
	case DASD_ECKD_CCW_READ_MT:
	case DASD_ECKD_CCW_READ_KD:
	case DASD_ECKD_CCW_READ_KD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_READ_CKD:
	case DASD_ECKD_CCW_READ_CKD_MT:
		data->auxiliary.last_bytes_used = 0x1;
		data->length = reclen;
		data->operation.operation = 0x16;
		break;
	case DASD_ECKD_CCW_READ_COUNT:
		data->operation.operation = 0x06;
		break;
	case DASD_ECKD_CCW_ERASE:
		data->length = reclen;
		data->auxiliary.last_bytes_used = 0x1;
		data->operation.operation = 0x0b;
		break;
	default:
		DBF_DEV_EVENT(DBF_ERR, device, "unknown locate record "
			      "opcode 0x%x", cmd);
	}
	set_ch_t(&data->seek_addr,
		 trk / private->rdc_data.trk_per_cyl,
		 trk % private->rdc_data.trk_per_cyl);
	data->search_arg.cyl = data->seek_addr.cyl;
	data->search_arg.head = data->seek_addr.head;
	data->search_arg.record = rec_on_trk;
}

/*
 * Returns 1 if the block is one of the special blocks that needs
 * to get read/written with the KD variant of the command.
 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
 * Luckily the KD variants differ only by one bit (0x08) from the
 * normal variant. So don't wonder about code like:
 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *         ccw->cmd_code |= 0x8;
 */
static inline int
dasd_eckd_cdl_special(int blk_per_trk, int recid)
{
	if (recid < 3)
		return 1;
	if (recid < blk_per_trk)
		return 0;
	if (recid < 2 * blk_per_trk)
		return 1;
	return 0;
}

/*
 * Returns the record size for the special blocks of the cdl format.
 * Only returns something useful if dasd_eckd_cdl_special is true
 * for the recid.
 */
static inline int
dasd_eckd_cdl_reclen(int recid)
{
	if (recid < 3)
		return sizes_trk0[recid];
	return LABEL_SIZE;
}

/*
 * Generate device unique id that specifies the physical device.
 */
static int dasd_eckd_generate_uid(struct dasd_device *device,
				  struct dasd_uid *uid)
{
	struct dasd_eckd_private *private;
	int count;

	private = (struct dasd_eckd_private *) device->private;
	if (!private)
		return -ENODEV;
	if (!private->ned || !private->gneq)
		return -ENODEV;

	memset(uid, 0, sizeof(struct dasd_uid));
	memcpy(uid->vendor, private->ned->HDA_manufacturer,
	       sizeof(uid->vendor) - 1);
	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
	memcpy(uid->serial, private->ned->HDA_location,
	       sizeof(uid->serial) - 1);
	EBCASC(uid->serial, sizeof(uid->serial) - 1);
	uid->ssid = private->gneq->subsystemID;
	uid->real_unit_addr = private->ned->unit_addr;
	if (private->sneq) {
		uid->type = private->sneq->sua_flags;
		if (uid->type == UA_BASE_PAV_ALIAS)
			uid->base_unit_addr = private->sneq->base_unit_addr;
	} else {
		uid->type = UA_BASE_DEVICE;
	}
	if (private->vdsneq) {
		for (count = 0; count < 16; count++) {
			sprintf(uid->vduit+2*count, "%02x",
				private->vdsneq->uit[count]);
		}
	}
	return 0;
}

static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
						    void *rcd_buffer,
						    struct ciw *ciw, __u8 lpm)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate RCD request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = ciw->cmd;
	ccw->cda = (__u32)(addr_t)rcd_buffer;
	ccw->count = ciw->count;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->lpm = lpm;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
				   void **rcd_buffer,
				   int *rcd_buffer_size, __u8 lpm)
{
	struct ciw *ciw;
	char *rcd_buf = NULL;
	int ret;
	struct dasd_ccw_req *cqr;

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0) {
		ret = -EOPNOTSUPP;
		goto out_error;
	}
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		ret = -ENOMEM;
		goto out_error;
	}

	/*
	 * buffer has to start with EBCDIC "V1.0" to show
	 * support for virtual device SNEQ
	 */
	rcd_buf[0] = 0xE5;
	rcd_buf[1] = 0xF1;
	rcd_buf[2] = 0x4B;
	rcd_buf[3] = 0xF0;
	cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
	if (IS_ERR(cqr)) {
		ret = PTR_ERR(cqr);
		goto out_error;
	}
	ret = dasd_sleep_on(cqr);
	/*
	 * on success we update the user input parms
	 */
	dasd_sfree_request(cqr, cqr->memdev);
	if (ret)
		goto out_error;

	*rcd_buffer_size = ciw->count;
	*rcd_buffer = rcd_buf;
	return 0;
out_error:
	kfree(rcd_buf);
	*rcd_buffer = NULL;
	*rcd_buffer_size = 0;
	return ret;
}

static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
{

	struct dasd_sneq *sneq;
	int i, count;

	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	count = private->conf_len / sizeof(struct dasd_sneq);
	sneq = (struct dasd_sneq *)private->conf_data;
	for (i = 0; i < count; ++i) {
		if (sneq->flags.identifier == 1 && sneq->format == 1)
			private->sneq = sneq;
		else if (sneq->flags.identifier == 1 && sneq->format == 4)
			private->vdsneq = (struct vd_sneq *)sneq;
		else if (sneq->flags.identifier == 2)
			private->gneq = (struct dasd_gneq *)sneq;
		else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
			private->ned = (struct dasd_ned *)sneq;
		sneq++;
	}
	if (!private->ned || !private->gneq) {
		private->ned = NULL;
		private->sneq = NULL;
		private->vdsneq = NULL;
		private->gneq = NULL;
		return -EINVAL;
	}
	return 0;

};

static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
{
	struct dasd_gneq *gneq;
	int i, count, found;

	count = conf_len / sizeof(*gneq);
	gneq = (struct dasd_gneq *)conf_data;
	found = 0;
	for (i = 0; i < count; ++i) {
		if (gneq->flags.identifier == 2) {
			found = 1;
			break;
		}
		gneq++;
	}
	if (found)
		return ((char *)gneq)[18] & 0x07;
	else
		return 0;
}

static int dasd_eckd_read_conf(struct dasd_device *device)
{
	void *conf_data;
	int conf_len, conf_data_saved;
	int rc;
	__u8 lpm;
	struct dasd_eckd_private *private;
	struct dasd_eckd_path *path_data;

	private = (struct dasd_eckd_private *) device->private;
	path_data = (struct dasd_eckd_path *) &private->path_data;
	path_data->opm = ccw_device_get_path_mask(device->cdev);
	lpm = 0x80;
	conf_data_saved = 0;
	/* get configuration data per operational path */
	for (lpm = 0x80; lpm; lpm >>= 1) {
		if (lpm & path_data->opm) {
			rc = dasd_eckd_read_conf_lpm(device, &conf_data,
						     &conf_len, lpm);
			if (rc && rc != -EOPNOTSUPP) {	/* -EOPNOTSUPP is ok */
				DBF_EVENT(DBF_WARNING,
					  "Read configuration data returned "
					  "error %d for device: %s", rc,
					  dev_name(&device->cdev->dev));
				return rc;
			}
			if (conf_data == NULL) {
				DBF_EVENT(DBF_WARNING, "No configuration "
					  "data retrieved for device: %s",
					  dev_name(&device->cdev->dev));
				continue;	/* no error */
			}
			/* save first valid configuration data */
			if (!conf_data_saved) {
				kfree(private->conf_data);
				private->conf_data = conf_data;
				private->conf_len = conf_len;
				if (dasd_eckd_identify_conf_parts(private)) {
					private->conf_data = NULL;
					private->conf_len = 0;
					kfree(conf_data);
					continue;
				}
				conf_data_saved++;
			}
			switch (dasd_eckd_path_access(conf_data, conf_len)) {
			case 0x02:
				path_data->npm |= lpm;
				break;
			case 0x03:
				path_data->ppm |= lpm;
				break;
			}
			if (conf_data != private->conf_data)
				kfree(conf_data);
		}
	}
	return 0;
}

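/*
 * Read the feature codes of the storage server through a
 * PSF/Read Subsystem Data command pair and cache the result in the
 * device private data.
 */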
static int dasd_eckd_read_features(struct dasd_device *device)
{
	struct dasd_psf_prssd_data *prssdp;
	struct dasd_rssd_features *features;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int rc;
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   1 /* PSF */ + 1 /* RSSD */ ,
				   (sizeof(struct dasd_psf_prssd_data) +
				    sizeof(struct dasd_rssd_features)),
				   device);
	if (IS_ERR(cqr)) {
		DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
			  "request for device: %s",
			  dev_name(&device->cdev->dev));
		return PTR_ERR(cqr);
	}
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 5;
	cqr->expires = 10 * HZ;

	/* Prepare for Read Subsystem Data */
	prssdp = (struct dasd_psf_prssd_data *) cqr->data;
	memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
	prssdp->order = PSF_ORDER_PRSSD;
	prssdp->suborder = 0x41;	/* Read Feature Codes */
	/* all other bytes of prssdp must be zero */

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->count = sizeof(struct dasd_psf_prssd_data);
	ccw->flags |= CCW_FLAG_CC;
	ccw->cda = (__u32)(addr_t) prssdp;

	/* Read Subsystem Data - feature codes */
	features = (struct dasd_rssd_features *) (prssdp + 1);
	memset(features, 0, sizeof(struct dasd_rssd_features));

	ccw++;
	ccw->cmd_code = DASD_ECKD_CCW_RSSD;
	ccw->count = sizeof(struct dasd_rssd_features);
	ccw->cda = (__u32)(addr_t) features;

	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	rc = dasd_sleep_on(cqr);
	if (rc == 0) {
		prssdp = (struct dasd_psf_prssd_data *) cqr->data;
		features = (struct dasd_rssd_features *) (prssdp + 1);
		memcpy(&private->features, features,
		       sizeof(struct dasd_rssd_features));
	}
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Build CP for Perform Subsystem Function - SSC.
 */
static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
						    int enable_pav)
{
	struct dasd_ccw_req *cqr;
	struct dasd_psf_ssc_data *psf_ssc_data;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
				   sizeof(struct dasd_psf_ssc_data),
				   device);

	if (IS_ERR(cqr)) {
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "Could not allocate PSF-SSC request");
		return cqr;
	}
	psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
	psf_ssc_data->order = PSF_ORDER_SSC;
	psf_ssc_data->suborder = 0x40;
	if (enable_pav) {
		psf_ssc_data->suborder |= 0x88;
		psf_ssc_data->reserved[0] = 0x88;
	}
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_PSF;
	ccw->cda = (__u32)(addr_t)psf_ssc_data;
	ccw->count = 66;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->block = NULL;
	cqr->expires = 10*HZ;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * Perform Subsystem Function.
 * It is necessary to trigger CIO for channel revalidation since this
 * call might change behaviour of DASD devices.
 */
static int
dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = dasd_eckd_build_psf_ssc(device, enable_pav);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);
	if (!rc)
		/* trigger CIO to reprobe devices */
		css_schedule_reprobe();
	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

/*
 * Validate storage server of current device.
 */
static int dasd_eckd_validate_server(struct dasd_device *device)
{
	int rc;
	struct dasd_eckd_private *private;
	int enable_pav;

	if (dasd_nopav || MACHINE_IS_VM)
		enable_pav = 0;
	else
		enable_pav = 1;
	rc = dasd_eckd_psf_ssc(device, enable_pav);
	/* maybe the requested feature is not available on the server,
	 * therefore just report the error and go ahead */
	private = (struct dasd_eckd_private *) device->private;
	DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
		  "returned rc=%d for device: %s",
		  private->uid.vendor, private->uid.serial,
		  private->uid.ssid, rc, dev_name(&device->cdev->dev));
	/* RE-Read Configuration Data */
	return dasd_eckd_read_conf(device);
}

/*
 * Check device characteristics.
 * If the device is accessible using ECKD discipline, the device is enabled.
 */
static int
dasd_eckd_check_characteristics(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct dasd_block *block;
	void *rdc_data;
	int is_known, rc;

	private = (struct dasd_eckd_private *) device->private;
	if (private == NULL) {
		private = kzalloc(sizeof(struct dasd_eckd_private),
				  GFP_KERNEL | GFP_DMA);
		if (private == NULL) {
			dev_warn(&device->cdev->dev,
				 "Allocating memory for private DASD data "
				 "failed\n");
			return -ENOMEM;
		}
		device->private = (void *) private;
	}
	/* Invalidate status of initial analysis. */
	private->init_cqr_status = -1;
	/* Set default cache operations. */
	private->attrib.operation = DASD_NORMAL_CACHE;
	private->attrib.nr_cyl = 0;

	/* Read Configuration Data */
	rc = dasd_eckd_read_conf(device);
	if (rc)
		goto out_err1;

	/* Generate device unique id and register in devmap */
	rc = dasd_eckd_generate_uid(device, &private->uid);
	if (rc)
		goto out_err1;
	dasd_set_uid(device->cdev, &private->uid);

	if (private->uid.type == UA_BASE_DEVICE) {
		block = dasd_alloc_block();
		if (IS_ERR(block)) {
			DBF_EVENT(DBF_WARNING, "could not allocate dasd "
				  "block structure for device: %s",
				  dev_name(&device->cdev->dev));
			rc = PTR_ERR(block);
			goto out_err1;
		}
		device->block = block;
		block->base = device;
	}

	/* register lcu with alias handling, enable PAV if this is a new lcu */
	is_known = dasd_alias_make_device_known_to_lcu(device);
	if (is_known < 0) {
		rc = is_known;
		goto out_err2;
	}
	if (!is_known) {
		/* new lcu found */
		rc = dasd_eckd_validate_server(device); /* will switch pav on */
		if (rc)
			goto out_err3;
	}

	/* Read Feature Codes */
	rc = dasd_eckd_read_features(device);
	if (rc)
		goto out_err3;

	/* Read Device Characteristics */
	rdc_data = (void *) &(private->rdc_data);
	memset(rdc_data, 0, sizeof(private->rdc_data));
	rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
	if (rc) {
		DBF_EVENT(DBF_WARNING,
			  "Read device characteristics failed, rc=%d for "
			  "device: %s", rc, dev_name(&device->cdev->dev));
		goto out_err3;
	}
	/* find the valid cylinder size */
	if (private->rdc_data.no_cyl == LV_COMPAT_CYL &&
	    private->rdc_data.long_no_cyl)
		private->real_cyl = private->rdc_data.long_no_cyl;
	else
		private->real_cyl = private->rdc_data.no_cyl;

	dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
		 "with %d cylinders, %d heads, %d sectors\n",
		 private->rdc_data.dev_type,
		 private->rdc_data.dev_model,
		 private->rdc_data.cu_type,
		 private->rdc_data.cu_model.model,
		 private->real_cyl,
		 private->rdc_data.trk_per_cyl,
		 private->rdc_data.sec_per_trk);
	return 0;

out_err3:
	dasd_alias_disconnect_device_from_lcu(device);
out_err2:
	dasd_free_block(device->block);
	device->block = NULL;
out_err1:
	kfree(private->conf_data);
	kfree(device->private);
	device->private = NULL;
	return rc;
}

static void dasd_eckd_uncheck_device(struct dasd_device *device)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) device->private;
	dasd_alias_disconnect_device_from_lcu(device);
	private->ned = NULL;
	private->sneq = NULL;
	private->vdsneq = NULL;
	private->gneq = NULL;
	private->conf_len = 0;
	kfree(private->conf_data);
	private->conf_data = NULL;
}

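/*
 * Build the channel program for the initial analysis: a Define Extent
 * over the first three tracks followed by Read Count ccws for the
 * first four records on track 0 and the first record on track 2.
 */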
static struct dasd_ccw_req *
dasd_eckd_analysis_ccw(struct dasd_device *device)
{
	struct dasd_eckd_private *private;
	struct eckd_count *count_data;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	int cplength, datasize;
	int i;

	private = (struct dasd_eckd_private *) device->private;

	cplength = 8;
	datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* Define extent for the first 3 tracks. */
	define_extent(ccw++, cqr->data, 0, 2,
		      DASD_ECKD_CCW_READ_COUNT, device);
	LO_data = cqr->data + sizeof(struct DE_eckd_data);
	/* Locate record for the first 4 records on track 0. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 0, 0, 4,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);

	count_data = private->count_area;
	for (i = 0; i < 4; i++) {
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
		ccw->flags = 0;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) count_data;
		ccw++;
		count_data++;
	}

	/* Locate record for the first record on track 2. */
	ccw[-1].flags |= CCW_FLAG_CC;
	locate_record(ccw++, LO_data++, 2, 0, 1,
		      DASD_ECKD_CCW_READ_COUNT, device, 0);
	/* Read count ccw. */
	ccw[-1].flags |= CCW_FLAG_CC;
	ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
	ccw->flags = 0;
	ccw->count = 8;
	ccw->cda = (__u32)(addr_t) count_data;

	cqr->block = NULL;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->retries = 0;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

/*
 * This is the callback function for the init_analysis cqr. It saves
 * the status of the initial analysis ccw before it frees it and kicks
 * the device to continue the startup sequence. This will call
 * dasd_eckd_do_analysis again (if the device has not been marked
 * for deletion in the meantime).
 */
static void
dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
{
	struct dasd_eckd_private *private;
	struct dasd_device *device;

	device = init_cqr->startdev;
	private = (struct dasd_eckd_private *) device->private;
	private->init_cqr_status = init_cqr->status;
	dasd_sfree_request(init_cqr, device);
	dasd_kick_device(device);
}

static int
dasd_eckd_start_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *init_cqr;

	private = (struct dasd_eckd_private *) block->base->private;
	init_cqr = dasd_eckd_analysis_ccw(block->base);
	if (IS_ERR(init_cqr))
		return PTR_ERR(init_cqr);
	init_cqr->callback = dasd_eckd_analysis_callback;
	init_cqr->callback_data = NULL;
	init_cqr->expires = 5*HZ;
	dasd_add_request_head(init_cqr);
	return -EAGAIN;
}

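/*
 * Evaluate the count data gathered by the initial analysis: detect the
 * compatible disk layout (CDL) on track 0 or a uniform Linux disk
 * layout, and derive the block size and the total number of blocks.
 */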
static int
dasd_eckd_end_analysis(struct dasd_block *block)
{
	struct dasd_device *device;
	struct dasd_eckd_private *private;
	struct eckd_count *count_area;
	unsigned int sb, blk_per_trk;
	int status, i;

	device = block->base;
	private = (struct dasd_eckd_private *) device->private;
	status = private->init_cqr_status;
	private->init_cqr_status = -1;
	if (status != DASD_CQR_DONE) {
		dev_warn(&device->cdev->dev,
			 "The DASD is not formatted\n");
		return -EMEDIUMTYPE;
	}

	private->uses_cdl = 1;
	/* Check Track 0 for Compatible Disk Layout */
	count_area = NULL;
	for (i = 0; i < 3; i++) {
		if (private->count_area[i].kl != 4 ||
		    private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
			private->uses_cdl = 0;
			break;
		}
	}
	if (i == 3)
		count_area = &private->count_area[4];

	if (private->uses_cdl == 0) {
		for (i = 0; i < 5; i++) {
			if ((private->count_area[i].kl != 0) ||
			    (private->count_area[i].dl !=
			     private->count_area[0].dl))
				break;
		}
		if (i == 5)
			count_area = &private->count_area[0];
	} else {
		if (private->count_area[3].record == 1)
			dev_warn(&device->cdev->dev,
				 "Track 0 has no records following the VTOC\n");
	}
	if (count_area != NULL && count_area->kl == 0) {
		/* we found nothing violating our disk layout */
		if (dasd_check_blocksize(count_area->dl) == 0)
			block->bp_block = count_area->dl;
	}
	if (block->bp_block == 0) {
		dev_warn(&device->cdev->dev,
			 "The disk layout of the DASD is not supported\n");
		return -EMEDIUMTYPE;
	}
	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
	for (sb = 512; sb < block->bp_block; sb = sb << 1)
		block->s2b_shift++;

	blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
	block->blocks = (private->real_cyl *
			 private->rdc_data.trk_per_cyl *
			 blk_per_trk);

	dev_info(&device->cdev->dev,
		 "DASD with %d KB/block, %d KB total size, %d KB/track, "
		 "%s\n", (block->bp_block >> 10),
		 ((private->real_cyl *
		   private->rdc_data.trk_per_cyl *
		   blk_per_trk * (block->bp_block >> 9)) >> 1),
		 ((blk_per_trk * block->bp_block) >> 10),
		 private->uses_cdl ?
		 "compatible disk layout" : "linux disk layout");

	return 0;
}

static int dasd_eckd_do_analysis(struct dasd_block *block)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (private->init_cqr_status < 0)
		return dasd_eckd_start_analysis(block);
	else
		return dasd_eckd_end_analysis(block);
}

static int dasd_eckd_ready_to_online(struct dasd_device *device)
{
	return dasd_alias_add_device(device);
};

static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
	return dasd_alias_remove_device(device);
};

static int
dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
{
	struct dasd_eckd_private *private;

	private = (struct dasd_eckd_private *) block->base->private;
	if (dasd_check_blocksize(block->bp_block) == 0) {
		geo->sectors = recs_per_track(&private->rdc_data,
					      0, block->bp_block);
	}
	geo->cylinders = private->rdc_data.no_cyl;
	geo->heads = private->rdc_data.trk_per_cyl;
	return 0;
}

static struct dasd_ccw_req *
dasd_eckd_format_device(struct dasd_device * device,
			struct format_data_t * fdata)
{
	struct dasd_eckd_private *private;
	struct dasd_ccw_req *fcp;
	struct eckd_count *ect;
	struct ccw1 *ccw;
	void *data;
	int rpt;
	struct ch_t address;
	int cplength, datasize;
	int i;
	int intensity = 0;
	int r0_perm;

	private = (struct dasd_eckd_private *) device->private;
	rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
	set_ch_t(&address,
		 fdata->start_unit / private->rdc_data.trk_per_cyl,
		 fdata->start_unit % private->rdc_data.trk_per_cyl);

	/* Sanity checks. */
	if (fdata->start_unit >=
	    (private->real_cyl * private->rdc_data.trk_per_cyl)) {
		dev_warn(&device->cdev->dev, "Start track number %d used in "
			 "formatting is too big\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (fdata->start_unit > fdata->stop_unit) {
		dev_warn(&device->cdev->dev, "Start track %d used in "
			 "formatting exceeds end track\n", fdata->start_unit);
		return ERR_PTR(-EINVAL);
	}
	if (dasd_check_blocksize(fdata->blksize) != 0) {
		dev_warn(&device->cdev->dev,
			 "The DASD cannot be formatted with block size %d\n",
			 fdata->blksize);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * fdata->intensity is a bit string that tells us what to do:
	 *   Bit 0: write record zero
	 *   Bit 1: write home address, currently not supported
	 *   Bit 2: invalidate tracks
	 *   Bit 3: use OS/390 compatible disk layout (cdl)
	 *   Bit 4: do not allow storage subsystem to modify record zero
	 * Only some bit combinations do make sense.
	 */
	if (fdata->intensity & 0x10) {
		r0_perm = 0;
		intensity = fdata->intensity & ~0x10;
	} else {
		r0_perm = 1;
		intensity = fdata->intensity;
	}
	switch (intensity) {
	case 0x00:	/* Normal format */
	case 0x08:	/* Normal format, use cdl. */
		cplength = 2 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x01:	/* Write record zero and format track. */
	case 0x09:	/* Write record zero and format track, use cdl. */
		cplength = 3 + rpt;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count) +
			rpt * sizeof(struct eckd_count);
		break;
	case 0x04:	/* Invalidate track. */
	case 0x0c:	/* Invalidate track, use cdl. */
		cplength = 3;
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			sizeof(struct eckd_count);
		break;
	default:
		dev_warn(&device->cdev->dev, "An I/O control call used "
			 "incorrect flags 0x%x\n", fdata->intensity);
		return ERR_PTR(-EINVAL);
	}
	/* Allocate the format ccw request. */
	fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, device);
	if (IS_ERR(fcp))
		return fcp;

	data = fcp->data;
	ccw = fcp->cpaddr;

	switch (intensity & ~0x08) {
	case 0x00: /* Normal format. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		/* grant subsystem permission to format R0 */
		if (r0_perm)
			((struct DE_eckd_data *)data)->ga_extended |= 0x04;
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt,
			      DASD_ECKD_CCW_WRITE_CKD, device,
			      fdata->blksize);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x01: /* Write record zero + format track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO,
			      device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, rpt + 1,
			      DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
			      device->block->bp_block);
		data += sizeof(struct LO_eckd_data);
		break;
	case 0x04: /* Invalidate track. */
		define_extent(ccw++, (struct DE_eckd_data *) data,
			      fdata->start_unit, fdata->start_unit,
			      DASD_ECKD_CCW_WRITE_CKD, device);
		data += sizeof(struct DE_eckd_data);
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, (struct LO_eckd_data *) data,
			      fdata->start_unit, 0, 1,
			      DASD_ECKD_CCW_WRITE_CKD, device, 8);
		data += sizeof(struct LO_eckd_data);
		break;
	}
	if (intensity & 0x01) {	/* write record zero */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 0;
		ect->kl = 0;
		ect->dl = 8;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
		ccw++;
	}
	if ((intensity & ~0x08) & 0x04) {	/* erase track */
		ect = (struct eckd_count *) data;
		data += sizeof(struct eckd_count);
		ect->cyl = address.cyl;
		ect->head = address.head;
		ect->record = 1;
		ect->kl = 0;
		ect->dl = 0;
		ccw[-1].flags |= CCW_FLAG_CC;
		ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
		ccw->flags = CCW_FLAG_SLI;
		ccw->count = 8;
		ccw->cda = (__u32)(addr_t) ect;
	} else {		/* write remaining records */
		for (i = 0; i < rpt; i++) {
			ect = (struct eckd_count *) data;
			data += sizeof(struct eckd_count);
			ect->cyl = address.cyl;
			ect->head = address.head;
			ect->record = i + 1;
			ect->kl = 0;
			ect->dl = fdata->blksize;
			/* Check for special tracks 0-1 when formatting CDL */
			if ((intensity & 0x08) &&
			    fdata->start_unit == 0) {
				if (i < 3) {
					ect->kl = 4;
					ect->dl = sizes_trk0[i] - 4;
				}
			}
			if ((intensity & 0x08) &&
			    fdata->start_unit == 1) {
				ect->kl = 44;
				ect->dl = LABEL_SIZE - 44;
			}
			ccw[-1].flags |= CCW_FLAG_CC;
			ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
			ccw->flags = CCW_FLAG_SLI;
			ccw->count = 8;
			ccw->cda = (__u32)(addr_t) ect;
			ccw++;
		}
	}
	fcp->startdev = device;
	fcp->memdev = device;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
	fcp->retries = 5;	/* set retry counter to enable default ERP */
	fcp->buildclk = get_clock();
	fcp->status = DASD_CQR_FILLED;
	return fcp;
}

static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
{
	cqr->status = DASD_CQR_FILLED;
	if (cqr->block && (cqr->startdev != cqr->block->base)) {
		dasd_eckd_reset_ccw_to_base_io(cqr);
		cqr->startdev = cqr->block->base;
	}
};

static dasd_erp_fn_t
dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
{
	struct dasd_device *device = (struct dasd_device *) cqr->startdev;
	struct ccw_device *cdev = device->cdev;

	switch (cdev->id.cu_type) {
	case 0x3990:
	case 0x2105:
	case 0x2107:
	case 0x1750:
		return dasd_3990_erp_action;
	case 0x9343:
	case 0x3880:
	default:
		return dasd_default_erp_action;
	}
}

static dasd_erp_fn_t
dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
{
	return dasd_default_erp_postaction;
}


static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
						   struct irb *irb)
{
	char mask;
	char *sense = NULL;

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((scsw_dstat(&irb->scsw) & mask) == mask) {
		dasd_generic_handle_state_change(device);
		return;
	}

	/* summary unit check */
	if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
	    (irb->ecw[7] == 0x0D)) {
		dasd_alias_handle_summary_unit_check(device, irb);
		return;
	}

	sense = dasd_get_sense(irb);
	/* service information message SIM */
	if (sense && !(sense[27] & DASD_SENSE_BIT_0) &&
	    ((sense[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
		dasd_3990_erp_handle_sim(device, sense);
		dasd_schedule_device_bh(device);
		return;
	}

	if ((scsw_cc(&irb->scsw) == 1) &&
	    (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
	    (scsw_actl(&irb->scsw) & SCSW_ACTL_START_PEND) &&
	    (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND)) {
		/* fake irb do nothing, they are handled elsewhere */
		dasd_schedule_device_bh(device);
		return;
	}

	if (!sense) {
		/* just report other unsolicited interrupts */
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received");
	} else {
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "unsolicited interrupt received "
			      "(sense available)");
		device->discipline->dump_sense_dbf(device, NULL, irb,
						   "unsolicited");
	}

	dasd_schedule_device_bh(device);
	return;
};

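/*
 * Build a command mode channel program that transfers the request
 * block by block: a Prefix (or Define Extent/Locate Record) prelude
 * followed by one read/write ccw per block, with additional Locate
 * Record ccws for the special CDL records on tracks 0 and 1.
 */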
static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
					       struct dasd_device *startdev,
					       struct dasd_block *block,
					       struct request *req,
					       sector_t first_rec,
					       sector_t last_rec,
					       sector_t first_trk,
					       sector_t last_trk,
					       unsigned int first_offs,
					       unsigned int last_offs,
					       unsigned int blk_per_trk,
					       unsigned int blksize)
{
	struct dasd_eckd_private *private;
	unsigned long *idaws;
	struct LO_eckd_data *LO_data;
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	struct req_iterator iter;
	struct bio_vec *bv;
	char *dst;
	unsigned int off;
	int count, cidaw, cplength, datasize;
	sector_t recid;
	unsigned char cmd, rcmd;
	int use_prefix;
	struct dasd_device *basedev;

	basedev = block->base;
	private = (struct dasd_eckd_private *) basedev->private;
	if (rq_data_dir(req) == READ)
		cmd = DASD_ECKD_CCW_READ_MT;
	else if (rq_data_dir(req) == WRITE)
		cmd = DASD_ECKD_CCW_WRITE_MT;
	else
		return ERR_PTR(-EINVAL);

	/* Check struct bio and count the number of blocks for the request. */
	count = 0;
	cidaw = 0;
	rq_for_each_segment(bv, req, iter) {
		if (bv->bv_len & (blksize - 1))
			/* Eckd can only do full blocks. */
			return ERR_PTR(-EINVAL);
		count += bv->bv_len >> (block->s2b_shift + 9);
#if defined(CONFIG_64BIT)
		if (idal_is_needed(page_address(bv->bv_page), bv->bv_len))
			cidaw += bv->bv_len >> (block->s2b_shift + 9);
#endif
	}
	/* Paranoia. */
	if (count != last_rec - first_rec + 1)
		return ERR_PTR(-EINVAL);

	/* use the prefix command if available */
	use_prefix = private->features.feature[8] & 0x01;
	if (use_prefix) {
		/* 1x prefix + number of blocks */
		cplength = 2 + count;
		/* 1x prefix + cidaws*sizeof(long) */
		datasize = sizeof(struct PFX_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	} else {
		/* 1x define extent + 1x locate record + number of blocks */
		cplength = 2 + count;
		/* 1x define extent + 1x locate record + cidaws*sizeof(long) */
		datasize = sizeof(struct DE_eckd_data) +
			sizeof(struct LO_eckd_data) +
			cidaw * sizeof(unsigned long);
	}
	/* Find out the number of additional locate record ccws for cdl. */
	if (private->uses_cdl && first_rec < 2*blk_per_trk) {
		if (last_rec >= 2*blk_per_trk)
			count = 2*blk_per_trk - first_rec;
		cplength += count;
		datasize += count*sizeof(struct LO_eckd_data);
	}
	/* Allocate the ccw request. */
	cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
				   cplength, datasize, startdev);
	if (IS_ERR(cqr))
		return cqr;
	ccw = cqr->cpaddr;
	/* First ccw is define extent or prefix. */
	if (use_prefix) {
		if (prefix(ccw++, cqr->data, first_trk,
			   last_trk, cmd, basedev, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct PFX_eckd_data));
	} else {
		if (define_extent(ccw++, cqr->data, first_trk,
				  last_trk, cmd, startdev) == -EAGAIN) {
			/* Clock not in sync and XRC is enabled.
			 * Try again later.
			 */
			dasd_sfree_request(cqr, startdev);
			return ERR_PTR(-EAGAIN);
		}
		idaws = (unsigned long *) (cqr->data +
					   sizeof(struct DE_eckd_data));
	}
	/* Build locate_record + read/write ccws. */
	LO_data = (struct LO_eckd_data *) (idaws + cidaw);
	recid = first_rec;
	if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
		/* Only standard blocks so there is just one locate record. */
		ccw[-1].flags |= CCW_FLAG_CC;
		locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
			      last_rec - recid + 1, cmd, basedev, blksize);
	}
	rq_for_each_segment(bv, req, iter) {
		dst = page_address(bv->bv_page) + bv->bv_offset;
		if (dasd_page_cache) {
			char *copy = kmem_cache_alloc(dasd_page_cache,
						      GFP_DMA | __GFP_NOWARN);
			if (copy && rq_data_dir(req) == WRITE)
				memcpy(copy + bv->bv_offset, dst, bv->bv_len);
			if (copy)
				dst = copy + bv->bv_offset;
		}
		for (off = 0; off < bv->bv_len; off += blksize) {
			sector_t trkid = recid;
			unsigned int recoffs = sector_div(trkid, blk_per_trk);
			rcmd = cmd;
			count = blksize;
			/* Locate record for cdl special block ? */
			if (private->uses_cdl && recid < 2*blk_per_trk) {
				if (dasd_eckd_cdl_special(blk_per_trk, recid)) {
					rcmd |= 0x8;
					count = dasd_eckd_cdl_reclen(recid);
					if (count < blksize &&
					    rq_data_dir(req) == READ)
						memset(dst + count, 0xe5,
						       blksize - count);
				}
				ccw[-1].flags |= CCW_FLAG_CC;
				locate_record(ccw++, LO_data++,
					      trkid, recoffs + 1,
					      1, rcmd, basedev, count);
			}
*/ 1869 if (private->uses_cdl && recid == 2*blk_per_trk) { 1870 ccw[-1].flags |= CCW_FLAG_CC; 1871 locate_record(ccw++, LO_data++, 1872 trkid, recoffs + 1, 1873 last_rec - recid + 1, 1874 cmd, basedev, count); 1875 } 1876 /* Read/write ccw. */ 1877 ccw[-1].flags |= CCW_FLAG_CC; 1878 ccw->cmd_code = rcmd; 1879 ccw->count = count; 1880 if (idal_is_needed(dst, blksize)) { 1881 ccw->cda = (__u32)(addr_t) idaws; 1882 ccw->flags = CCW_FLAG_IDA; 1883 idaws = idal_create_words(idaws, dst, blksize); 1884 } else { 1885 ccw->cda = (__u32)(addr_t) dst; 1886 ccw->flags = 0; 1887 } 1888 ccw++; 1889 dst += blksize; 1890 recid++; 1891 } 1892 } 1893 if (blk_noretry_request(req) || 1894 block->base->features & DASD_FEATURE_FAILFAST) 1895 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 1896 cqr->startdev = startdev; 1897 cqr->memdev = startdev; 1898 cqr->block = block; 1899 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 1900 cqr->lpm = private->path_data.ppm; 1901 cqr->retries = 256; 1902 cqr->buildclk = get_clock(); 1903 cqr->status = DASD_CQR_FILLED; 1904 return cqr; 1905 } 1906 1907 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track( 1908 struct dasd_device *startdev, 1909 struct dasd_block *block, 1910 struct request *req, 1911 sector_t first_rec, 1912 sector_t last_rec, 1913 sector_t first_trk, 1914 sector_t last_trk, 1915 unsigned int first_offs, 1916 unsigned int last_offs, 1917 unsigned int blk_per_trk, 1918 unsigned int blksize) 1919 { 1920 struct dasd_eckd_private *private; 1921 unsigned long *idaws; 1922 struct dasd_ccw_req *cqr; 1923 struct ccw1 *ccw; 1924 struct req_iterator iter; 1925 struct bio_vec *bv; 1926 char *dst, *idaw_dst; 1927 unsigned int cidaw, cplength, datasize; 1928 unsigned int tlf; 1929 sector_t recid; 1930 unsigned char cmd; 1931 struct dasd_device *basedev; 1932 unsigned int trkcount, count, count_to_trk_end; 1933 unsigned int idaw_len, seg_len, part_len, len_to_track_end; 1934 unsigned char new_track, end_idaw; 1935 sector_t trkid; 1936 unsigned int recoffs; 1937 1938 basedev = block->base; 1939 private = (struct dasd_eckd_private *) basedev->private; 1940 if (rq_data_dir(req) == READ) 1941 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 1942 else if (rq_data_dir(req) == WRITE) 1943 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 1944 else 1945 return ERR_PTR(-EINVAL); 1946 1947 /* Track based I/O needs IDAWs for each page, and not just for 1948 * 64 bit addresses. We need additional idals for pages 1949 * that get filled from two tracks, so we use the number 1950 * of records as upper limit. 1951 */ 1952 cidaw = last_rec - first_rec + 1; 1953 trkcount = last_trk - first_trk + 1; 1954 1955 /* 1x prefix + one read/write ccw per track */ 1956 cplength = 1 + trkcount; 1957 1958 /* on 31-bit we need space for two 32 bit addresses per page 1959 * on 64-bit one 64 bit address 1960 */ 1961 datasize = sizeof(struct PFX_eckd_data) + 1962 cidaw * sizeof(unsigned long long); 1963 1964 /* Allocate the ccw request. */ 1965 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 1966 cplength, datasize, startdev); 1967 if (IS_ERR(cqr)) 1968 return cqr; 1969 ccw = cqr->cpaddr; 1970 /* transfer length factor: how many bytes to read from the last track */ 1971 if (first_trk == last_trk) 1972 tlf = last_offs - first_offs + 1; 1973 else 1974 tlf = last_offs + 1; 1975 tlf *= blksize; 1976 1977 if (prefix_LRE(ccw++, cqr->data, first_trk, 1978 last_trk, cmd, basedev, startdev, 1979 1 /* format */, first_offs + 1, 1980 trkcount, blksize, 1981 tlf) == -EAGAIN) { 1982 /* Clock not in sync and XRC is enabled. 
1983 * Try again later. 1984 */ 1985 dasd_sfree_request(cqr, startdev); 1986 return ERR_PTR(-EAGAIN); 1987 } 1988 1989 /* 1990 * The translation of request into ccw programs must meet the 1991 * following conditions: 1992 * - all idaws but the first and the last must address full pages 1993 * (or 2K blocks on 31-bit) 1994 * - the scope of a ccw and it's idal ends with the track boundaries 1995 */ 1996 idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data)); 1997 recid = first_rec; 1998 new_track = 1; 1999 end_idaw = 0; 2000 len_to_track_end = 0; 2001 idaw_dst = 0; 2002 idaw_len = 0; 2003 rq_for_each_segment(bv, req, iter) { 2004 dst = page_address(bv->bv_page) + bv->bv_offset; 2005 seg_len = bv->bv_len; 2006 while (seg_len) { 2007 if (new_track) { 2008 trkid = recid; 2009 recoffs = sector_div(trkid, blk_per_trk); 2010 count_to_trk_end = blk_per_trk - recoffs; 2011 count = min((last_rec - recid + 1), 2012 (sector_t)count_to_trk_end); 2013 len_to_track_end = count * blksize; 2014 ccw[-1].flags |= CCW_FLAG_CC; 2015 ccw->cmd_code = cmd; 2016 ccw->count = len_to_track_end; 2017 ccw->cda = (__u32)(addr_t)idaws; 2018 ccw->flags = CCW_FLAG_IDA; 2019 ccw++; 2020 recid += count; 2021 new_track = 0; 2022 } 2023 /* If we start a new idaw, everything is fine and the 2024 * start of the new idaw is the start of this segment. 2025 * If we continue an idaw, we must make sure that the 2026 * current segment begins where the so far accumulated 2027 * idaw ends 2028 */ 2029 if (!idaw_dst) 2030 idaw_dst = dst; 2031 if ((idaw_dst + idaw_len) != dst) { 2032 dasd_sfree_request(cqr, startdev); 2033 return ERR_PTR(-ERANGE); 2034 } 2035 part_len = min(seg_len, len_to_track_end); 2036 seg_len -= part_len; 2037 dst += part_len; 2038 idaw_len += part_len; 2039 len_to_track_end -= part_len; 2040 /* collected memory area ends on an IDA_BLOCK border, 2041 * -> create an idaw 2042 * idal_create_words will handle cases where idaw_len 2043 * is larger then IDA_BLOCK_SIZE 2044 */ 2045 if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1))) 2046 end_idaw = 1; 2047 /* We also need to end the idaw at track end */ 2048 if (!len_to_track_end) { 2049 new_track = 1; 2050 end_idaw = 1; 2051 } 2052 if (end_idaw) { 2053 idaws = idal_create_words(idaws, idaw_dst, 2054 idaw_len); 2055 idaw_dst = 0; 2056 idaw_len = 0; 2057 end_idaw = 0; 2058 } 2059 } 2060 } 2061 2062 if (blk_noretry_request(req) || 2063 block->base->features & DASD_FEATURE_FAILFAST) 2064 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2065 cqr->startdev = startdev; 2066 cqr->memdev = startdev; 2067 cqr->block = block; 2068 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 2069 cqr->lpm = private->path_data.ppm; 2070 cqr->retries = 256; 2071 cqr->buildclk = get_clock(); 2072 cqr->status = DASD_CQR_FILLED; 2073 return cqr; 2074 } 2075 2076 static int prepare_itcw(struct itcw *itcw, 2077 unsigned int trk, unsigned int totrk, int cmd, 2078 struct dasd_device *basedev, 2079 struct dasd_device *startdev, 2080 unsigned int rec_on_trk, int count, 2081 unsigned int blksize, 2082 unsigned int total_data_size, 2083 unsigned int tlf, 2084 unsigned int blk_per_trk) 2085 { 2086 struct PFX_eckd_data pfxdata; 2087 struct dasd_eckd_private *basepriv, *startpriv; 2088 struct DE_eckd_data *dedata; 2089 struct LRE_eckd_data *lredata; 2090 struct dcw *dcw; 2091 2092 u32 begcyl, endcyl; 2093 u16 heads, beghead, endhead; 2094 u8 pfx_cmd; 2095 2096 int rc = 0; 2097 int sector = 0; 2098 int dn, d; 2099 2100 2101 /* setup prefix data */ 2102 basepriv = (struct dasd_eckd_private *) 
basedev->private; 2103 startpriv = (struct dasd_eckd_private *) startdev->private; 2104 dedata = &pfxdata.define_extent; 2105 lredata = &pfxdata.locate_record; 2106 2107 memset(&pfxdata, 0, sizeof(pfxdata)); 2108 pfxdata.format = 1; /* PFX with LRE */ 2109 pfxdata.base_address = basepriv->ned->unit_addr; 2110 pfxdata.base_lss = basepriv->ned->ID; 2111 pfxdata.validity.define_extent = 1; 2112 2113 /* private uid is kept up to date, conf_data may be outdated */ 2114 if (startpriv->uid.type != UA_BASE_DEVICE) { 2115 pfxdata.validity.verify_base = 1; 2116 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS) 2117 pfxdata.validity.hyper_pav = 1; 2118 } 2119 2120 switch (cmd) { 2121 case DASD_ECKD_CCW_READ_TRACK_DATA: 2122 dedata->mask.perm = 0x1; 2123 dedata->attributes.operation = basepriv->attrib.operation; 2124 dedata->blk_size = blksize; 2125 dedata->ga_extended |= 0x42; 2126 lredata->operation.orientation = 0x0; 2127 lredata->operation.operation = 0x0C; 2128 lredata->auxiliary.check_bytes = 0x01; 2129 pfx_cmd = DASD_ECKD_CCW_PFX_READ; 2130 break; 2131 case DASD_ECKD_CCW_WRITE_TRACK_DATA: 2132 dedata->mask.perm = 0x02; 2133 dedata->attributes.operation = basepriv->attrib.operation; 2134 dedata->blk_size = blksize; 2135 rc = check_XRC_on_prefix(&pfxdata, basedev); 2136 dedata->ga_extended |= 0x42; 2137 lredata->operation.orientation = 0x0; 2138 lredata->operation.operation = 0x3F; 2139 lredata->extended_operation = 0x23; 2140 lredata->auxiliary.check_bytes = 0x2; 2141 pfx_cmd = DASD_ECKD_CCW_PFX; 2142 break; 2143 default: 2144 DBF_DEV_EVENT(DBF_ERR, basedev, 2145 "prepare itcw, unknown opcode 0x%x", cmd); 2146 BUG(); 2147 break; 2148 } 2149 if (rc) 2150 return rc; 2151 2152 dedata->attributes.mode = 0x3; /* ECKD */ 2153 2154 heads = basepriv->rdc_data.trk_per_cyl; 2155 begcyl = trk / heads; 2156 beghead = trk % heads; 2157 endcyl = totrk / heads; 2158 endhead = totrk % heads; 2159 2160 /* check for sequential prestage - enhance cylinder range */ 2161 if (dedata->attributes.operation == DASD_SEQ_PRESTAGE || 2162 dedata->attributes.operation == DASD_SEQ_ACCESS) { 2163 2164 if (endcyl + basepriv->attrib.nr_cyl < basepriv->real_cyl) 2165 endcyl += basepriv->attrib.nr_cyl; 2166 else 2167 endcyl = (basepriv->real_cyl - 1); 2168 } 2169 2170 set_ch_t(&dedata->beg_ext, begcyl, beghead); 2171 set_ch_t(&dedata->end_ext, endcyl, endhead); 2172 2173 dedata->ep_format = 0x20; /* records per track is valid */ 2174 dedata->ep_rec_per_track = blk_per_trk; 2175 2176 if (rec_on_trk) { 2177 switch (basepriv->rdc_data.dev_type) { 2178 case 0x3390: 2179 dn = ceil_quot(blksize + 6, 232); 2180 d = 9 + ceil_quot(blksize + 6 * (dn + 1), 34); 2181 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8; 2182 break; 2183 case 0x3380: 2184 d = 7 + ceil_quot(blksize + 12, 32); 2185 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7; 2186 break; 2187 } 2188 } 2189 2190 lredata->auxiliary.length_valid = 1; 2191 lredata->auxiliary.length_scope = 1; 2192 lredata->auxiliary.imbedded_ccw_valid = 1; 2193 lredata->length = tlf; 2194 lredata->imbedded_ccw = cmd; 2195 lredata->count = count; 2196 lredata->sector = sector; 2197 set_ch_t(&lredata->seek_addr, begcyl, beghead); 2198 lredata->search_arg.cyl = lredata->seek_addr.cyl; 2199 lredata->search_arg.head = lredata->seek_addr.head; 2200 lredata->search_arg.record = rec_on_trk; 2201 2202 dcw = itcw_add_dcw(itcw, pfx_cmd, 0, 2203 &pfxdata, sizeof(pfxdata), total_data_size); 2204 2205 return rc; 2206 } 2207 2208 static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( 2209 struct dasd_device 
*startdev, 2210 struct dasd_block *block, 2211 struct request *req, 2212 sector_t first_rec, 2213 sector_t last_rec, 2214 sector_t first_trk, 2215 sector_t last_trk, 2216 unsigned int first_offs, 2217 unsigned int last_offs, 2218 unsigned int blk_per_trk, 2219 unsigned int blksize) 2220 { 2221 struct dasd_eckd_private *private; 2222 struct dasd_ccw_req *cqr; 2223 struct req_iterator iter; 2224 struct bio_vec *bv; 2225 char *dst; 2226 unsigned int trkcount, ctidaw; 2227 unsigned char cmd; 2228 struct dasd_device *basedev; 2229 unsigned int tlf; 2230 struct itcw *itcw; 2231 struct tidaw *last_tidaw = NULL; 2232 int itcw_op; 2233 size_t itcw_size; 2234 2235 basedev = block->base; 2236 private = (struct dasd_eckd_private *) basedev->private; 2237 if (rq_data_dir(req) == READ) { 2238 cmd = DASD_ECKD_CCW_READ_TRACK_DATA; 2239 itcw_op = ITCW_OP_READ; 2240 } else if (rq_data_dir(req) == WRITE) { 2241 cmd = DASD_ECKD_CCW_WRITE_TRACK_DATA; 2242 itcw_op = ITCW_OP_WRITE; 2243 } else 2244 return ERR_PTR(-EINVAL); 2245 2246 /* trackbased I/O needs address all memory via TIDAWs, 2247 * not just for 64 bit addresses. This allows us to map 2248 * each segment directly to one tidaw. 2249 */ 2250 trkcount = last_trk - first_trk + 1; 2251 ctidaw = 0; 2252 rq_for_each_segment(bv, req, iter) { 2253 ++ctidaw; 2254 } 2255 2256 /* Allocate the ccw request. */ 2257 itcw_size = itcw_calc_size(0, ctidaw, 0); 2258 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2259 0, itcw_size, startdev); 2260 if (IS_ERR(cqr)) 2261 return cqr; 2262 2263 cqr->cpmode = 1; 2264 cqr->startdev = startdev; 2265 cqr->memdev = startdev; 2266 cqr->block = block; 2267 cqr->expires = 100*HZ; 2268 cqr->buildclk = get_clock(); 2269 cqr->status = DASD_CQR_FILLED; 2270 cqr->retries = 10; 2271 2272 /* transfer length factor: how many bytes to read from the last track */ 2273 if (first_trk == last_trk) 2274 tlf = last_offs - first_offs + 1; 2275 else 2276 tlf = last_offs + 1; 2277 tlf *= blksize; 2278 2279 itcw = itcw_init(cqr->data, itcw_size, itcw_op, 0, ctidaw, 0); 2280 cqr->cpaddr = itcw_get_tcw(itcw); 2281 2282 if (prepare_itcw(itcw, first_trk, last_trk, 2283 cmd, basedev, startdev, 2284 first_offs + 1, 2285 trkcount, blksize, 2286 (last_rec - first_rec + 1) * blksize, 2287 tlf, blk_per_trk) == -EAGAIN) { 2288 /* Clock not in sync and XRC is enabled. 2289 * Try again later. 2290 */ 2291 dasd_sfree_request(cqr, startdev); 2292 return ERR_PTR(-EAGAIN); 2293 } 2294 2295 /* 2296 * A tidaw can address 4k of memory, but must not cross page boundaries 2297 * We can let the block layer handle this by setting 2298 * blk_queue_segment_boundary to page boundaries and 2299 * blk_max_segment_size to page size when setting up the request queue. 
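 * With these queue limits in place every bio segment fits into a
 * single tidaw, which is why ctidaw above is simply the number of
 * segments of the request.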
2300 */ 2301 rq_for_each_segment(bv, req, iter) { 2302 dst = page_address(bv->bv_page) + bv->bv_offset; 2303 last_tidaw = itcw_add_tidaw(itcw, 0x00, dst, bv->bv_len); 2304 if (IS_ERR(last_tidaw)) 2305 return (struct dasd_ccw_req *)last_tidaw; 2306 } 2307 2308 last_tidaw->flags |= 0x80; 2309 itcw_finalize(itcw); 2310 2311 if (blk_noretry_request(req) || 2312 block->base->features & DASD_FEATURE_FAILFAST) 2313 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2314 cqr->startdev = startdev; 2315 cqr->memdev = startdev; 2316 cqr->block = block; 2317 cqr->expires = 5 * 60 * HZ; /* 5 minutes */ 2318 cqr->lpm = private->path_data.ppm; 2319 cqr->retries = 256; 2320 cqr->buildclk = get_clock(); 2321 cqr->status = DASD_CQR_FILLED; 2322 return cqr; 2323 } 2324 2325 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev, 2326 struct dasd_block *block, 2327 struct request *req) 2328 { 2329 int tpm, cmdrtd, cmdwtd; 2330 int use_prefix; 2331 2332 struct dasd_eckd_private *private; 2333 int fcx_in_css, fcx_in_gneq, fcx_in_features; 2334 struct dasd_device *basedev; 2335 sector_t first_rec, last_rec; 2336 sector_t first_trk, last_trk; 2337 unsigned int first_offs, last_offs; 2338 unsigned int blk_per_trk, blksize; 2339 int cdlspecial; 2340 struct dasd_ccw_req *cqr; 2341 2342 basedev = block->base; 2343 private = (struct dasd_eckd_private *) basedev->private; 2344 2345 /* Calculate number of blocks/records per track. */ 2346 blksize = block->bp_block; 2347 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2348 /* Calculate record id of first and last block. */ 2349 first_rec = first_trk = req->sector >> block->s2b_shift; 2350 first_offs = sector_div(first_trk, blk_per_trk); 2351 last_rec = last_trk = 2352 (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2353 last_offs = sector_div(last_trk, blk_per_trk); 2354 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2355 2356 /* is transport mode supported ? */ 2357 fcx_in_css = css_general_characteristics.fcx; 2358 fcx_in_gneq = private->gneq->reserved2[7] & 0x04; 2359 fcx_in_features = private->features.feature[40] & 0x80; 2360 tpm = fcx_in_css && fcx_in_gneq && fcx_in_features; 2361 2362 /* is read track data and write track data in command mode supported? 
*/ 2363 cmdrtd = private->features.feature[9] & 0x20; 2364 cmdwtd = private->features.feature[12] & 0x40; 2365 use_prefix = private->features.feature[8] & 0x01; 2366 2367 cqr = NULL; 2368 if (cdlspecial || dasd_page_cache) { 2369 /* do nothing, just fall through to the cmd mode single case */ 2370 } else if (!dasd_nofcx && tpm && (first_trk == last_trk)) { 2371 cqr = dasd_eckd_build_cp_tpm_track(startdev, block, req, 2372 first_rec, last_rec, 2373 first_trk, last_trk, 2374 first_offs, last_offs, 2375 blk_per_trk, blksize); 2376 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2377 cqr = NULL; 2378 } else if (use_prefix && 2379 (((rq_data_dir(req) == READ) && cmdrtd) || 2380 ((rq_data_dir(req) == WRITE) && cmdwtd))) { 2381 cqr = dasd_eckd_build_cp_cmd_track(startdev, block, req, 2382 first_rec, last_rec, 2383 first_trk, last_trk, 2384 first_offs, last_offs, 2385 blk_per_trk, blksize); 2386 if (IS_ERR(cqr) && PTR_ERR(cqr) != -EAGAIN) 2387 cqr = NULL; 2388 } 2389 if (!cqr) 2390 cqr = dasd_eckd_build_cp_cmd_single(startdev, block, req, 2391 first_rec, last_rec, 2392 first_trk, last_trk, 2393 first_offs, last_offs, 2394 blk_per_trk, blksize); 2395 return cqr; 2396 } 2397 2398 static int 2399 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req) 2400 { 2401 struct dasd_eckd_private *private; 2402 struct ccw1 *ccw; 2403 struct req_iterator iter; 2404 struct bio_vec *bv; 2405 char *dst, *cda; 2406 unsigned int blksize, blk_per_trk, off; 2407 sector_t recid; 2408 int status; 2409 2410 if (!dasd_page_cache) 2411 goto out; 2412 private = (struct dasd_eckd_private *) cqr->block->base->private; 2413 blksize = cqr->block->bp_block; 2414 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2415 recid = req->sector >> cqr->block->s2b_shift; 2416 ccw = cqr->cpaddr; 2417 /* Skip over define extent & locate record. */ 2418 ccw++; 2419 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 2420 ccw++; 2421 rq_for_each_segment(bv, req, iter) { 2422 dst = page_address(bv->bv_page) + bv->bv_offset; 2423 for (off = 0; off < bv->bv_len; off += blksize) { 2424 /* Skip locate record. */ 2425 if (private->uses_cdl && recid <= 2*blk_per_trk) 2426 ccw++; 2427 if (dst) { 2428 if (ccw->flags & CCW_FLAG_IDA) 2429 cda = *((char **)((addr_t) ccw->cda)); 2430 else 2431 cda = (char *)((addr_t) ccw->cda); 2432 if (dst != cda) { 2433 if (rq_data_dir(req) == READ) 2434 memcpy(dst, cda, bv->bv_len); 2435 kmem_cache_free(dasd_page_cache, 2436 (void *)((addr_t)cda & PAGE_MASK)); 2437 } 2438 dst = NULL; 2439 } 2440 ccw++; 2441 recid++; 2442 } 2443 } 2444 out: 2445 status = cqr->status == DASD_CQR_DONE; 2446 dasd_sfree_request(cqr, cqr->memdev); 2447 return status; 2448 } 2449 2450 /* 2451 * Modify ccw/tcw in cqr so it can be started on a base device. 2452 * 2453 * Note that this is not enough to restart the cqr! 2454 * Either reset cqr->startdev as well (summary unit check handling) 2455 * or restart via separate cqr (as in ERP handling). 
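 *
 * Clearing the verify base and hyper PAV validity bits in the prefix
 * data makes the channel program acceptable to the base device instead
 * of a PAV alias.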
2456 */ 2457 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr) 2458 { 2459 struct ccw1 *ccw; 2460 struct PFX_eckd_data *pfxdata; 2461 struct tcw *tcw; 2462 struct tccb *tccb; 2463 struct dcw *dcw; 2464 2465 if (cqr->cpmode == 1) { 2466 tcw = cqr->cpaddr; 2467 tccb = tcw_get_tccb(tcw); 2468 dcw = (struct dcw *)&tccb->tca[0]; 2469 pfxdata = (struct PFX_eckd_data *)&dcw->cd[0]; 2470 pfxdata->validity.verify_base = 0; 2471 pfxdata->validity.hyper_pav = 0; 2472 } else { 2473 ccw = cqr->cpaddr; 2474 pfxdata = cqr->data; 2475 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) { 2476 pfxdata->validity.verify_base = 0; 2477 pfxdata->validity.hyper_pav = 0; 2478 } 2479 } 2480 } 2481 2482 #define DASD_ECKD_CHANQ_MAX_SIZE 4 2483 2484 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base, 2485 struct dasd_block *block, 2486 struct request *req) 2487 { 2488 struct dasd_eckd_private *private; 2489 struct dasd_device *startdev; 2490 unsigned long flags; 2491 struct dasd_ccw_req *cqr; 2492 2493 startdev = dasd_alias_get_start_dev(base); 2494 if (!startdev) 2495 startdev = base; 2496 private = (struct dasd_eckd_private *) startdev->private; 2497 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE) 2498 return ERR_PTR(-EBUSY); 2499 2500 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags); 2501 private->count++; 2502 cqr = dasd_eckd_build_cp(startdev, block, req); 2503 if (IS_ERR(cqr)) 2504 private->count--; 2505 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags); 2506 return cqr; 2507 } 2508 2509 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr, 2510 struct request *req) 2511 { 2512 struct dasd_eckd_private *private; 2513 unsigned long flags; 2514 2515 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags); 2516 private = (struct dasd_eckd_private *) cqr->memdev->private; 2517 private->count--; 2518 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags); 2519 return dasd_eckd_free_cp(cqr, req); 2520 } 2521 2522 static int 2523 dasd_eckd_fill_info(struct dasd_device * device, 2524 struct dasd_information2_t * info) 2525 { 2526 struct dasd_eckd_private *private; 2527 2528 private = (struct dasd_eckd_private *) device->private; 2529 info->label_block = 2; 2530 info->FBA_layout = private->uses_cdl ? 0 : 1; 2531 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL; 2532 info->characteristics_size = sizeof(struct dasd_eckd_characteristics); 2533 memcpy(info->characteristics, &private->rdc_data, 2534 sizeof(struct dasd_eckd_characteristics)); 2535 info->confdata_size = min((unsigned long)private->conf_len, 2536 sizeof(info->configuration_data)); 2537 memcpy(info->configuration_data, private->conf_data, 2538 info->confdata_size); 2539 return 0; 2540 } 2541 2542 /* 2543 * SECTION: ioctl functions for eckd devices. 2544 */ 2545 2546 /* 2547 * Release device ioctl. 2548 * Buils a channel programm to releases a prior reserved 2549 * (see dasd_eckd_reserve) device. 
2550 */ 2551 static int 2552 dasd_eckd_release(struct dasd_device *device) 2553 { 2554 struct dasd_ccw_req *cqr; 2555 int rc; 2556 struct ccw1 *ccw; 2557 2558 if (!capable(CAP_SYS_ADMIN)) 2559 return -EACCES; 2560 2561 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2562 1, 32, device); 2563 if (IS_ERR(cqr)) { 2564 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2565 "Could not allocate initialization request"); 2566 return PTR_ERR(cqr); 2567 } 2568 ccw = cqr->cpaddr; 2569 ccw->cmd_code = DASD_ECKD_CCW_RELEASE; 2570 ccw->flags |= CCW_FLAG_SLI; 2571 ccw->count = 32; 2572 ccw->cda = (__u32)(addr_t) cqr->data; 2573 cqr->startdev = device; 2574 cqr->memdev = device; 2575 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2576 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2577 cqr->retries = 2; /* set retry counter to enable basic ERP */ 2578 cqr->expires = 2 * HZ; 2579 cqr->buildclk = get_clock(); 2580 cqr->status = DASD_CQR_FILLED; 2581 2582 rc = dasd_sleep_on_immediatly(cqr); 2583 2584 dasd_sfree_request(cqr, cqr->memdev); 2585 return rc; 2586 } 2587 2588 /* 2589 * Reserve device ioctl. 2590 * Options are set to 'synchronous wait for interrupt' and 2591 * 'timeout the request'. This leads to a terminate IO if 2592 * the interrupt is outstanding for a certain time. 2593 */ 2594 static int 2595 dasd_eckd_reserve(struct dasd_device *device) 2596 { 2597 struct dasd_ccw_req *cqr; 2598 int rc; 2599 struct ccw1 *ccw; 2600 2601 if (!capable(CAP_SYS_ADMIN)) 2602 return -EACCES; 2603 2604 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2605 1, 32, device); 2606 if (IS_ERR(cqr)) { 2607 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2608 "Could not allocate initialization request"); 2609 return PTR_ERR(cqr); 2610 } 2611 ccw = cqr->cpaddr; 2612 ccw->cmd_code = DASD_ECKD_CCW_RESERVE; 2613 ccw->flags |= CCW_FLAG_SLI; 2614 ccw->count = 32; 2615 ccw->cda = (__u32)(addr_t) cqr->data; 2616 cqr->startdev = device; 2617 cqr->memdev = device; 2618 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2619 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2620 cqr->retries = 2; /* set retry counter to enable basic ERP */ 2621 cqr->expires = 2 * HZ; 2622 cqr->buildclk = get_clock(); 2623 cqr->status = DASD_CQR_FILLED; 2624 2625 rc = dasd_sleep_on_immediatly(cqr); 2626 2627 dasd_sfree_request(cqr, cqr->memdev); 2628 return rc; 2629 } 2630 2631 /* 2632 * Steal lock ioctl - unconditional reserve device. 2633 * Buils a channel programm to break a device's reservation. 
* (unconditional reserve) 2635 */ 2636 static int 2637 dasd_eckd_steal_lock(struct dasd_device *device) 2638 { 2639 struct dasd_ccw_req *cqr; 2640 int rc; 2641 struct ccw1 *ccw; 2642 2643 if (!capable(CAP_SYS_ADMIN)) 2644 return -EACCES; 2645 2646 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2647 1, 32, device); 2648 if (IS_ERR(cqr)) { 2649 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2650 "Could not allocate initialization request"); 2651 return PTR_ERR(cqr); 2652 } 2653 ccw = cqr->cpaddr; 2654 ccw->cmd_code = DASD_ECKD_CCW_SLCK; 2655 ccw->flags |= CCW_FLAG_SLI; 2656 ccw->count = 32; 2657 ccw->cda = (__u32)(addr_t) cqr->data; 2658 cqr->startdev = device; 2659 cqr->memdev = device; 2660 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 2661 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags); 2662 cqr->retries = 2; /* set retry counter to enable basic ERP */ 2663 cqr->expires = 2 * HZ; 2664 cqr->buildclk = get_clock(); 2665 cqr->status = DASD_CQR_FILLED; 2666 2667 rc = dasd_sleep_on_immediatly(cqr); 2668 2669 dasd_sfree_request(cqr, cqr->memdev); 2670 return rc; 2671 } 2672 2673 /* 2674 * Read performance statistics 2675 */ 2676 static int 2677 dasd_eckd_performance(struct dasd_device *device, void __user *argp) 2678 { 2679 struct dasd_psf_prssd_data *prssdp; 2680 struct dasd_rssd_perf_stats_t *stats; 2681 struct dasd_ccw_req *cqr; 2682 struct ccw1 *ccw; 2683 int rc; 2684 2685 cqr = dasd_smalloc_request(dasd_eckd_discipline.name, 2686 1 /* PSF */ + 1 /* RSSD */ , 2687 (sizeof(struct dasd_psf_prssd_data) + 2688 sizeof(struct dasd_rssd_perf_stats_t)), 2689 device); 2690 if (IS_ERR(cqr)) { 2691 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2692 "Could not allocate initialization request"); 2693 return PTR_ERR(cqr); 2694 } 2695 cqr->startdev = device; 2696 cqr->memdev = device; 2697 cqr->retries = 0; 2698 cqr->expires = 10 * HZ; 2699 2700 /* Prepare for Read Subsystem Data */ 2701 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 2702 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data)); 2703 prssdp->order = PSF_ORDER_PRSSD; 2704 prssdp->suborder = 0x01; /* Performance Statistics */ 2705 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */ 2706 2707 ccw = cqr->cpaddr; 2708 ccw->cmd_code = DASD_ECKD_CCW_PSF; 2709 ccw->count = sizeof(struct dasd_psf_prssd_data); 2710 ccw->flags |= CCW_FLAG_CC; 2711 ccw->cda = (__u32)(addr_t) prssdp; 2712 2713 /* Read Subsystem Data - Performance Statistics */ 2714 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 2715 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t)); 2716 2717 ccw++; 2718 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 2719 ccw->count = sizeof(struct dasd_rssd_perf_stats_t); 2720 ccw->cda = (__u32)(addr_t) stats; 2721 2722 cqr->buildclk = get_clock(); 2723 cqr->status = DASD_CQR_FILLED; 2724 rc = dasd_sleep_on(cqr); 2725 if (rc == 0) { 2726 prssdp = (struct dasd_psf_prssd_data *) cqr->data; 2727 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1); 2728 if (copy_to_user(argp, stats, 2729 sizeof(struct dasd_rssd_perf_stats_t))) 2730 rc = -EFAULT; 2731 } 2732 dasd_sfree_request(cqr, cqr->memdev); 2733 return rc; 2734 } 2735 2736 /* 2737 * Get attributes (cache operations) 2738 * Returns the cache attributes used in Define Extent (DE).
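 * The attributes are copied straight from the per-device private data;
 * no channel program is built for this ioctl.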
2739 */ 2740 static int 2741 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp) 2742 { 2743 struct dasd_eckd_private *private = 2744 (struct dasd_eckd_private *)device->private; 2745 struct attrib_data_t attrib = private->attrib; 2746 int rc; 2747 2748 if (!capable(CAP_SYS_ADMIN)) 2749 return -EACCES; 2750 if (!argp) 2751 return -EINVAL; 2752 2753 rc = 0; 2754 if (copy_to_user(argp, (long *) &attrib, 2755 sizeof(struct attrib_data_t))) 2756 rc = -EFAULT; 2757 2758 return rc; 2759 } 2760 2761 /* 2762 * Set attributes (cache operations) 2763 * Stores the attributes for cache operation to be used in Define Extend (DE). 2764 */ 2765 static int 2766 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp) 2767 { 2768 struct dasd_eckd_private *private = 2769 (struct dasd_eckd_private *)device->private; 2770 struct attrib_data_t attrib; 2771 2772 if (!capable(CAP_SYS_ADMIN)) 2773 return -EACCES; 2774 if (!argp) 2775 return -EINVAL; 2776 2777 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t))) 2778 return -EFAULT; 2779 private->attrib = attrib; 2780 2781 dev_info(&device->cdev->dev, 2782 "The DASD cache mode was set to %x (%i cylinder prestage)\n", 2783 private->attrib.operation, private->attrib.nr_cyl); 2784 return 0; 2785 } 2786 2787 /* 2788 * Issue syscall I/O to EMC Symmetrix array. 2789 * CCWs are PSF and RSSD 2790 */ 2791 static int dasd_symm_io(struct dasd_device *device, void __user *argp) 2792 { 2793 struct dasd_symmio_parms usrparm; 2794 char *psf_data, *rssd_result; 2795 struct dasd_ccw_req *cqr; 2796 struct ccw1 *ccw; 2797 int rc; 2798 2799 /* Copy parms from caller */ 2800 rc = -EFAULT; 2801 if (copy_from_user(&usrparm, argp, sizeof(usrparm))) 2802 goto out; 2803 #ifndef CONFIG_64BIT 2804 /* Make sure pointers are sane even on 31 bit. 
*/ 2805 if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) { 2806 rc = -EINVAL; 2807 goto out; 2808 } 2809 #endif 2810 /* alloc I/O data area */ 2811 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); 2812 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); 2813 if (!psf_data || !rssd_result) { 2814 rc = -ENOMEM; 2815 goto out_free; 2816 } 2817 2818 /* get syscall header from user space */ 2819 rc = -EFAULT; 2820 if (copy_from_user(psf_data, 2821 (void __user *)(unsigned long) usrparm.psf_data, 2822 usrparm.psf_data_len)) 2823 goto out_free; 2824 2825 /* sanity check on syscall header */ 2826 if (psf_data[0] != 0x17 || psf_data[1] != 0xce) { 2827 rc = -EINVAL; 2828 goto out_free; 2829 } 2830 2831 /* setup CCWs for PSF + RSSD */ 2832 cqr = dasd_smalloc_request("ECKD", 2, 0, device); 2833 if (IS_ERR(cqr)) { 2834 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 2835 "Could not allocate initialization request"); 2836 rc = PTR_ERR(cqr); 2837 goto out_free; 2838 } 2839 2840 cqr->startdev = device; 2841 cqr->memdev = device; 2842 cqr->retries = 3; 2843 cqr->expires = 10 * HZ; 2844 cqr->buildclk = get_clock(); 2845 cqr->status = DASD_CQR_FILLED; 2846 2847 /* Build the ccws */ 2848 ccw = cqr->cpaddr; 2849 2850 /* PSF ccw */ 2851 ccw->cmd_code = DASD_ECKD_CCW_PSF; 2852 ccw->count = usrparm.psf_data_len; 2853 ccw->flags |= CCW_FLAG_CC; 2854 ccw->cda = (__u32)(addr_t) psf_data; 2855 2856 ccw++; 2857 2858 /* RSSD ccw */ 2859 ccw->cmd_code = DASD_ECKD_CCW_RSSD; 2860 ccw->count = usrparm.rssd_result_len; 2861 ccw->flags = CCW_FLAG_SLI; 2862 ccw->cda = (__u32)(addr_t) rssd_result; 2863 2864 rc = dasd_sleep_on(cqr); 2865 if (rc) 2866 goto out_sfree; 2867 2868 rc = -EFAULT; 2869 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result, 2870 rssd_result, usrparm.rssd_result_len)) 2871 goto out_sfree; 2872 rc = 0; 2873 2874 out_sfree: 2875 dasd_sfree_request(cqr, cqr->memdev); 2876 out_free: 2877 kfree(rssd_result); 2878 kfree(psf_data); 2879 out: 2880 DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc); 2881 return rc; 2882 } 2883 2884 static int 2885 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) 2886 { 2887 struct dasd_device *device = block->base; 2888 2889 switch (cmd) { 2890 case BIODASDGATTR: 2891 return dasd_eckd_get_attrib(device, argp); 2892 case BIODASDSATTR: 2893 return dasd_eckd_set_attrib(device, argp); 2894 case BIODASDPSRD: 2895 return dasd_eckd_performance(device, argp); 2896 case BIODASDRLSE: 2897 return dasd_eckd_release(device); 2898 case BIODASDRSRV: 2899 return dasd_eckd_reserve(device); 2900 case BIODASDSLCK: 2901 return dasd_eckd_steal_lock(device); 2902 case BIODASDSYMMIO: 2903 return dasd_symm_io(device, argp); 2904 default: 2905 return -ENOIOCTLCMD; 2906 } 2907 } 2908 2909 /* 2910 * Dump the range of CCWs into 'page' buffer 2911 * and return number of printed chars.
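 * For every CCW the two command/flags/count/cda words are printed,
 * followed by up to 32 bytes of the data area (IDALs are dereferenced
 * when CCW_FLAG_IDA is set).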
2912 */ 2913 static int 2914 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page) 2915 { 2916 int len, count; 2917 char *datap; 2918 2919 len = 0; 2920 while (from <= to) { 2921 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 2922 " CCW %p: %08X %08X DAT:", 2923 from, ((int *) from)[0], ((int *) from)[1]); 2924 2925 /* get pointer to data (consider IDALs) */ 2926 if (from->flags & CCW_FLAG_IDA) 2927 datap = (char *) *((addr_t *) (addr_t) from->cda); 2928 else 2929 datap = (char *) ((addr_t) from->cda); 2930 2931 /* dump data (max 32 bytes) */ 2932 for (count = 0; count < from->count && count < 32; count++) { 2933 if (count % 8 == 0) len += sprintf(page + len, " "); 2934 if (count % 4 == 0) len += sprintf(page + len, " "); 2935 len += sprintf(page + len, "%02x", datap[count]); 2936 } 2937 len += sprintf(page + len, "\n"); 2938 from++; 2939 } 2940 return len; 2941 } 2942 2943 static void 2944 dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct dasd_ccw_req *req, 2945 struct irb *irb, char *reason) 2946 { 2947 u64 *sense; 2948 int sl; 2949 struct tsb *tsb; 2950 2951 sense = NULL; 2952 tsb = NULL; 2953 if (req && scsw_is_tm(&req->irb.scsw)) { 2954 if (irb->scsw.tm.tcw) 2955 tsb = tcw_get_tsb( 2956 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 2957 if (tsb && (irb->scsw.tm.fcxs == 0x01)) { 2958 switch (tsb->flags & 0x07) { 2959 case 1: /* tsa_iostat */ 2960 sense = (u64 *)tsb->tsa.iostat.sense; 2961 break; 2962 case 2: /* ts_ddpc */ 2963 sense = (u64 *)tsb->tsa.ddpc.sense; 2964 break; 2965 case 3: /* tsa_intrg */ 2966 break; 2967 } 2968 } 2969 } else { 2970 if (irb->esw.esw0.erw.cons) 2971 sense = (u64 *)irb->ecw; 2972 } 2973 if (sense) { 2974 for (sl = 0; sl < 4; sl++) { 2975 DBF_DEV_EVENT(DBF_EMERG, device, 2976 "%s: %016llx %016llx %016llx %016llx", 2977 reason, sense[0], sense[1], sense[2], 2978 sense[3]); 2979 } 2980 } else { 2981 DBF_DEV_EVENT(DBF_EMERG, device, "%s", 2982 "SORRY - NO VALID SENSE AVAILABLE\n"); 2983 } 2984 } 2985 2986 /* 2987 * Print sense data and related channel program. 2988 * Parts are printed because printk buffer is only 1024 bytes. 
2989 */ 2990 static void dasd_eckd_dump_sense_ccw(struct dasd_device *device, 2991 struct dasd_ccw_req *req, struct irb *irb) 2992 { 2993 char *page; 2994 struct ccw1 *first, *last, *fail, *from, *to; 2995 int len, sl, sct; 2996 2997 page = (char *) get_zeroed_page(GFP_ATOMIC); 2998 if (page == NULL) { 2999 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3000 "No memory to dump sense data\n"); 3001 return; 3002 } 3003 /* dump the sense data */ 3004 len = sprintf(page, KERN_ERR PRINTK_HEADER 3005 " I/O status report for device %s:\n", 3006 dev_name(&device->cdev->dev)); 3007 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3008 " in req: %p CS: 0x%02X DS: 0x%02X\n", req, 3009 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw)); 3010 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3011 " device %s: Failing CCW: %p\n", 3012 dev_name(&device->cdev->dev), 3013 (void *) (addr_t) irb->scsw.cmd.cpa); 3014 if (irb->esw.esw0.erw.cons) { 3015 for (sl = 0; sl < 4; sl++) { 3016 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3017 " Sense(hex) %2d-%2d:", 3018 (8 * sl), ((8 * sl) + 7)); 3019 3020 for (sct = 0; sct < 8; sct++) { 3021 len += sprintf(page + len, " %02x", 3022 irb->ecw[8 * sl + sct]); 3023 } 3024 len += sprintf(page + len, "\n"); 3025 } 3026 3027 if (irb->ecw[27] & DASD_SENSE_BIT_0) { 3028 /* 24 Byte Sense Data */ 3029 sprintf(page + len, KERN_ERR PRINTK_HEADER 3030 " 24 Byte: %x MSG %x, " 3031 "%s MSGb to SYSOP\n", 3032 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f, 3033 irb->ecw[1] & 0x10 ? "" : "no"); 3034 } else { 3035 /* 32 Byte Sense Data */ 3036 sprintf(page + len, KERN_ERR PRINTK_HEADER 3037 " 32 Byte: Format: %x " 3038 "Exception class %x\n", 3039 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4); 3040 } 3041 } else { 3042 sprintf(page + len, KERN_ERR PRINTK_HEADER 3043 " SORRY - NO VALID SENSE AVAILABLE\n"); 3044 } 3045 printk("%s", page); 3046 3047 if (req) { 3048 /* req == NULL for unsolicited interrupts */ 3049 /* dump the Channel Program (max 140 Bytes per line) */ 3050 /* Count CCW and print first CCWs (maximum 1024 % 140 = 7) */ 3051 first = req->cpaddr; 3052 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++); 3053 to = min(first + 6, last); 3054 len = sprintf(page, KERN_ERR PRINTK_HEADER 3055 " Related CP in req: %p\n", req); 3056 dasd_eckd_dump_ccw_range(first, to, page + len); 3057 printk("%s", page); 3058 3059 /* print failing CCW area (maximum 4) */ 3060 /* scsw->cda is either valid or zero */ 3061 len = 0; 3062 from = ++to; 3063 fail = (struct ccw1 *)(addr_t) 3064 irb->scsw.cmd.cpa; /* failing CCW */ 3065 if (from < fail - 2) { 3066 from = fail - 2; /* there is a gap - print header */ 3067 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n"); 3068 } 3069 to = min(fail + 1, last); 3070 len += dasd_eckd_dump_ccw_range(from, to, page + len); 3071 3072 /* print last CCWs (maximum 2) */ 3073 from = max(from, ++to); 3074 if (from < last - 1) { 3075 from = last - 1; /* there is a gap - print header */ 3076 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n"); 3077 } 3078 len += dasd_eckd_dump_ccw_range(from, last, page + len); 3079 if (len > 0) 3080 printk("%s", page); 3081 } 3082 free_page((unsigned long) page); 3083 } 3084 3085 3086 /* 3087 * Print sense data from a tcw. 
3088 */ 3089 static void dasd_eckd_dump_sense_tcw(struct dasd_device *device, 3090 struct dasd_ccw_req *req, struct irb *irb) 3091 { 3092 char *page; 3093 int len, sl, sct, residual; 3094 3095 struct tsb *tsb; 3096 u8 *sense; 3097 3098 3099 page = (char *) get_zeroed_page(GFP_ATOMIC); 3100 if (page == NULL) { 3101 DBF_DEV_EVENT(DBF_WARNING, device, " %s", 3102 "No memory to dump sense data"); 3103 return; 3104 } 3105 /* dump the sense data */ 3106 len = sprintf(page, KERN_ERR PRINTK_HEADER 3107 " I/O status report for device %s:\n", 3108 dev_name(&device->cdev->dev)); 3109 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3110 " in req: %p CS: 0x%02X DS: 0x%02X " 3111 "fcxs: 0x%02X schxs: 0x%02X\n", req, 3112 scsw_cstat(&irb->scsw), scsw_dstat(&irb->scsw), 3113 irb->scsw.tm.fcxs, irb->scsw.tm.schxs); 3114 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3115 " device %s: Failing TCW: %p\n", 3116 dev_name(&device->cdev->dev), 3117 (void *) (addr_t) irb->scsw.tm.tcw); 3118 3119 tsb = NULL; 3120 sense = NULL; 3121 if (irb->scsw.tm.tcw) 3122 tsb = tcw_get_tsb( 3123 (struct tcw *)(unsigned long)irb->scsw.tm.tcw); 3124 3125 if (tsb && (irb->scsw.tm.fcxs == 0x01)) { 3126 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3127 " tsb->length %d\n", tsb->length); 3128 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3129 " tsb->flags %x\n", tsb->flags); 3130 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3131 " tsb->dcw_offset %d\n", tsb->dcw_offset); 3132 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3133 " tsb->count %d\n", tsb->count); 3134 residual = tsb->count - 28; 3135 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3136 " residual %d\n", residual); 3137 3138 switch (tsb->flags & 0x07) { 3139 case 1: /* tsa_iostat */ 3140 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3141 " tsb->tsa.iostat.dev_time %d\n", 3142 tsb->tsa.iostat.dev_time); 3143 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3144 " tsb->tsa.iostat.def_time %d\n", 3145 tsb->tsa.iostat.def_time); 3146 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3147 " tsb->tsa.iostat.queue_time %d\n", 3148 tsb->tsa.iostat.queue_time); 3149 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3150 " tsb->tsa.iostat.dev_busy_time %d\n", 3151 tsb->tsa.iostat.dev_busy_time); 3152 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3153 " tsb->tsa.iostat.dev_act_time %d\n", 3154 tsb->tsa.iostat.dev_act_time); 3155 sense = tsb->tsa.iostat.sense; 3156 break; 3157 case 2: /* ts_ddpc */ 3158 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3159 " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc); 3160 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3161 " tsb->tsa.ddpc.rcq: "); 3162 for (sl = 0; sl < 16; sl++) { 3163 for (sct = 0; sct < 8; sct++) { 3164 len += sprintf(page + len, " %02x", 3165 tsb->tsa.ddpc.rcq[sl]); 3166 } 3167 len += sprintf(page + len, "\n"); 3168 } 3169 sense = tsb->tsa.ddpc.sense; 3170 break; 3171 case 3: /* tsa_intrg */ 3172 len += sprintf(page + len, KERN_ERR PRINTK_HEADER 3173 " tsb->tsa.intrg.: not supportet yet \n"); 3174 break; 3175 } 3176 3177 if (sense) { 3178 for (sl = 0; sl < 4; sl++) { 3179 len += sprintf(page + len, 3180 KERN_ERR PRINTK_HEADER 3181 " Sense(hex) %2d-%2d:", 3182 (8 * sl), ((8 * sl) + 7)); 3183 for (sct = 0; sct < 8; sct++) { 3184 len += sprintf(page + len, " %02x", 3185 sense[8 * sl + sct]); 3186 } 3187 len += sprintf(page + len, "\n"); 3188 } 3189 3190 if (sense[27] & DASD_SENSE_BIT_0) { 3191 /* 24 Byte Sense Data */ 3192 sprintf(page + len, KERN_ERR PRINTK_HEADER 3193 " 24 Byte: %x MSG %x, " 
3194 "%s MSGb to SYSOP\n", 3195 sense[7] >> 4, sense[7] & 0x0f, 3196 sense[1] & 0x10 ? "" : "no"); 3197 } else { 3198 /* 32 Byte Sense Data */ 3199 sprintf(page + len, KERN_ERR PRINTK_HEADER 3200 " 32 Byte: Format: %x " 3201 "Exception class %x\n", 3202 sense[6] & 0x0f, sense[22] >> 4); 3203 } 3204 } else { 3205 sprintf(page + len, KERN_ERR PRINTK_HEADER 3206 " SORRY - NO VALID SENSE AVAILABLE\n"); 3207 } 3208 } else { 3209 sprintf(page + len, KERN_ERR PRINTK_HEADER 3210 " SORRY - NO TSB DATA AVAILABLE\n"); 3211 } 3212 printk("%s", page); 3213 free_page((unsigned long) page); 3214 } 3215 3216 static void dasd_eckd_dump_sense(struct dasd_device *device, 3217 struct dasd_ccw_req *req, struct irb *irb) 3218 { 3219 if (req && scsw_is_tm(&req->irb.scsw)) 3220 dasd_eckd_dump_sense_tcw(device, req, irb); 3221 else 3222 dasd_eckd_dump_sense_ccw(device, req, irb); 3223 } 3224 3225 3226 /* 3227 * max_blocks is dependent on the amount of storage that is available 3228 * in the static io buffer for each device. Currently each device has 3229 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has 3230 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use 3231 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In 3232 * addition we have one define extent ccw + 16 bytes of data and one 3233 * locate record ccw + 16 bytes of data. That makes: 3234 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum. 3235 * We want to fit two into the available memory so that we can immediately 3236 * start the next request if one finishes off. That makes 249.5 blocks 3237 * for one request. Give a little safety and the result is 240. 3238 */ 3239 static struct dasd_discipline dasd_eckd_discipline = { 3240 .owner = THIS_MODULE, 3241 .name = "ECKD", 3242 .ebcname = "ECKD", 3243 .max_blocks = 240, 3244 .check_device = dasd_eckd_check_characteristics, 3245 .uncheck_device = dasd_eckd_uncheck_device, 3246 .do_analysis = dasd_eckd_do_analysis, 3247 .ready_to_online = dasd_eckd_ready_to_online, 3248 .online_to_ready = dasd_eckd_online_to_ready, 3249 .fill_geometry = dasd_eckd_fill_geometry, 3250 .start_IO = dasd_start_IO, 3251 .term_IO = dasd_term_IO, 3252 .handle_terminated_request = dasd_eckd_handle_terminated_request, 3253 .format_device = dasd_eckd_format_device, 3254 .erp_action = dasd_eckd_erp_action, 3255 .erp_postaction = dasd_eckd_erp_postaction, 3256 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt, 3257 .build_cp = dasd_eckd_build_alias_cp, 3258 .free_cp = dasd_eckd_free_alias_cp, 3259 .dump_sense = dasd_eckd_dump_sense, 3260 .dump_sense_dbf = dasd_eckd_dump_sense_dbf, 3261 .fill_info = dasd_eckd_fill_info, 3262 .ioctl = dasd_eckd_ioctl, 3263 }; 3264 3265 static int __init 3266 dasd_eckd_init(void) 3267 { 3268 ASCEBC(dasd_eckd_discipline.ebcname, 4); 3269 return ccw_driver_register(&dasd_eckd_driver); 3270 } 3271 3272 static void __exit 3273 dasd_eckd_cleanup(void) 3274 { 3275 ccw_driver_unregister(&dasd_eckd_driver); 3276 } 3277 3278 module_init(dasd_eckd_init); 3279 module_exit(dasd_eckd_cleanup); 3280