1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Parallel SCSI (SPI) transport specific attributes exported to sysfs. 4 * 5 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 6 * Copyright (c) 2004, 2005 James Bottomley <James.Bottomley@SteelEye.com> 7 */ 8 #include <linux/ctype.h> 9 #include <linux/init.h> 10 #include <linux/module.h> 11 #include <linux/workqueue.h> 12 #include <linux/blkdev.h> 13 #include <linux/mutex.h> 14 #include <linux/sysfs.h> 15 #include <linux/slab.h> 16 #include <linux/suspend.h> 17 #include <scsi/scsi.h> 18 #include "scsi_priv.h" 19 #include <scsi/scsi_device.h> 20 #include <scsi/scsi_host.h> 21 #include <scsi/scsi_cmnd.h> 22 #include <scsi/scsi_eh.h> 23 #include <scsi/scsi_tcq.h> 24 #include <scsi/scsi_transport.h> 25 #include <scsi/scsi_transport_spi.h> 26 27 #define SPI_NUM_ATTRS 14 /* increase this if you add attributes */ 28 #define SPI_OTHER_ATTRS 1 /* Increase this if you add "always 29 * on" attributes */ 30 #define SPI_HOST_ATTRS 1 31 32 #define SPI_MAX_ECHO_BUFFER_SIZE 4096 33 34 #define DV_LOOPS 3 35 #define DV_TIMEOUT (10*HZ) 36 #define DV_RETRIES 3 /* should only need at most 37 * two cc/ua clears */ 38 39 /* Our blacklist flags */ 40 enum { 41 SPI_BLIST_NOIUS = (__force blist_flags_t)0x1, 42 }; 43 44 /* blacklist table, modelled on scsi_devinfo.c */ 45 static struct { 46 char *vendor; 47 char *model; 48 blist_flags_t flags; 49 } spi_static_device_list[] __initdata = { 50 {"HP", "Ultrium 3-SCSI", SPI_BLIST_NOIUS }, 51 {"IBM", "ULTRIUM-TD3", SPI_BLIST_NOIUS }, 52 {NULL, NULL, 0} 53 }; 54 55 /* Private data accessors (keep these out of the header file) */ 56 #define spi_dv_in_progress(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_in_progress) 57 #define spi_dv_mutex(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_mutex) 58 59 struct spi_internal { 60 struct scsi_transport_template t; 61 struct spi_function_template *f; 62 }; 63 64 #define to_spi_internal(tmpl) container_of(tmpl, struct spi_internal, t) 65 66 static const int ppr_to_ps[] = { 67 /* The PPR values 0-6 are reserved, fill them in when 68 * the committee defines them */ 69 -1, /* 0x00 */ 70 -1, /* 0x01 */ 71 -1, /* 0x02 */ 72 -1, /* 0x03 */ 73 -1, /* 0x04 */ 74 -1, /* 0x05 */ 75 -1, /* 0x06 */ 76 3125, /* 0x07 */ 77 6250, /* 0x08 */ 78 12500, /* 0x09 */ 79 25000, /* 0x0a */ 80 30300, /* 0x0b */ 81 50000, /* 0x0c */ 82 }; 83 /* The PPR values at which you calculate the period in ns by multiplying 84 * by 4 */ 85 #define SPI_STATIC_PPR 0x0c 86 87 static int sprint_frac(char *dest, int value, int denom) 88 { 89 int frac = value % denom; 90 int result = sprintf(dest, "%d", value / denom); 91 92 if (frac == 0) 93 return result; 94 dest[result++] = '.'; 95 96 do { 97 denom /= 10; 98 sprintf(dest + result, "%d", frac / denom); 99 result++; 100 frac %= denom; 101 } while (frac); 102 103 dest[result++] = '\0'; 104 return result; 105 } 106 107 static int spi_execute(struct scsi_device *sdev, const void *cmd, 108 enum dma_data_direction dir, 109 void *buffer, unsigned bufflen, 110 struct scsi_sense_hdr *sshdr) 111 { 112 int i, result; 113 unsigned char sense[SCSI_SENSE_BUFFERSIZE]; 114 struct scsi_sense_hdr sshdr_tmp; 115 116 if (!sshdr) 117 sshdr = &sshdr_tmp; 118 119 for(i = 0; i < DV_RETRIES; i++) { 120 result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense, 121 sshdr, DV_TIMEOUT, /* retries */ 1, 122 REQ_FAILFAST_DEV | 123 REQ_FAILFAST_TRANSPORT | 124 REQ_FAILFAST_DRIVER, 125 0, NULL); 126 if (driver_byte(result) != DRIVER_SENSE || 127 sshdr->sense_key != UNIT_ATTENTION) 128 break; 129 } 130 return result; 131 } 132 133 static struct { 134 enum spi_signal_type value; 135 char *name; 136 } signal_types[] = { 137 { SPI_SIGNAL_UNKNOWN, "unknown" }, 138 { SPI_SIGNAL_SE, "SE" }, 139 { SPI_SIGNAL_LVD, "LVD" }, 140 { SPI_SIGNAL_HVD, "HVD" }, 141 }; 142 143 static inline const char *spi_signal_to_string(enum spi_signal_type type) 144 { 145 int i; 146 147 for (i = 0; i < ARRAY_SIZE(signal_types); i++) { 148 if (type == signal_types[i].value) 149 return signal_types[i].name; 150 } 151 return NULL; 152 } 153 static inline enum spi_signal_type spi_signal_to_value(const char *name) 154 { 155 int i, len; 156 157 for (i = 0; i < ARRAY_SIZE(signal_types); i++) { 158 len = strlen(signal_types[i].name); 159 if (strncmp(name, signal_types[i].name, len) == 0 && 160 (name[len] == '\n' || name[len] == '\0')) 161 return signal_types[i].value; 162 } 163 return SPI_SIGNAL_UNKNOWN; 164 } 165 166 static int spi_host_setup(struct transport_container *tc, struct device *dev, 167 struct device *cdev) 168 { 169 struct Scsi_Host *shost = dev_to_shost(dev); 170 171 spi_signalling(shost) = SPI_SIGNAL_UNKNOWN; 172 173 return 0; 174 } 175 176 static int spi_host_configure(struct transport_container *tc, 177 struct device *dev, 178 struct device *cdev); 179 180 static DECLARE_TRANSPORT_CLASS(spi_host_class, 181 "spi_host", 182 spi_host_setup, 183 NULL, 184 spi_host_configure); 185 186 static int spi_host_match(struct attribute_container *cont, 187 struct device *dev) 188 { 189 struct Scsi_Host *shost; 190 191 if (!scsi_is_host_device(dev)) 192 return 0; 193 194 shost = dev_to_shost(dev); 195 if (!shost->transportt || shost->transportt->host_attrs.ac.class 196 != &spi_host_class.class) 197 return 0; 198 199 return &shost->transportt->host_attrs.ac == cont; 200 } 201 202 static int spi_target_configure(struct transport_container *tc, 203 struct device *dev, 204 struct device *cdev); 205 206 static int spi_device_configure(struct transport_container *tc, 207 struct device *dev, 208 struct device *cdev) 209 { 210 struct scsi_device *sdev = to_scsi_device(dev); 211 struct scsi_target *starget = sdev->sdev_target; 212 blist_flags_t bflags; 213 214 bflags = scsi_get_device_flags_keyed(sdev, &sdev->inquiry[8], 215 &sdev->inquiry[16], 216 SCSI_DEVINFO_SPI); 217 218 /* Populate the target capability fields with the values 219 * gleaned from the device inquiry */ 220 221 spi_support_sync(starget) = scsi_device_sync(sdev); 222 spi_support_wide(starget) = scsi_device_wide(sdev); 223 spi_support_dt(starget) = scsi_device_dt(sdev); 224 spi_support_dt_only(starget) = scsi_device_dt_only(sdev); 225 spi_support_ius(starget) = scsi_device_ius(sdev); 226 if (bflags & SPI_BLIST_NOIUS) { 227 dev_info(dev, "Information Units disabled by blacklist\n"); 228 spi_support_ius(starget) = 0; 229 } 230 spi_support_qas(starget) = scsi_device_qas(sdev); 231 232 return 0; 233 } 234 235 static int spi_setup_transport_attrs(struct transport_container *tc, 236 struct device *dev, 237 struct device *cdev) 238 { 239 struct scsi_target *starget = to_scsi_target(dev); 240 241 spi_period(starget) = -1; /* illegal value */ 242 spi_min_period(starget) = 0; 243 spi_offset(starget) = 0; /* async */ 244 spi_max_offset(starget) = 255; 245 spi_width(starget) = 0; /* narrow */ 246 spi_max_width(starget) = 1; 247 spi_iu(starget) = 0; /* no IU */ 248 spi_max_iu(starget) = 1; 249 spi_dt(starget) = 0; /* ST */ 250 spi_qas(starget) = 0; 251 spi_max_qas(starget) = 1; 252 spi_wr_flow(starget) = 0; 253 spi_rd_strm(starget) = 0; 254 spi_rti(starget) = 0; 255 spi_pcomp_en(starget) = 0; 256 spi_hold_mcs(starget) = 0; 257 spi_dv_pending(starget) = 0; 258 spi_dv_in_progress(starget) = 0; 259 spi_initial_dv(starget) = 0; 260 mutex_init(&spi_dv_mutex(starget)); 261 262 return 0; 263 } 264 265 #define spi_transport_show_simple(field, format_string) \ 266 \ 267 static ssize_t \ 268 show_spi_transport_##field(struct device *dev, \ 269 struct device_attribute *attr, char *buf) \ 270 { \ 271 struct scsi_target *starget = transport_class_to_starget(dev); \ 272 struct spi_transport_attrs *tp; \ 273 \ 274 tp = (struct spi_transport_attrs *)&starget->starget_data; \ 275 return snprintf(buf, 20, format_string, tp->field); \ 276 } 277 278 #define spi_transport_store_simple(field, format_string) \ 279 \ 280 static ssize_t \ 281 store_spi_transport_##field(struct device *dev, \ 282 struct device_attribute *attr, \ 283 const char *buf, size_t count) \ 284 { \ 285 int val; \ 286 struct scsi_target *starget = transport_class_to_starget(dev); \ 287 struct spi_transport_attrs *tp; \ 288 \ 289 tp = (struct spi_transport_attrs *)&starget->starget_data; \ 290 val = simple_strtoul(buf, NULL, 0); \ 291 tp->field = val; \ 292 return count; \ 293 } 294 295 #define spi_transport_show_function(field, format_string) \ 296 \ 297 static ssize_t \ 298 show_spi_transport_##field(struct device *dev, \ 299 struct device_attribute *attr, char *buf) \ 300 { \ 301 struct scsi_target *starget = transport_class_to_starget(dev); \ 302 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ 303 struct spi_transport_attrs *tp; \ 304 struct spi_internal *i = to_spi_internal(shost->transportt); \ 305 tp = (struct spi_transport_attrs *)&starget->starget_data; \ 306 if (i->f->get_##field) \ 307 i->f->get_##field(starget); \ 308 return snprintf(buf, 20, format_string, tp->field); \ 309 } 310 311 #define spi_transport_store_function(field, format_string) \ 312 static ssize_t \ 313 store_spi_transport_##field(struct device *dev, \ 314 struct device_attribute *attr, \ 315 const char *buf, size_t count) \ 316 { \ 317 int val; \ 318 struct scsi_target *starget = transport_class_to_starget(dev); \ 319 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ 320 struct spi_internal *i = to_spi_internal(shost->transportt); \ 321 \ 322 if (!i->f->set_##field) \ 323 return -EINVAL; \ 324 val = simple_strtoul(buf, NULL, 0); \ 325 i->f->set_##field(starget, val); \ 326 return count; \ 327 } 328 329 #define spi_transport_store_max(field, format_string) \ 330 static ssize_t \ 331 store_spi_transport_##field(struct device *dev, \ 332 struct device_attribute *attr, \ 333 const char *buf, size_t count) \ 334 { \ 335 int val; \ 336 struct scsi_target *starget = transport_class_to_starget(dev); \ 337 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); \ 338 struct spi_internal *i = to_spi_internal(shost->transportt); \ 339 struct spi_transport_attrs *tp \ 340 = (struct spi_transport_attrs *)&starget->starget_data; \ 341 \ 342 if (i->f->set_##field) \ 343 return -EINVAL; \ 344 val = simple_strtoul(buf, NULL, 0); \ 345 if (val > tp->max_##field) \ 346 val = tp->max_##field; \ 347 i->f->set_##field(starget, val); \ 348 return count; \ 349 } 350 351 #define spi_transport_rd_attr(field, format_string) \ 352 spi_transport_show_function(field, format_string) \ 353 spi_transport_store_function(field, format_string) \ 354 static DEVICE_ATTR(field, S_IRUGO, \ 355 show_spi_transport_##field, \ 356 store_spi_transport_##field); 357 358 #define spi_transport_simple_attr(field, format_string) \ 359 spi_transport_show_simple(field, format_string) \ 360 spi_transport_store_simple(field, format_string) \ 361 static DEVICE_ATTR(field, S_IRUGO, \ 362 show_spi_transport_##field, \ 363 store_spi_transport_##field); 364 365 #define spi_transport_max_attr(field, format_string) \ 366 spi_transport_show_function(field, format_string) \ 367 spi_transport_store_max(field, format_string) \ 368 spi_transport_simple_attr(max_##field, format_string) \ 369 static DEVICE_ATTR(field, S_IRUGO, \ 370 show_spi_transport_##field, \ 371 store_spi_transport_##field); 372 373 /* The Parallel SCSI Tranport Attributes: */ 374 spi_transport_max_attr(offset, "%d\n"); 375 spi_transport_max_attr(width, "%d\n"); 376 spi_transport_max_attr(iu, "%d\n"); 377 spi_transport_rd_attr(dt, "%d\n"); 378 spi_transport_max_attr(qas, "%d\n"); 379 spi_transport_rd_attr(wr_flow, "%d\n"); 380 spi_transport_rd_attr(rd_strm, "%d\n"); 381 spi_transport_rd_attr(rti, "%d\n"); 382 spi_transport_rd_attr(pcomp_en, "%d\n"); 383 spi_transport_rd_attr(hold_mcs, "%d\n"); 384 385 /* we only care about the first child device that's a real SCSI device 386 * so we return 1 to terminate the iteration when we find it */ 387 static int child_iter(struct device *dev, void *data) 388 { 389 if (!scsi_is_sdev_device(dev)) 390 return 0; 391 392 spi_dv_device(to_scsi_device(dev)); 393 return 1; 394 } 395 396 static ssize_t 397 store_spi_revalidate(struct device *dev, struct device_attribute *attr, 398 const char *buf, size_t count) 399 { 400 struct scsi_target *starget = transport_class_to_starget(dev); 401 402 device_for_each_child(&starget->dev, NULL, child_iter); 403 return count; 404 } 405 static DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate); 406 407 /* Translate the period into ns according to the current spec 408 * for SDTR/PPR messages */ 409 static int period_to_str(char *buf, int period) 410 { 411 int len, picosec; 412 413 if (period < 0 || period > 0xff) { 414 picosec = -1; 415 } else if (period <= SPI_STATIC_PPR) { 416 picosec = ppr_to_ps[period]; 417 } else { 418 picosec = period * 4000; 419 } 420 421 if (picosec == -1) { 422 len = sprintf(buf, "reserved"); 423 } else { 424 len = sprint_frac(buf, picosec, 1000); 425 } 426 427 return len; 428 } 429 430 static ssize_t 431 show_spi_transport_period_helper(char *buf, int period) 432 { 433 int len = period_to_str(buf, period); 434 buf[len++] = '\n'; 435 buf[len] = '\0'; 436 return len; 437 } 438 439 static ssize_t 440 store_spi_transport_period_helper(struct device *dev, const char *buf, 441 size_t count, int *periodp) 442 { 443 int j, picosec, period = -1; 444 char *endp; 445 446 picosec = simple_strtoul(buf, &endp, 10) * 1000; 447 if (*endp == '.') { 448 int mult = 100; 449 do { 450 endp++; 451 if (!isdigit(*endp)) 452 break; 453 picosec += (*endp - '0') * mult; 454 mult /= 10; 455 } while (mult > 0); 456 } 457 458 for (j = 0; j <= SPI_STATIC_PPR; j++) { 459 if (ppr_to_ps[j] < picosec) 460 continue; 461 period = j; 462 break; 463 } 464 465 if (period == -1) 466 period = picosec / 4000; 467 468 if (period > 0xff) 469 period = 0xff; 470 471 *periodp = period; 472 473 return count; 474 } 475 476 static ssize_t 477 show_spi_transport_period(struct device *dev, 478 struct device_attribute *attr, char *buf) 479 { 480 struct scsi_target *starget = transport_class_to_starget(dev); 481 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 482 struct spi_internal *i = to_spi_internal(shost->transportt); 483 struct spi_transport_attrs *tp = 484 (struct spi_transport_attrs *)&starget->starget_data; 485 486 if (i->f->get_period) 487 i->f->get_period(starget); 488 489 return show_spi_transport_period_helper(buf, tp->period); 490 } 491 492 static ssize_t 493 store_spi_transport_period(struct device *cdev, struct device_attribute *attr, 494 const char *buf, size_t count) 495 { 496 struct scsi_target *starget = transport_class_to_starget(cdev); 497 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 498 struct spi_internal *i = to_spi_internal(shost->transportt); 499 struct spi_transport_attrs *tp = 500 (struct spi_transport_attrs *)&starget->starget_data; 501 int period, retval; 502 503 if (!i->f->set_period) 504 return -EINVAL; 505 506 retval = store_spi_transport_period_helper(cdev, buf, count, &period); 507 508 if (period < tp->min_period) 509 period = tp->min_period; 510 511 i->f->set_period(starget, period); 512 513 return retval; 514 } 515 516 static DEVICE_ATTR(period, S_IRUGO, 517 show_spi_transport_period, 518 store_spi_transport_period); 519 520 static ssize_t 521 show_spi_transport_min_period(struct device *cdev, 522 struct device_attribute *attr, char *buf) 523 { 524 struct scsi_target *starget = transport_class_to_starget(cdev); 525 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 526 struct spi_internal *i = to_spi_internal(shost->transportt); 527 struct spi_transport_attrs *tp = 528 (struct spi_transport_attrs *)&starget->starget_data; 529 530 if (!i->f->set_period) 531 return -EINVAL; 532 533 return show_spi_transport_period_helper(buf, tp->min_period); 534 } 535 536 static ssize_t 537 store_spi_transport_min_period(struct device *cdev, 538 struct device_attribute *attr, 539 const char *buf, size_t count) 540 { 541 struct scsi_target *starget = transport_class_to_starget(cdev); 542 struct spi_transport_attrs *tp = 543 (struct spi_transport_attrs *)&starget->starget_data; 544 545 return store_spi_transport_period_helper(cdev, buf, count, 546 &tp->min_period); 547 } 548 549 550 static DEVICE_ATTR(min_period, S_IRUGO, 551 show_spi_transport_min_period, 552 store_spi_transport_min_period); 553 554 555 static ssize_t show_spi_host_signalling(struct device *cdev, 556 struct device_attribute *attr, 557 char *buf) 558 { 559 struct Scsi_Host *shost = transport_class_to_shost(cdev); 560 struct spi_internal *i = to_spi_internal(shost->transportt); 561 562 if (i->f->get_signalling) 563 i->f->get_signalling(shost); 564 565 return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost))); 566 } 567 static ssize_t store_spi_host_signalling(struct device *dev, 568 struct device_attribute *attr, 569 const char *buf, size_t count) 570 { 571 struct Scsi_Host *shost = transport_class_to_shost(dev); 572 struct spi_internal *i = to_spi_internal(shost->transportt); 573 enum spi_signal_type type = spi_signal_to_value(buf); 574 575 if (!i->f->set_signalling) 576 return -EINVAL; 577 578 if (type != SPI_SIGNAL_UNKNOWN) 579 i->f->set_signalling(shost, type); 580 581 return count; 582 } 583 static DEVICE_ATTR(signalling, S_IRUGO, 584 show_spi_host_signalling, 585 store_spi_host_signalling); 586 587 static ssize_t show_spi_host_width(struct device *cdev, 588 struct device_attribute *attr, 589 char *buf) 590 { 591 struct Scsi_Host *shost = transport_class_to_shost(cdev); 592 593 return sprintf(buf, "%s\n", shost->max_id == 16 ? "wide" : "narrow"); 594 } 595 static DEVICE_ATTR(host_width, S_IRUGO, 596 show_spi_host_width, NULL); 597 598 static ssize_t show_spi_host_hba_id(struct device *cdev, 599 struct device_attribute *attr, 600 char *buf) 601 { 602 struct Scsi_Host *shost = transport_class_to_shost(cdev); 603 604 return sprintf(buf, "%d\n", shost->this_id); 605 } 606 static DEVICE_ATTR(hba_id, S_IRUGO, 607 show_spi_host_hba_id, NULL); 608 609 #define DV_SET(x, y) \ 610 if(i->f->set_##x) \ 611 i->f->set_##x(sdev->sdev_target, y) 612 613 enum spi_compare_returns { 614 SPI_COMPARE_SUCCESS, 615 SPI_COMPARE_FAILURE, 616 SPI_COMPARE_SKIP_TEST, 617 }; 618 619 620 /* This is for read/write Domain Validation: If the device supports 621 * an echo buffer, we do read/write tests to it */ 622 static enum spi_compare_returns 623 spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer, 624 u8 *ptr, const int retries) 625 { 626 int len = ptr - buffer; 627 int j, k, r, result; 628 unsigned int pattern = 0x0000ffff; 629 struct scsi_sense_hdr sshdr; 630 631 const char spi_write_buffer[] = { 632 WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 633 }; 634 const char spi_read_buffer[] = { 635 READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0 636 }; 637 638 /* set up the pattern buffer. Doesn't matter if we spill 639 * slightly beyond since that's where the read buffer is */ 640 for (j = 0; j < len; ) { 641 642 /* fill the buffer with counting (test a) */ 643 for ( ; j < min(len, 32); j++) 644 buffer[j] = j; 645 k = j; 646 /* fill the buffer with alternating words of 0x0 and 647 * 0xffff (test b) */ 648 for ( ; j < min(len, k + 32); j += 2) { 649 u16 *word = (u16 *)&buffer[j]; 650 651 *word = (j & 0x02) ? 0x0000 : 0xffff; 652 } 653 k = j; 654 /* fill with crosstalk (alternating 0x5555 0xaaa) 655 * (test c) */ 656 for ( ; j < min(len, k + 32); j += 2) { 657 u16 *word = (u16 *)&buffer[j]; 658 659 *word = (j & 0x02) ? 0x5555 : 0xaaaa; 660 } 661 k = j; 662 /* fill with shifting bits (test d) */ 663 for ( ; j < min(len, k + 32); j += 4) { 664 u32 *word = (unsigned int *)&buffer[j]; 665 u32 roll = (pattern & 0x80000000) ? 1 : 0; 666 667 *word = pattern; 668 pattern = (pattern << 1) | roll; 669 } 670 /* don't bother with random data (test e) */ 671 } 672 673 for (r = 0; r < retries; r++) { 674 result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE, 675 buffer, len, &sshdr); 676 if(result || !scsi_device_online(sdev)) { 677 678 scsi_device_set_state(sdev, SDEV_QUIESCE); 679 if (scsi_sense_valid(&sshdr) 680 && sshdr.sense_key == ILLEGAL_REQUEST 681 /* INVALID FIELD IN CDB */ 682 && sshdr.asc == 0x24 && sshdr.ascq == 0x00) 683 /* This would mean that the drive lied 684 * to us about supporting an echo 685 * buffer (unfortunately some Western 686 * Digital drives do precisely this) 687 */ 688 return SPI_COMPARE_SKIP_TEST; 689 690 691 sdev_printk(KERN_ERR, sdev, "Write Buffer failure %x\n", result); 692 return SPI_COMPARE_FAILURE; 693 } 694 695 memset(ptr, 0, len); 696 spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE, 697 ptr, len, NULL); 698 scsi_device_set_state(sdev, SDEV_QUIESCE); 699 700 if (memcmp(buffer, ptr, len) != 0) 701 return SPI_COMPARE_FAILURE; 702 } 703 return SPI_COMPARE_SUCCESS; 704 } 705 706 /* This is for the simplest form of Domain Validation: a read test 707 * on the inquiry data from the device */ 708 static enum spi_compare_returns 709 spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer, 710 u8 *ptr, const int retries) 711 { 712 int r, result; 713 const int len = sdev->inquiry_len; 714 const char spi_inquiry[] = { 715 INQUIRY, 0, 0, 0, len, 0 716 }; 717 718 for (r = 0; r < retries; r++) { 719 memset(ptr, 0, len); 720 721 result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE, 722 ptr, len, NULL); 723 724 if(result || !scsi_device_online(sdev)) { 725 scsi_device_set_state(sdev, SDEV_QUIESCE); 726 return SPI_COMPARE_FAILURE; 727 } 728 729 /* If we don't have the inquiry data already, the 730 * first read gets it */ 731 if (ptr == buffer) { 732 ptr += len; 733 --r; 734 continue; 735 } 736 737 if (memcmp(buffer, ptr, len) != 0) 738 /* failure */ 739 return SPI_COMPARE_FAILURE; 740 } 741 return SPI_COMPARE_SUCCESS; 742 } 743 744 static enum spi_compare_returns 745 spi_dv_retrain(struct scsi_device *sdev, u8 *buffer, u8 *ptr, 746 enum spi_compare_returns 747 (*compare_fn)(struct scsi_device *, u8 *, u8 *, int)) 748 { 749 struct spi_internal *i = to_spi_internal(sdev->host->transportt); 750 struct scsi_target *starget = sdev->sdev_target; 751 int period = 0, prevperiod = 0; 752 enum spi_compare_returns retval; 753 754 755 for (;;) { 756 int newperiod; 757 retval = compare_fn(sdev, buffer, ptr, DV_LOOPS); 758 759 if (retval == SPI_COMPARE_SUCCESS 760 || retval == SPI_COMPARE_SKIP_TEST) 761 break; 762 763 /* OK, retrain, fallback */ 764 if (i->f->get_iu) 765 i->f->get_iu(starget); 766 if (i->f->get_qas) 767 i->f->get_qas(starget); 768 if (i->f->get_period) 769 i->f->get_period(sdev->sdev_target); 770 771 /* Here's the fallback sequence; first try turning off 772 * IU, then QAS (if we can control them), then finally 773 * fall down the periods */ 774 if (i->f->set_iu && spi_iu(starget)) { 775 starget_printk(KERN_ERR, starget, "Domain Validation Disabling Information Units\n"); 776 DV_SET(iu, 0); 777 } else if (i->f->set_qas && spi_qas(starget)) { 778 starget_printk(KERN_ERR, starget, "Domain Validation Disabling Quick Arbitration and Selection\n"); 779 DV_SET(qas, 0); 780 } else { 781 newperiod = spi_period(starget); 782 period = newperiod > period ? newperiod : period; 783 if (period < 0x0d) 784 period++; 785 else 786 period += period >> 1; 787 788 if (unlikely(period > 0xff || period == prevperiod)) { 789 /* Total failure; set to async and return */ 790 starget_printk(KERN_ERR, starget, "Domain Validation Failure, dropping back to Asynchronous\n"); 791 DV_SET(offset, 0); 792 return SPI_COMPARE_FAILURE; 793 } 794 starget_printk(KERN_ERR, starget, "Domain Validation detected failure, dropping back\n"); 795 DV_SET(period, period); 796 prevperiod = period; 797 } 798 } 799 return retval; 800 } 801 802 static int 803 spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer) 804 { 805 int l, result; 806 807 /* first off do a test unit ready. This can error out 808 * because of reservations or some other reason. If it 809 * fails, the device won't let us write to the echo buffer 810 * so just return failure */ 811 812 static const char spi_test_unit_ready[] = { 813 TEST_UNIT_READY, 0, 0, 0, 0, 0 814 }; 815 816 static const char spi_read_buffer_descriptor[] = { 817 READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0 818 }; 819 820 821 /* We send a set of three TURs to clear any outstanding 822 * unit attention conditions if they exist (Otherwise the 823 * buffer tests won't be happy). If the TUR still fails 824 * (reservation conflict, device not ready, etc) just 825 * skip the write tests */ 826 for (l = 0; ; l++) { 827 result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE, 828 NULL, 0, NULL); 829 830 if(result) { 831 if(l >= 3) 832 return 0; 833 } else { 834 /* TUR succeeded */ 835 break; 836 } 837 } 838 839 result = spi_execute(sdev, spi_read_buffer_descriptor, 840 DMA_FROM_DEVICE, buffer, 4, NULL); 841 842 if (result) 843 /* Device has no echo buffer */ 844 return 0; 845 846 return buffer[3] + ((buffer[2] & 0x1f) << 8); 847 } 848 849 static void 850 spi_dv_device_internal(struct scsi_device *sdev, u8 *buffer) 851 { 852 struct spi_internal *i = to_spi_internal(sdev->host->transportt); 853 struct scsi_target *starget = sdev->sdev_target; 854 struct Scsi_Host *shost = sdev->host; 855 int len = sdev->inquiry_len; 856 int min_period = spi_min_period(starget); 857 int max_width = spi_max_width(starget); 858 /* first set us up for narrow async */ 859 DV_SET(offset, 0); 860 DV_SET(width, 0); 861 862 if (spi_dv_device_compare_inquiry(sdev, buffer, buffer, DV_LOOPS) 863 != SPI_COMPARE_SUCCESS) { 864 starget_printk(KERN_ERR, starget, "Domain Validation Initial Inquiry Failed\n"); 865 /* FIXME: should probably offline the device here? */ 866 return; 867 } 868 869 if (!spi_support_wide(starget)) { 870 spi_max_width(starget) = 0; 871 max_width = 0; 872 } 873 874 /* test width */ 875 if (i->f->set_width && max_width) { 876 i->f->set_width(starget, 1); 877 878 if (spi_dv_device_compare_inquiry(sdev, buffer, 879 buffer + len, 880 DV_LOOPS) 881 != SPI_COMPARE_SUCCESS) { 882 starget_printk(KERN_ERR, starget, "Wide Transfers Fail\n"); 883 i->f->set_width(starget, 0); 884 /* Make sure we don't force wide back on by asking 885 * for a transfer period that requires it */ 886 max_width = 0; 887 if (min_period < 10) 888 min_period = 10; 889 } 890 } 891 892 if (!i->f->set_period) 893 return; 894 895 /* device can't handle synchronous */ 896 if (!spi_support_sync(starget) && !spi_support_dt(starget)) 897 return; 898 899 /* len == -1 is the signal that we need to ascertain the 900 * presence of an echo buffer before trying to use it. len == 901 * 0 means we don't have an echo buffer */ 902 len = -1; 903 904 retry: 905 906 /* now set up to the maximum */ 907 DV_SET(offset, spi_max_offset(starget)); 908 DV_SET(period, min_period); 909 910 /* try QAS requests; this should be harmless to set if the 911 * target supports it */ 912 if (spi_support_qas(starget) && spi_max_qas(starget)) { 913 DV_SET(qas, 1); 914 } else { 915 DV_SET(qas, 0); 916 } 917 918 if (spi_support_ius(starget) && spi_max_iu(starget) && 919 min_period < 9) { 920 /* This u320 (or u640). Set IU transfers */ 921 DV_SET(iu, 1); 922 /* Then set the optional parameters */ 923 DV_SET(rd_strm, 1); 924 DV_SET(wr_flow, 1); 925 DV_SET(rti, 1); 926 if (min_period == 8) 927 DV_SET(pcomp_en, 1); 928 } else { 929 DV_SET(iu, 0); 930 } 931 932 /* now that we've done all this, actually check the bus 933 * signal type (if known). Some devices are stupid on 934 * a SE bus and still claim they can try LVD only settings */ 935 if (i->f->get_signalling) 936 i->f->get_signalling(shost); 937 if (spi_signalling(shost) == SPI_SIGNAL_SE || 938 spi_signalling(shost) == SPI_SIGNAL_HVD || 939 !spi_support_dt(starget)) { 940 DV_SET(dt, 0); 941 } else { 942 DV_SET(dt, 1); 943 } 944 /* set width last because it will pull all the other 945 * parameters down to required values */ 946 DV_SET(width, max_width); 947 948 /* Do the read only INQUIRY tests */ 949 spi_dv_retrain(sdev, buffer, buffer + sdev->inquiry_len, 950 spi_dv_device_compare_inquiry); 951 /* See if we actually managed to negotiate and sustain DT */ 952 if (i->f->get_dt) 953 i->f->get_dt(starget); 954 955 /* see if the device has an echo buffer. If it does we can do 956 * the SPI pattern write tests. Because of some broken 957 * devices, we *only* try this on a device that has actually 958 * negotiated DT */ 959 960 if (len == -1 && spi_dt(starget)) 961 len = spi_dv_device_get_echo_buffer(sdev, buffer); 962 963 if (len <= 0) { 964 starget_printk(KERN_INFO, starget, "Domain Validation skipping write tests\n"); 965 return; 966 } 967 968 if (len > SPI_MAX_ECHO_BUFFER_SIZE) { 969 starget_printk(KERN_WARNING, starget, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE); 970 len = SPI_MAX_ECHO_BUFFER_SIZE; 971 } 972 973 if (spi_dv_retrain(sdev, buffer, buffer + len, 974 spi_dv_device_echo_buffer) 975 == SPI_COMPARE_SKIP_TEST) { 976 /* OK, the stupid drive can't do a write echo buffer 977 * test after all, fall back to the read tests */ 978 len = 0; 979 goto retry; 980 } 981 } 982 983 984 /** spi_dv_device - Do Domain Validation on the device 985 * @sdev: scsi device to validate 986 * 987 * Performs the domain validation on the given device in the 988 * current execution thread. Since DV operations may sleep, 989 * the current thread must have user context. Also no SCSI 990 * related locks that would deadlock I/O issued by the DV may 991 * be held. 992 */ 993 void 994 spi_dv_device(struct scsi_device *sdev) 995 { 996 struct scsi_target *starget = sdev->sdev_target; 997 u8 *buffer; 998 const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; 999 1000 /* 1001 * Because this function and the power management code both call 1002 * scsi_device_quiesce(), it is not safe to perform domain validation 1003 * while suspend or resume is in progress. Hence the 1004 * lock/unlock_system_sleep() calls. 1005 */ 1006 lock_system_sleep(); 1007 1008 if (unlikely(spi_dv_in_progress(starget))) 1009 goto unlock; 1010 1011 if (unlikely(scsi_device_get(sdev))) 1012 goto unlock; 1013 1014 spi_dv_in_progress(starget) = 1; 1015 1016 buffer = kzalloc(len, GFP_KERNEL); 1017 1018 if (unlikely(!buffer)) 1019 goto out_put; 1020 1021 /* We need to verify that the actual device will quiesce; the 1022 * later target quiesce is just a nice to have */ 1023 if (unlikely(scsi_device_quiesce(sdev))) 1024 goto out_free; 1025 1026 scsi_target_quiesce(starget); 1027 1028 spi_dv_pending(starget) = 1; 1029 mutex_lock(&spi_dv_mutex(starget)); 1030 1031 starget_printk(KERN_INFO, starget, "Beginning Domain Validation\n"); 1032 1033 spi_dv_device_internal(sdev, buffer); 1034 1035 starget_printk(KERN_INFO, starget, "Ending Domain Validation\n"); 1036 1037 mutex_unlock(&spi_dv_mutex(starget)); 1038 spi_dv_pending(starget) = 0; 1039 1040 scsi_target_resume(starget); 1041 1042 spi_initial_dv(starget) = 1; 1043 1044 out_free: 1045 kfree(buffer); 1046 out_put: 1047 spi_dv_in_progress(starget) = 0; 1048 scsi_device_put(sdev); 1049 unlock: 1050 unlock_system_sleep(); 1051 } 1052 EXPORT_SYMBOL(spi_dv_device); 1053 1054 struct work_queue_wrapper { 1055 struct work_struct work; 1056 struct scsi_device *sdev; 1057 }; 1058 1059 static void 1060 spi_dv_device_work_wrapper(struct work_struct *work) 1061 { 1062 struct work_queue_wrapper *wqw = 1063 container_of(work, struct work_queue_wrapper, work); 1064 struct scsi_device *sdev = wqw->sdev; 1065 1066 kfree(wqw); 1067 spi_dv_device(sdev); 1068 spi_dv_pending(sdev->sdev_target) = 0; 1069 scsi_device_put(sdev); 1070 } 1071 1072 1073 /** 1074 * spi_schedule_dv_device - schedule domain validation to occur on the device 1075 * @sdev: The device to validate 1076 * 1077 * Identical to spi_dv_device() above, except that the DV will be 1078 * scheduled to occur in a workqueue later. All memory allocations 1079 * are atomic, so may be called from any context including those holding 1080 * SCSI locks. 1081 */ 1082 void 1083 spi_schedule_dv_device(struct scsi_device *sdev) 1084 { 1085 struct work_queue_wrapper *wqw = 1086 kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC); 1087 1088 if (unlikely(!wqw)) 1089 return; 1090 1091 if (unlikely(spi_dv_pending(sdev->sdev_target))) { 1092 kfree(wqw); 1093 return; 1094 } 1095 /* Set pending early (dv_device doesn't check it, only sets it) */ 1096 spi_dv_pending(sdev->sdev_target) = 1; 1097 if (unlikely(scsi_device_get(sdev))) { 1098 kfree(wqw); 1099 spi_dv_pending(sdev->sdev_target) = 0; 1100 return; 1101 } 1102 1103 INIT_WORK(&wqw->work, spi_dv_device_work_wrapper); 1104 wqw->sdev = sdev; 1105 1106 schedule_work(&wqw->work); 1107 } 1108 EXPORT_SYMBOL(spi_schedule_dv_device); 1109 1110 /** 1111 * spi_display_xfer_agreement - Print the current target transfer agreement 1112 * @starget: The target for which to display the agreement 1113 * 1114 * Each SPI port is required to maintain a transfer agreement for each 1115 * other port on the bus. This function prints a one-line summary of 1116 * the current agreement; more detailed information is available in sysfs. 1117 */ 1118 void spi_display_xfer_agreement(struct scsi_target *starget) 1119 { 1120 struct spi_transport_attrs *tp; 1121 tp = (struct spi_transport_attrs *)&starget->starget_data; 1122 1123 if (tp->offset > 0 && tp->period > 0) { 1124 unsigned int picosec, kb100; 1125 char *scsi = "FAST-?"; 1126 char tmp[8]; 1127 1128 if (tp->period <= SPI_STATIC_PPR) { 1129 picosec = ppr_to_ps[tp->period]; 1130 switch (tp->period) { 1131 case 7: scsi = "FAST-320"; break; 1132 case 8: scsi = "FAST-160"; break; 1133 case 9: scsi = "FAST-80"; break; 1134 case 10: 1135 case 11: scsi = "FAST-40"; break; 1136 case 12: scsi = "FAST-20"; break; 1137 } 1138 } else { 1139 picosec = tp->period * 4000; 1140 if (tp->period < 25) 1141 scsi = "FAST-20"; 1142 else if (tp->period < 50) 1143 scsi = "FAST-10"; 1144 else 1145 scsi = "FAST-5"; 1146 } 1147 1148 kb100 = (10000000 + picosec / 2) / picosec; 1149 if (tp->width) 1150 kb100 *= 2; 1151 sprint_frac(tmp, picosec, 1000); 1152 1153 dev_info(&starget->dev, 1154 "%s %sSCSI %d.%d MB/s %s%s%s%s%s%s%s%s (%s ns, offset %d)\n", 1155 scsi, tp->width ? "WIDE " : "", kb100/10, kb100 % 10, 1156 tp->dt ? "DT" : "ST", 1157 tp->iu ? " IU" : "", 1158 tp->qas ? " QAS" : "", 1159 tp->rd_strm ? " RDSTRM" : "", 1160 tp->rti ? " RTI" : "", 1161 tp->wr_flow ? " WRFLOW" : "", 1162 tp->pcomp_en ? " PCOMP" : "", 1163 tp->hold_mcs ? " HMCS" : "", 1164 tmp, tp->offset); 1165 } else { 1166 dev_info(&starget->dev, "%sasynchronous\n", 1167 tp->width ? "wide " : ""); 1168 } 1169 } 1170 EXPORT_SYMBOL(spi_display_xfer_agreement); 1171 1172 int spi_populate_width_msg(unsigned char *msg, int width) 1173 { 1174 msg[0] = EXTENDED_MESSAGE; 1175 msg[1] = 2; 1176 msg[2] = EXTENDED_WDTR; 1177 msg[3] = width; 1178 return 4; 1179 } 1180 EXPORT_SYMBOL_GPL(spi_populate_width_msg); 1181 1182 int spi_populate_sync_msg(unsigned char *msg, int period, int offset) 1183 { 1184 msg[0] = EXTENDED_MESSAGE; 1185 msg[1] = 3; 1186 msg[2] = EXTENDED_SDTR; 1187 msg[3] = period; 1188 msg[4] = offset; 1189 return 5; 1190 } 1191 EXPORT_SYMBOL_GPL(spi_populate_sync_msg); 1192 1193 int spi_populate_ppr_msg(unsigned char *msg, int period, int offset, 1194 int width, int options) 1195 { 1196 msg[0] = EXTENDED_MESSAGE; 1197 msg[1] = 6; 1198 msg[2] = EXTENDED_PPR; 1199 msg[3] = period; 1200 msg[4] = 0; 1201 msg[5] = offset; 1202 msg[6] = width; 1203 msg[7] = options; 1204 return 8; 1205 } 1206 EXPORT_SYMBOL_GPL(spi_populate_ppr_msg); 1207 1208 /** 1209 * spi_populate_tag_msg - place a tag message in a buffer 1210 * @msg: pointer to the area to place the tag 1211 * @cmd: pointer to the scsi command for the tag 1212 * 1213 * Notes: 1214 * designed to create the correct type of tag message for the 1215 * particular request. Returns the size of the tag message. 1216 * May return 0 if TCQ is disabled for this device. 1217 **/ 1218 int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd) 1219 { 1220 if (cmd->flags & SCMD_TAGGED) { 1221 *msg++ = SIMPLE_QUEUE_TAG; 1222 *msg++ = cmd->request->tag; 1223 return 2; 1224 } 1225 1226 return 0; 1227 } 1228 EXPORT_SYMBOL_GPL(spi_populate_tag_msg); 1229 1230 #ifdef CONFIG_SCSI_CONSTANTS 1231 static const char * const one_byte_msgs[] = { 1232 /* 0x00 */ "Task Complete", NULL /* Extended Message */, "Save Pointers", 1233 /* 0x03 */ "Restore Pointers", "Disconnect", "Initiator Error", 1234 /* 0x06 */ "Abort Task Set", "Message Reject", "Nop", "Message Parity Error", 1235 /* 0x0a */ "Linked Command Complete", "Linked Command Complete w/flag", 1236 /* 0x0c */ "Target Reset", "Abort Task", "Clear Task Set", 1237 /* 0x0f */ "Initiate Recovery", "Release Recovery", 1238 /* 0x11 */ "Terminate Process", "Continue Task", "Target Transfer Disable", 1239 /* 0x14 */ NULL, NULL, "Clear ACA", "LUN Reset" 1240 }; 1241 1242 static const char * const two_byte_msgs[] = { 1243 /* 0x20 */ "Simple Queue Tag", "Head of Queue Tag", "Ordered Queue Tag", 1244 /* 0x23 */ "Ignore Wide Residue", "ACA" 1245 }; 1246 1247 static const char * const extended_msgs[] = { 1248 /* 0x00 */ "Modify Data Pointer", "Synchronous Data Transfer Request", 1249 /* 0x02 */ "SCSI-I Extended Identify", "Wide Data Transfer Request", 1250 /* 0x04 */ "Parallel Protocol Request", "Modify Bidirectional Data Pointer" 1251 }; 1252 1253 static void print_nego(const unsigned char *msg, int per, int off, int width) 1254 { 1255 if (per) { 1256 char buf[20]; 1257 period_to_str(buf, msg[per]); 1258 printk("period = %s ns ", buf); 1259 } 1260 1261 if (off) 1262 printk("offset = %d ", msg[off]); 1263 if (width) 1264 printk("width = %d ", 8 << msg[width]); 1265 } 1266 1267 static void print_ptr(const unsigned char *msg, int msb, const char *desc) 1268 { 1269 int ptr = (msg[msb] << 24) | (msg[msb+1] << 16) | (msg[msb+2] << 8) | 1270 msg[msb+3]; 1271 printk("%s = %d ", desc, ptr); 1272 } 1273 1274 int spi_print_msg(const unsigned char *msg) 1275 { 1276 int len = 1, i; 1277 if (msg[0] == EXTENDED_MESSAGE) { 1278 len = 2 + msg[1]; 1279 if (len == 2) 1280 len += 256; 1281 if (msg[2] < ARRAY_SIZE(extended_msgs)) 1282 printk ("%s ", extended_msgs[msg[2]]); 1283 else 1284 printk ("Extended Message, reserved code (0x%02x) ", 1285 (int) msg[2]); 1286 switch (msg[2]) { 1287 case EXTENDED_MODIFY_DATA_POINTER: 1288 print_ptr(msg, 3, "pointer"); 1289 break; 1290 case EXTENDED_SDTR: 1291 print_nego(msg, 3, 4, 0); 1292 break; 1293 case EXTENDED_WDTR: 1294 print_nego(msg, 0, 0, 3); 1295 break; 1296 case EXTENDED_PPR: 1297 print_nego(msg, 3, 5, 6); 1298 break; 1299 case EXTENDED_MODIFY_BIDI_DATA_PTR: 1300 print_ptr(msg, 3, "out"); 1301 print_ptr(msg, 7, "in"); 1302 break; 1303 default: 1304 for (i = 2; i < len; ++i) 1305 printk("%02x ", msg[i]); 1306 } 1307 /* Identify */ 1308 } else if (msg[0] & 0x80) { 1309 printk("Identify disconnect %sallowed %s %d ", 1310 (msg[0] & 0x40) ? "" : "not ", 1311 (msg[0] & 0x20) ? "target routine" : "lun", 1312 msg[0] & 0x7); 1313 /* Normal One byte */ 1314 } else if (msg[0] < 0x1f) { 1315 if (msg[0] < ARRAY_SIZE(one_byte_msgs) && one_byte_msgs[msg[0]]) 1316 printk("%s ", one_byte_msgs[msg[0]]); 1317 else 1318 printk("reserved (%02x) ", msg[0]); 1319 } else if (msg[0] == 0x55) { 1320 printk("QAS Request "); 1321 /* Two byte */ 1322 } else if (msg[0] <= 0x2f) { 1323 if ((msg[0] - 0x20) < ARRAY_SIZE(two_byte_msgs)) 1324 printk("%s %02x ", two_byte_msgs[msg[0] - 0x20], 1325 msg[1]); 1326 else 1327 printk("reserved two byte (%02x %02x) ", 1328 msg[0], msg[1]); 1329 len = 2; 1330 } else 1331 printk("reserved "); 1332 return len; 1333 } 1334 EXPORT_SYMBOL(spi_print_msg); 1335 1336 #else /* ifndef CONFIG_SCSI_CONSTANTS */ 1337 1338 int spi_print_msg(const unsigned char *msg) 1339 { 1340 int len = 1, i; 1341 1342 if (msg[0] == EXTENDED_MESSAGE) { 1343 len = 2 + msg[1]; 1344 if (len == 2) 1345 len += 256; 1346 for (i = 0; i < len; ++i) 1347 printk("%02x ", msg[i]); 1348 /* Identify */ 1349 } else if (msg[0] & 0x80) { 1350 printk("%02x ", msg[0]); 1351 /* Normal One byte */ 1352 } else if ((msg[0] < 0x1f) || (msg[0] == 0x55)) { 1353 printk("%02x ", msg[0]); 1354 /* Two byte */ 1355 } else if (msg[0] <= 0x2f) { 1356 printk("%02x %02x", msg[0], msg[1]); 1357 len = 2; 1358 } else 1359 printk("%02x ", msg[0]); 1360 return len; 1361 } 1362 EXPORT_SYMBOL(spi_print_msg); 1363 #endif /* ! CONFIG_SCSI_CONSTANTS */ 1364 1365 static int spi_device_match(struct attribute_container *cont, 1366 struct device *dev) 1367 { 1368 struct scsi_device *sdev; 1369 struct Scsi_Host *shost; 1370 struct spi_internal *i; 1371 1372 if (!scsi_is_sdev_device(dev)) 1373 return 0; 1374 1375 sdev = to_scsi_device(dev); 1376 shost = sdev->host; 1377 if (!shost->transportt || shost->transportt->host_attrs.ac.class 1378 != &spi_host_class.class) 1379 return 0; 1380 /* Note: this class has no device attributes, so it has 1381 * no per-HBA allocation and thus we don't need to distinguish 1382 * the attribute containers for the device */ 1383 i = to_spi_internal(shost->transportt); 1384 if (i->f->deny_binding && i->f->deny_binding(sdev->sdev_target)) 1385 return 0; 1386 return 1; 1387 } 1388 1389 static int spi_target_match(struct attribute_container *cont, 1390 struct device *dev) 1391 { 1392 struct Scsi_Host *shost; 1393 struct scsi_target *starget; 1394 struct spi_internal *i; 1395 1396 if (!scsi_is_target_device(dev)) 1397 return 0; 1398 1399 shost = dev_to_shost(dev->parent); 1400 if (!shost->transportt || shost->transportt->host_attrs.ac.class 1401 != &spi_host_class.class) 1402 return 0; 1403 1404 i = to_spi_internal(shost->transportt); 1405 starget = to_scsi_target(dev); 1406 1407 if (i->f->deny_binding && i->f->deny_binding(starget)) 1408 return 0; 1409 1410 return &i->t.target_attrs.ac == cont; 1411 } 1412 1413 static DECLARE_TRANSPORT_CLASS(spi_transport_class, 1414 "spi_transport", 1415 spi_setup_transport_attrs, 1416 NULL, 1417 spi_target_configure); 1418 1419 static DECLARE_ANON_TRANSPORT_CLASS(spi_device_class, 1420 spi_device_match, 1421 spi_device_configure); 1422 1423 static struct attribute *host_attributes[] = { 1424 &dev_attr_signalling.attr, 1425 &dev_attr_host_width.attr, 1426 &dev_attr_hba_id.attr, 1427 NULL 1428 }; 1429 1430 static struct attribute_group host_attribute_group = { 1431 .attrs = host_attributes, 1432 }; 1433 1434 static int spi_host_configure(struct transport_container *tc, 1435 struct device *dev, 1436 struct device *cdev) 1437 { 1438 struct kobject *kobj = &cdev->kobj; 1439 struct Scsi_Host *shost = transport_class_to_shost(cdev); 1440 struct spi_internal *si = to_spi_internal(shost->transportt); 1441 struct attribute *attr = &dev_attr_signalling.attr; 1442 int rc = 0; 1443 1444 if (si->f->set_signalling) 1445 rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR); 1446 1447 return rc; 1448 } 1449 1450 /* returns true if we should be showing the variable. Also 1451 * overloads the return by setting 1<<1 if the attribute should 1452 * be writeable */ 1453 #define TARGET_ATTRIBUTE_HELPER(name) \ 1454 (si->f->show_##name ? S_IRUGO : 0) | \ 1455 (si->f->set_##name ? S_IWUSR : 0) 1456 1457 static umode_t target_attribute_is_visible(struct kobject *kobj, 1458 struct attribute *attr, int i) 1459 { 1460 struct device *cdev = container_of(kobj, struct device, kobj); 1461 struct scsi_target *starget = transport_class_to_starget(cdev); 1462 struct Scsi_Host *shost = transport_class_to_shost(cdev); 1463 struct spi_internal *si = to_spi_internal(shost->transportt); 1464 1465 if (attr == &dev_attr_period.attr && 1466 spi_support_sync(starget)) 1467 return TARGET_ATTRIBUTE_HELPER(period); 1468 else if (attr == &dev_attr_min_period.attr && 1469 spi_support_sync(starget)) 1470 return TARGET_ATTRIBUTE_HELPER(period); 1471 else if (attr == &dev_attr_offset.attr && 1472 spi_support_sync(starget)) 1473 return TARGET_ATTRIBUTE_HELPER(offset); 1474 else if (attr == &dev_attr_max_offset.attr && 1475 spi_support_sync(starget)) 1476 return TARGET_ATTRIBUTE_HELPER(offset); 1477 else if (attr == &dev_attr_width.attr && 1478 spi_support_wide(starget)) 1479 return TARGET_ATTRIBUTE_HELPER(width); 1480 else if (attr == &dev_attr_max_width.attr && 1481 spi_support_wide(starget)) 1482 return TARGET_ATTRIBUTE_HELPER(width); 1483 else if (attr == &dev_attr_iu.attr && 1484 spi_support_ius(starget)) 1485 return TARGET_ATTRIBUTE_HELPER(iu); 1486 else if (attr == &dev_attr_max_iu.attr && 1487 spi_support_ius(starget)) 1488 return TARGET_ATTRIBUTE_HELPER(iu); 1489 else if (attr == &dev_attr_dt.attr && 1490 spi_support_dt(starget)) 1491 return TARGET_ATTRIBUTE_HELPER(dt); 1492 else if (attr == &dev_attr_qas.attr && 1493 spi_support_qas(starget)) 1494 return TARGET_ATTRIBUTE_HELPER(qas); 1495 else if (attr == &dev_attr_max_qas.attr && 1496 spi_support_qas(starget)) 1497 return TARGET_ATTRIBUTE_HELPER(qas); 1498 else if (attr == &dev_attr_wr_flow.attr && 1499 spi_support_ius(starget)) 1500 return TARGET_ATTRIBUTE_HELPER(wr_flow); 1501 else if (attr == &dev_attr_rd_strm.attr && 1502 spi_support_ius(starget)) 1503 return TARGET_ATTRIBUTE_HELPER(rd_strm); 1504 else if (attr == &dev_attr_rti.attr && 1505 spi_support_ius(starget)) 1506 return TARGET_ATTRIBUTE_HELPER(rti); 1507 else if (attr == &dev_attr_pcomp_en.attr && 1508 spi_support_ius(starget)) 1509 return TARGET_ATTRIBUTE_HELPER(pcomp_en); 1510 else if (attr == &dev_attr_hold_mcs.attr && 1511 spi_support_ius(starget)) 1512 return TARGET_ATTRIBUTE_HELPER(hold_mcs); 1513 else if (attr == &dev_attr_revalidate.attr) 1514 return S_IWUSR; 1515 1516 return 0; 1517 } 1518 1519 static struct attribute *target_attributes[] = { 1520 &dev_attr_period.attr, 1521 &dev_attr_min_period.attr, 1522 &dev_attr_offset.attr, 1523 &dev_attr_max_offset.attr, 1524 &dev_attr_width.attr, 1525 &dev_attr_max_width.attr, 1526 &dev_attr_iu.attr, 1527 &dev_attr_max_iu.attr, 1528 &dev_attr_dt.attr, 1529 &dev_attr_qas.attr, 1530 &dev_attr_max_qas.attr, 1531 &dev_attr_wr_flow.attr, 1532 &dev_attr_rd_strm.attr, 1533 &dev_attr_rti.attr, 1534 &dev_attr_pcomp_en.attr, 1535 &dev_attr_hold_mcs.attr, 1536 &dev_attr_revalidate.attr, 1537 NULL 1538 }; 1539 1540 static struct attribute_group target_attribute_group = { 1541 .attrs = target_attributes, 1542 .is_visible = target_attribute_is_visible, 1543 }; 1544 1545 static int spi_target_configure(struct transport_container *tc, 1546 struct device *dev, 1547 struct device *cdev) 1548 { 1549 struct kobject *kobj = &cdev->kobj; 1550 1551 /* force an update based on parameters read from the device */ 1552 sysfs_update_group(kobj, &target_attribute_group); 1553 1554 return 0; 1555 } 1556 1557 struct scsi_transport_template * 1558 spi_attach_transport(struct spi_function_template *ft) 1559 { 1560 struct spi_internal *i = kzalloc(sizeof(struct spi_internal), 1561 GFP_KERNEL); 1562 1563 if (unlikely(!i)) 1564 return NULL; 1565 1566 i->t.target_attrs.ac.class = &spi_transport_class.class; 1567 i->t.target_attrs.ac.grp = &target_attribute_group; 1568 i->t.target_attrs.ac.match = spi_target_match; 1569 transport_container_register(&i->t.target_attrs); 1570 i->t.target_size = sizeof(struct spi_transport_attrs); 1571 i->t.host_attrs.ac.class = &spi_host_class.class; 1572 i->t.host_attrs.ac.grp = &host_attribute_group; 1573 i->t.host_attrs.ac.match = spi_host_match; 1574 transport_container_register(&i->t.host_attrs); 1575 i->t.host_size = sizeof(struct spi_host_attrs); 1576 i->f = ft; 1577 1578 return &i->t; 1579 } 1580 EXPORT_SYMBOL(spi_attach_transport); 1581 1582 void spi_release_transport(struct scsi_transport_template *t) 1583 { 1584 struct spi_internal *i = to_spi_internal(t); 1585 1586 transport_container_unregister(&i->t.target_attrs); 1587 transport_container_unregister(&i->t.host_attrs); 1588 1589 kfree(i); 1590 } 1591 EXPORT_SYMBOL(spi_release_transport); 1592 1593 static __init int spi_transport_init(void) 1594 { 1595 int error = scsi_dev_info_add_list(SCSI_DEVINFO_SPI, 1596 "SCSI Parallel Transport Class"); 1597 if (!error) { 1598 int i; 1599 1600 for (i = 0; spi_static_device_list[i].vendor; i++) 1601 scsi_dev_info_list_add_keyed(1, /* compatible */ 1602 spi_static_device_list[i].vendor, 1603 spi_static_device_list[i].model, 1604 NULL, 1605 spi_static_device_list[i].flags, 1606 SCSI_DEVINFO_SPI); 1607 } 1608 1609 error = transport_class_register(&spi_transport_class); 1610 if (error) 1611 return error; 1612 error = anon_transport_class_register(&spi_device_class); 1613 return transport_class_register(&spi_host_class); 1614 } 1615 1616 static void __exit spi_transport_exit(void) 1617 { 1618 transport_class_unregister(&spi_transport_class); 1619 anon_transport_class_unregister(&spi_device_class); 1620 transport_class_unregister(&spi_host_class); 1621 scsi_dev_info_remove_list(SCSI_DEVINFO_SPI); 1622 } 1623 1624 MODULE_AUTHOR("Martin Hicks"); 1625 MODULE_DESCRIPTION("SPI Transport Attributes"); 1626 MODULE_LICENSE("GPL"); 1627 1628 module_init(spi_transport_init); 1629 module_exit(spi_transport_exit); 1630