/*
 * Adaptec AAC series RAID controller driver
 * (c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Module Name:
 *  aachba.c
 *
 * Abstract: Contains Interfaces to manage IOs.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/uaccess.h>
#include <linux/highmem.h> /* For flush_kernel_dcache_page */
#include <linux/module.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"

/* values for inqd_pdt: Peripheral device type in plain English */
#define INQD_PDT_DA     0x00    /* Direct-access (DISK) device */
#define INQD_PDT_PROC   0x03    /* Processor device */
#define INQD_PDT_CHNGR  0x08    /* Changer (jukebox, scsi2) */
#define INQD_PDT_COMM   0x09    /* Communication device (scsi2) */
#define INQD_PDT_NOLUN2 0x1f    /* Unknown Device (scsi2) */
#define INQD_PDT_NOLUN  0x7f    /* Logical Unit Not Present */

#define INQD_PDT_DMASK  0x1F    /* Peripheral Device Type Mask */
#define INQD_PDT_QMASK  0xE0    /* Peripheral Device Qualifier Mask */

/*
 * Sense codes
 */

#define SENCODE_NO_SENSE                        0x00
#define SENCODE_END_OF_DATA                     0x00
#define SENCODE_BECOMING_READY                  0x04
#define SENCODE_INIT_CMD_REQUIRED               0x04
#define SENCODE_UNRECOVERED_READ_ERROR          0x11
#define SENCODE_PARAM_LIST_LENGTH_ERROR         0x1A
#define SENCODE_INVALID_COMMAND                 0x20
#define SENCODE_LBA_OUT_OF_RANGE                0x21
#define SENCODE_INVALID_CDB_FIELD               0x24
#define SENCODE_LUN_NOT_SUPPORTED               0x25
#define SENCODE_INVALID_PARAM_FIELD             0x26
#define SENCODE_PARAM_NOT_SUPPORTED             0x26
#define SENCODE_PARAM_VALUE_INVALID             0x26
#define SENCODE_RESET_OCCURRED                  0x29
#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET     0x3E
#define SENCODE_INQUIRY_DATA_CHANGED            0x3F
#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED     0x39
#define SENCODE_DIAGNOSTIC_FAILURE              0x40
#define SENCODE_INTERNAL_TARGET_FAILURE         0x44
#define SENCODE_INVALID_MESSAGE_ERROR           0x49
#define SENCODE_LUN_FAILED_SELF_CONFIG          0x4c
#define SENCODE_OVERLAPPED_COMMAND              0x4E

/*
 * Additional sense codes
 */

#define ASENCODE_NO_SENSE                       0x00
#define ASENCODE_END_OF_DATA                    0x05
#define ASENCODE_BECOMING_READY                 0x01
#define ASENCODE_INIT_CMD_REQUIRED              0x02
#define ASENCODE_PARAM_LIST_LENGTH_ERROR        0x00
#define ASENCODE_INVALID_COMMAND                0x00
#define ASENCODE_LBA_OUT_OF_RANGE               0x00
#define ASENCODE_INVALID_CDB_FIELD              0x00
#define ASENCODE_LUN_NOT_SUPPORTED              0x00
#define ASENCODE_INVALID_PARAM_FIELD            0x00
#define ASENCODE_PARAM_NOT_SUPPORTED            0x01
#define ASENCODE_PARAM_VALUE_INVALID            0x02
#define ASENCODE_RESET_OCCURRED                 0x00
#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET    0x00
#define ASENCODE_INQUIRY_DATA_CHANGED           0x03
#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED    0x00
#define ASENCODE_DIAGNOSTIC_FAILURE             0x80
#define ASENCODE_INTERNAL_TARGET_FAILURE        0x00
#define ASENCODE_INVALID_MESSAGE_ERROR          0x00
#define ASENCODE_LUN_FAILED_SELF_CONFIG         0x00
#define ASENCODE_OVERLAPPED_COMMAND             0x00
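/*
 * The SENCODE_* / ASENCODE_* pairs above end up in bytes 12 and 13 of
 * the fixed-format sense data built by set_sense() further down.  As an
 * illustrative sketch only (not driver code), reporting an invalid CDB
 * field pairs up like this:
 *
 *	set_sense(&sense_data, ILLEGAL_REQUEST,
 *		  SENCODE_INVALID_CDB_FIELD,	(ASC  0x24 -> byte 12)
 *		  ASENCODE_INVALID_CDB_FIELD,	(ASCQ 0x00 -> byte 13)
 *		  0, 0);
 */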
#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)

#define BYTE0(x) (unsigned char)(x)
#define BYTE1(x) (unsigned char)((x) >> 8)
#define BYTE2(x) (unsigned char)((x) >> 16)
#define BYTE3(x) (unsigned char)((x) >> 24)
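/*
 * Illustrative note: AAC_STAT_GOOD packs the midlayer result word as
 * host byte | message byte | status byte.  With the current values
 * (DID_OK == 0x00, COMMAND_COMPLETE == 0x00, SAM_STAT_GOOD == 0x00) the
 * whole word evaluates to 0.  The BYTE0..BYTE3 macros just slice a
 * 32-bit value, e.g. BYTE2(0x11223344) == 0x22.
 */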
/* MODE_SENSE data format */
typedef struct {
    struct {
        u8 data_length;
        u8 med_type;
        u8 dev_par;
        u8 bd_length;
    } __attribute__((packed)) hd;
    struct {
        u8 dens_code;
        u8 block_count[3];
        u8 reserved;
        u8 block_length[3];
    } __attribute__((packed)) bd;
    u8 mpc_buf[3];
} __attribute__((packed)) aac_modep_data;

/* MODE_SENSE_10 data format */
typedef struct {
    struct {
        u8 data_length[2];
        u8 med_type;
        u8 dev_par;
        u8 rsrvd[2];
        u8 bd_length[2];
    } __attribute__((packed)) hd;
    struct {
        u8 dens_code;
        u8 block_count[3];
        u8 reserved;
        u8 block_length[3];
    } __attribute__((packed)) bd;
    u8 mpc_buf[3];
} __attribute__((packed)) aac_modep10_data;

/*------------------------------------------------------------------------------
 *              S T R U C T S / T Y P E D E F S
 *----------------------------------------------------------------------------*/
/* SCSI inquiry data */
struct inquiry_data {
    u8 inqd_pdt;    /* Peripheral qualifier | Peripheral Device Type */
    u8 inqd_dtq;    /* RMB | Device Type Qualifier */
    u8 inqd_ver;    /* ISO version | ECMA version | ANSI-approved version */
    u8 inqd_rdf;    /* AENC | TrmIOP | Response data format */
    u8 inqd_len;    /* Additional length (n-4) */
    u8 inqd_pad1[2];/* Reserved - must be zero */
    u8 inqd_pad2;   /* RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */
    u8 inqd_vid[8]; /* Vendor ID */
    u8 inqd_pid[16];/* Product ID */
    u8 inqd_prl[4]; /* Product Revision Level */
};

/* Added for VPD 0x83 */
struct tvpd_id_descriptor_type_1 {
    u8 codeset:4;           /* VPD_CODE_SET */
    u8 reserved:4;
    u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
    u8 reserved2:4;
    u8 reserved3;
    u8 identifierlength;
    u8 venid[8];
    u8 productid[16];
    u8 serialnumber[8];     /* SN in ASCII */

};

struct tvpd_id_descriptor_type_2 {
    u8 codeset:4;           /* VPD_CODE_SET */
    u8 reserved:4;
    u8 identifiertype:4;    /* VPD_IDENTIFIER_TYPE */
    u8 reserved2:4;
    u8 reserved3;
    u8 identifierlength;
    struct teu64id {
        u32 Serial;
        /* The serial number is supposed to be 40 bits,
         * but we only support 32, so make the last byte zero. */
        u8 reserved;
        u8 venid[3];
    } eu64id;

};

struct tvpd_id_descriptor_type_3 {
    u8 codeset : 4;         /* VPD_CODE_SET */
    u8 reserved : 4;
    u8 identifiertype : 4;  /* VPD_IDENTIFIER_TYPE */
    u8 reserved2 : 4;
    u8 reserved3;
    u8 identifierlength;
    u8 Identifier[16];
};

struct tvpd_page83 {
    u8 DeviceType:5;
    u8 DeviceTypeQualifier:3;
    u8 PageCode;
    u8 reserved;
    u8 PageLength;
    struct tvpd_id_descriptor_type_1 type1;
    struct tvpd_id_descriptor_type_2 type2;
    struct tvpd_id_descriptor_type_3 type3;
};
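/*
 * Sketch of the resulting VPD page 0x83 payload as assembled by
 * get_container_serial_callback() below (type3 is only appended on
 * sa_firmware adapters):
 *
 *	byte 0     peripheral qualifier / device type
 *	byte 1     page code (0x83)
 *	byte 3     page length
 *	bytes 4..  type1 (T10 vendor ID), type2 (EUI-64), [type3]
 */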
/*
 *              M O D U L E   G L O B A L S
 */

static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
                struct aac_raw_io2 *rio2, int sg_max);
static long aac_build_sghba(struct scsi_cmnd *scsicmd,
                struct aac_hba_cmd_req *hbacmd,
                int sg_max, u64 sg_address);
static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
                int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd *scsicmd);
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
#endif

/*
 * Non dasd selection is handled entirely in aachba now
 */

static int nondasd = -1;
static int aac_cache = 2;   /* WCE=0 to avoid performance problems */
static int dacmode = -1;
int aac_msi;
int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode;          /* Only Sync. transfer - disabled */
int aac_convert_sgl = 1;    /* convert non-conformable s/g list - enabled */

module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
    " 0=off, 1=on");
module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
    " 0=off, 1=on");
module_param(nondasd, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
    " 0=off, 1=on");
module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
    "\tbit 0 - Disable FUA in WRITE SCSI commands\n"
    "\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
    "\tbit 2 - Disable only if Battery is protecting Cache");
module_param(dacmode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
    " 0=off, 1=on");
module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
    " adapter for foreign arrays.\n"
    "This is typically needed in systems that do not have a BIOS."
    " 0=off, 1=on");
module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(msi, "IRQ handling."
    " 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
    " adapter to have its kernel up and\n"
    "running. This is typically adjusted for large systems that do not"
    " have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
    " applications to pick up AIFs before\n"
    "deregistering them. This is typically adjusted for heavily burdened"
    " systems.");

int aac_fib_dump;
module_param(aac_fib_dump, int, 0644);
MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");

int numacb = -1;
module_param(numacb, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
    " blocks (FIB) allocated. Valid values are 512 and down. Default is"
    " to use suggestion from Firmware.");

int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
    " size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
    " suggestion from Firmware.");

int update_interval = 30 * 60;
module_param(update_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
    " updates issued to adapter.");

int check_interval = 60;
module_param(check_interval, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
    " checks.");

int aac_check_reset = 1;
module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
    " adapter. A value of -1 forces the reset to adapters programmed to"
    " ignore it.");

int expose_physicals = -1;
module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
    " -1=protect 0=off, 1=on");

int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");

int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
    "\t0 - Disable\n"
    "\t1 - Array Meta Data Signature (default)\n"
    "\t2 - Adapter Serial Number");
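/*
 * Example (illustrative values only) of setting these tunables at load
 * time; any parameter declared with S_IWUSR can also be changed later
 * through /sys/module/aacraid/parameters/:
 *
 *	modprobe aacraid cache=2 expose_physicals=0 msi=1
 */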
static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
        struct fib *fibptr) {
    struct scsi_device *device;

    if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
        dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
        aac_fib_complete(fibptr);
        return 0;
    }
    scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
    device = scsicmd->device;
    if (unlikely(!device)) {
        dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
        aac_fib_complete(fibptr);
        return 0;
    }
    return 1;
}

/**
 * aac_get_config_status - check the adapter configuration
 * @dev: adapter to query
 * @commit_flag: force sending CT_COMMIT_CONFIG
 *
 * Query config status, and commit the configuration if needed.
 */
int aac_get_config_status(struct aac_dev *dev, int commit_flag)
{
    int status = 0;
    struct fib *fibptr;

    if (!(fibptr = aac_fib_alloc(dev)))
        return -ENOMEM;

    aac_fib_init(fibptr);
    {
        struct aac_get_config_status *dinfo;
        dinfo = (struct aac_get_config_status *) fib_data(fibptr);

        dinfo->command = cpu_to_le32(VM_ContainerConfig);
        dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
        dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
    }

    status = aac_fib_send(ContainerCommand,
                fibptr,
                sizeof (struct aac_get_config_status),
                FsaNormal,
                1, 1,
                NULL, NULL);
    if (status < 0) {
        printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
    } else {
        struct aac_get_config_status_resp *reply
            = (struct aac_get_config_status_resp *) fib_data(fibptr);
        dprintk((KERN_WARNING
            "aac_get_config_status: response=%d status=%d action=%d\n",
            le32_to_cpu(reply->response),
            le32_to_cpu(reply->status),
            le32_to_cpu(reply->data.action)));
        if ((le32_to_cpu(reply->response) != ST_OK) ||
            (le32_to_cpu(reply->status) != CT_OK) ||
            (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
            printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
            status = -EINVAL;
        }
    }
    /* Do not set XferState to zero unless it receives a response from F/W */
    if (status >= 0)
        aac_fib_complete(fibptr);

    /* Send a CT_COMMIT_CONFIG to enable discovery of devices */
    if (status >= 0) {
        if ((aac_commit == 1) || commit_flag) {
            struct aac_commit_config *dinfo;
            aac_fib_init(fibptr);
            dinfo = (struct aac_commit_config *) fib_data(fibptr);

            dinfo->command = cpu_to_le32(VM_ContainerConfig);
            dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);

            status = aac_fib_send(ContainerCommand,
                        fibptr,
                        sizeof (struct aac_commit_config),
                        FsaNormal,
                        1, 1,
                        NULL, NULL);
            /* Do not set XferState to zero unless
             * it receives a response from F/W */
            if (status >= 0)
                aac_fib_complete(fibptr);
        } else if (aac_commit == 0) {
            printk(KERN_WARNING
                "aac_get_config_status: Foreign device configurations are being ignored\n");
        }
    }
    /* FIB should be freed only after getting the response from the F/W */
    if (status != -ERESTARTSYS)
        aac_fib_free(fibptr);
    return status;
}

static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
{
    char inq_data;
    scsi_sg_copy_to_buffer(scsicmd, &inq_data, sizeof(inq_data));
    if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
        inq_data &= 0xdf;
        scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
    }
}
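/*
 * Note on the masking above: byte 0 of an INQUIRY response is
 * qualifier (bits 7:5) | device type (bits 4:0).  Testing 0x20 picks out
 * qualifier 001b ("connected but not accessible"); clearing it with
 * & 0xdf re-exposes the disk to the midlayer, e.g. 0x20 -> 0x00.
 */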
/**
 * aac_get_containers - list containers
 * @dev: adapter to probe
 *
 * Make a list of all containers on this controller
 */
int aac_get_containers(struct aac_dev *dev)
{
    struct fsa_dev_info *fsa_dev_ptr;
    u32 index;
    int status = 0;
    struct fib *fibptr;
    struct aac_get_container_count *dinfo;
    struct aac_get_container_count_resp *dresp;
    int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;

    if (!(fibptr = aac_fib_alloc(dev)))
        return -ENOMEM;

    aac_fib_init(fibptr);
    dinfo = (struct aac_get_container_count *) fib_data(fibptr);
    dinfo->command = cpu_to_le32(VM_ContainerConfig);
    dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);

    status = aac_fib_send(ContainerCommand,
                fibptr,
                sizeof (struct aac_get_container_count),
                FsaNormal,
                1, 1,
                NULL, NULL);
    if (status >= 0) {
        dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
        maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
        if (fibptr->dev->supplement_adapter_info.supported_options2 &
            AAC_OPTION_SUPPORTED_240_VOLUMES) {
            maximum_num_containers =
                le32_to_cpu(dresp->MaxSimpleVolumes);
        }
        aac_fib_complete(fibptr);
    }
    /* FIB should be freed only after getting the response from the F/W */
    if (status != -ERESTARTSYS)
        aac_fib_free(fibptr);

    if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
        maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
    if (dev->fsa_dev == NULL ||
        dev->maximum_num_containers != maximum_num_containers) {

        fsa_dev_ptr = dev->fsa_dev;

        dev->fsa_dev = kcalloc(maximum_num_containers,
                    sizeof(*fsa_dev_ptr), GFP_KERNEL);

        kfree(fsa_dev_ptr);
        fsa_dev_ptr = NULL;


        if (!dev->fsa_dev)
            return -ENOMEM;

        dev->maximum_num_containers = maximum_num_containers;
    }
    for (index = 0; index < dev->maximum_num_containers; index++) {
        dev->fsa_dev[index].devname[0] = '\0';
        dev->fsa_dev[index].valid = 0;

        status = aac_probe_container(dev, index);

        if (status < 0) {
            printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
            break;
        }
    }
    return status;
}

static void get_container_name_callback(void *context, struct fib *fibptr)
{
    struct aac_get_name_resp *get_name_reply;
    struct scsi_cmnd *scsicmd;

    scsicmd = (struct scsi_cmnd *) context;

    if (!aac_valid_context(scsicmd, fibptr))
        return;

    dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
    BUG_ON(fibptr == NULL);

    get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
    /* Failure is irrelevant, using default value instead */
    if ((le32_to_cpu(get_name_reply->status) == CT_OK)
        && (get_name_reply->data[0] != '\0')) {
        char *sp = get_name_reply->data;
        int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);

        sp[data_size - 1] = '\0';
        while (*sp == ' ')
            ++sp;
        if (*sp) {
            struct inquiry_data inq;
            char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
            int count = sizeof(d);
            char *dp = d;
            do {
                *dp++ = (*sp) ? *sp++ : ' ';
            } while (--count > 0);

            scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
            memcpy(inq.inqd_pid, d, sizeof(d));
            scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
        }
    }

    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

    aac_fib_complete(fibptr);
    scsicmd->scsi_done(scsicmd);
}
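/*
 * Minimal sketch of the FIB request pattern used throughout this file
 * (the names are the driver's own; error handling elided):
 *
 *	fib = aac_fib_alloc(dev);            (or aac_fib_alloc_tag())
 *	aac_fib_init(fib);
 *	... fill fib_data(fib) ...
 *	aac_fib_send(cmd, fib, size, FsaNormal, wait, 1, callback, ctx);
 *	aac_fib_complete(fib);               (after the response)
 *	aac_fib_free(fib);                   (blocking callers only)
 */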
/**
 * aac_get_container_name - get container name, non-blocking.
 */
static int aac_get_container_name(struct scsi_cmnd *scsicmd)
{
    int status;
    int data_size;
    struct aac_get_name *dinfo;
    struct fib *cmd_fibcontext;
    struct aac_dev *dev;

    dev = (struct aac_dev *)scsicmd->device->host->hostdata;

    data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);

    cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);

    aac_fib_init(cmd_fibcontext);
    dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

    dinfo->command = cpu_to_le32(VM_ContainerConfig);
    dinfo->type = cpu_to_le32(CT_READ_NAME);
    dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
    dinfo->count = cpu_to_le32(data_size - 1);

    status = aac_fib_send(ContainerCommand,
                cmd_fibcontext,
                sizeof(struct aac_get_name_resp),
                FsaNormal,
                0, 1,
                (fib_callback)get_container_name_callback,
                (void *) scsicmd);

    /*
     * Check that the command was queued to the controller
     */
    if (status == -EINPROGRESS)
        return 0;

    printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
    aac_fib_complete(cmd_fibcontext);
    return -1;
}

static int aac_probe_container_callback2(struct scsi_cmnd *scsicmd)
{
    struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;

    if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
        return aac_scsi_cmd(scsicmd);

    scsicmd->result = DID_NO_CONNECT << 16;
    scsicmd->scsi_done(scsicmd);
    return 0;
}

static void _aac_probe_container2(void *context, struct fib *fibptr)
{
    struct fsa_dev_info *fsa_dev_ptr;
    int (*callback)(struct scsi_cmnd *);
    struct scsi_cmnd *scsicmd = (struct scsi_cmnd *)context;
    int i;


    if (!aac_valid_context(scsicmd, fibptr))
        return;

    scsicmd->SCp.Status = 0;
    fsa_dev_ptr = fibptr->dev->fsa_dev;
    if (fsa_dev_ptr) {
        struct aac_mount *dresp = (struct aac_mount *) fib_data(fibptr);
        __le32 sup_options2;

        fsa_dev_ptr += scmd_id(scsicmd);
        sup_options2 =
            fibptr->dev->supplement_adapter_info.supported_options2;

        if ((le32_to_cpu(dresp->status) == ST_OK) &&
            (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
            (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
            if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
                dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
                fsa_dev_ptr->block_size = 0x200;
            } else {
                fsa_dev_ptr->block_size =
                    le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
            }
            for (i = 0; i < 16; i++)
                fsa_dev_ptr->identifier[i] =
                    dresp->mnt[0].fileinfo.bdevinfo.identifier[i];
            fsa_dev_ptr->valid = 1;
            /* sense_key holds the current state of the spin-up */
            if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
                fsa_dev_ptr->sense_data.sense_key = NOT_READY;
            else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
                fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
            fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
            fsa_dev_ptr->size
                = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
                  (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
            fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
        }
        if ((fsa_dev_ptr->valid & 1) == 0)
            fsa_dev_ptr->valid = 0;
        scsicmd->SCp.Status = le32_to_cpu(dresp->count);
    }
    aac_fib_complete(fibptr);
    aac_fib_free(fibptr);
    callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
    scsicmd->SCp.ptr = NULL;
    (*callback)(scsicmd);
    return;
}
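/*
 * Probe call chain, for orientation: _aac_probe_container() issues a
 * VM_NameServe query and parks the final callback in scsicmd->SCp.ptr;
 * _aac_probe_container1() upgrades the query if needed and resends;
 * _aac_probe_container2() (above) decodes the mount response and then
 * invokes the parked callback.
 */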
static void _aac_probe_container1(void *context, struct fib *fibptr)
{
    struct scsi_cmnd *scsicmd;
    struct aac_mount *dresp;
    struct aac_query_mount *dinfo;
    int status;

    dresp = (struct aac_mount *) fib_data(fibptr);
    if (!aac_supports_2T(fibptr->dev)) {
        dresp->mnt[0].capacityhigh = 0;
        if ((le32_to_cpu(dresp->status) == ST_OK) &&
            (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
            _aac_probe_container2(context, fibptr);
            return;
        }
    }
    scsicmd = (struct scsi_cmnd *) context;

    if (!aac_valid_context(scsicmd, fibptr))
        return;

    aac_fib_init(fibptr);

    dinfo = (struct aac_query_mount *)fib_data(fibptr);

    if (fibptr->dev->supplement_adapter_info.supported_options2 &
        AAC_OPTION_VARIABLE_BLOCK_SIZE)
        dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
    else
        dinfo->command = cpu_to_le32(VM_NameServe64);

    dinfo->count = cpu_to_le32(scmd_id(scsicmd));
    dinfo->type = cpu_to_le32(FT_FILESYS);
    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

    status = aac_fib_send(ContainerCommand,
                fibptr,
                sizeof(struct aac_query_mount),
                FsaNormal,
                0, 1,
                _aac_probe_container2,
                (void *) scsicmd);
    /*
     * Check that the command was queued to the controller
     */
    if (status < 0 && status != -EINPROGRESS) {
        /* Inherit results from VM_NameServe, if any */
        dresp->status = cpu_to_le32(ST_OK);
        _aac_probe_container2(context, fibptr);
    }
}

static int _aac_probe_container(struct scsi_cmnd *scsicmd, int (*callback)(struct scsi_cmnd *))
{
    struct fib *fibptr;
    int status = -ENOMEM;

    if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
        struct aac_query_mount *dinfo;

        aac_fib_init(fibptr);

        dinfo = (struct aac_query_mount *)fib_data(fibptr);

        if (fibptr->dev->supplement_adapter_info.supported_options2 &
            AAC_OPTION_VARIABLE_BLOCK_SIZE)
            dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
        else
            dinfo->command = cpu_to_le32(VM_NameServe);

        dinfo->count = cpu_to_le32(scmd_id(scsicmd));
        dinfo->type = cpu_to_le32(FT_FILESYS);
        scsicmd->SCp.ptr = (char *)callback;
        scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

        status = aac_fib_send(ContainerCommand,
                    fibptr,
                    sizeof(struct aac_query_mount),
                    FsaNormal,
                    0, 1,
                    _aac_probe_container1,
                    (void *) scsicmd);
        /*
         * Check that the command was queued to the controller
         */
        if (status == -EINPROGRESS)
            return 0;

        if (status < 0) {
            scsicmd->SCp.ptr = NULL;
            aac_fib_complete(fibptr);
            aac_fib_free(fibptr);
        }
    }
    if (status < 0) {
        struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
        if (fsa_dev_ptr) {
            fsa_dev_ptr += scmd_id(scsicmd);
            if ((fsa_dev_ptr->valid & 1) == 0) {
                fsa_dev_ptr->valid = 0;
                return (*callback)(scsicmd);
            }
        }
    }
    return status;
}
/**
 * aac_probe_container - query a logical volume
 * @dev: device to query
 * @cid: container identifier
 *
 * Queries the controller about the given volume. The volume information
 * is updated in the struct fsa_dev_info structure rather than returned.
 */
static int aac_probe_container_callback1(struct scsi_cmnd *scsicmd)
{
    scsicmd->device = NULL;
    return 0;
}

int aac_probe_container(struct aac_dev *dev, int cid)
{
    struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
    struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
    int status;

    if (!scsicmd || !scsidev) {
        kfree(scsicmd);
        kfree(scsidev);
        return -ENOMEM;
    }
    scsicmd->list.next = NULL;
    scsicmd->scsi_done = (void (*)(struct scsi_cmnd *))aac_probe_container_callback1;

    scsicmd->device = scsidev;
    scsidev->sdev_state = 0;
    scsidev->id = cid;
    scsidev->host = dev->scsi_host_ptr;

    if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
        while (scsicmd->device == scsidev)
            schedule();
    kfree(scsidev);
    status = scsicmd->SCp.Status;
    kfree(scsicmd);
    return status;
}

/* Local Structure to set SCSI inquiry data strings */
struct scsi_inq {
    char vid[8];    /* Vendor ID */
    char pid[16];   /* Product ID */
    char prl[4];    /* Product Revision Level */
};
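/*
 * aac_probe_container() above fakes up a scsi_cmnd/scsi_device pair so
 * the asynchronous probe path can be reused synchronously: the dummy
 * done() routine clears scsicmd->device, and the schedule() loop simply
 * waits for that store.  A sketch of the idea:
 *
 *	cmd->device = dev;            (sentinel)
 *	start_async(cmd, done);       (done() sets cmd->device = NULL)
 *	while (cmd->device == dev)
 *		schedule();           (spin-wait, yielding the CPU)
 */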
/**
 * InqStrCopy - string merge
 * @a: string to copy from
 * @b: string to copy to
 *
 * Copy a String from one location to another
 * without copying \0
 */

static void inqstrcpy(char *a, char *b)
{

    while (*a != (char)0)
        *b++ = *a++;
}

static char *container_types[] = {
    "None",
    "Volume",
    "Mirror",
    "Stripe",
    "RAID5",
    "SSRW",
    "SSRO",
    "Morph",
    "Legacy",
    "RAID4",
    "RAID10",
    "RAID00",
    "V-MIRRORS",
    "PSEUDO R4",
    "RAID50",
    "RAID5D",
    "RAID5D0",
    "RAID1E",
    "RAID6",
    "RAID60",
    "Unknown"
};

char *get_container_type(unsigned tindex)
{
    if (tindex >= ARRAY_SIZE(container_types))
        tindex = ARRAY_SIZE(container_types) - 1;
    return container_types[tindex];
}

/* Function: setinqstr
 *
 * Arguments: [1] pointer to void [1] int
 *
 * Purpose: Sets SCSI inquiry data strings for vendor, product
 * and revision level. Allows strings to be set in platform dependent
 * files instead of in OS dependent driver source.
 */

static void setinqstr(struct aac_dev *dev, void *data, int tindex)
{
    struct scsi_inq *str;
    struct aac_supplement_adapter_info *sup_adap_info;

    sup_adap_info = &dev->supplement_adapter_info;
    str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
    memset(str, ' ', sizeof(*str));

    if (sup_adap_info->adapter_type_text[0]) {
        char *cp = sup_adap_info->adapter_type_text;
        int c;
        if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
            inqstrcpy("SMC", str->vid);
        else {
            c = sizeof(str->vid);
            while (*cp && *cp != ' ' && --c)
                ++cp;
            c = *cp;
            *cp = '\0';
            inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
            *cp = c;
            while (*cp && *cp != ' ')
                ++cp;
        }
        while (*cp == ' ')
            ++cp;
        /* last six chars reserved for vol type */
        c = 0;
        if (strlen(cp) > sizeof(str->pid)) {
            c = cp[sizeof(str->pid)];
            cp[sizeof(str->pid)] = '\0';
        }
        inqstrcpy (cp, str->pid);
        if (c)
            cp[sizeof(str->pid)] = c;
    } else {
        struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);

        inqstrcpy (mp->vname, str->vid);
        /* last six chars reserved for vol type */
        inqstrcpy (mp->model, str->pid);
    }

    if (tindex < ARRAY_SIZE(container_types)){
        char *findit = str->pid;

        for ( ; *findit != ' '; findit++); /* walk till we find a space */
        /* RAID is superfluous in the context of a RAID device */
        if (memcmp(findit-4, "RAID", 4) == 0)
            *(findit -= 4) = ' ';
        if (((findit - str->pid) + strlen(container_types[tindex]))
            < (sizeof(str->pid) + sizeof(str->prl)))
            inqstrcpy (container_types[tindex], findit + 1);
    }
    inqstrcpy ("V1.0", str->prl);
}
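/*
 * Note: inqstrcpy() deliberately omits the NUL terminator because
 * INQUIRY strings are fixed-width, space-padded ASCII; setinqstr()
 * pre-fills the whole block with spaces first.  For example a 5-char
 * vendor name placed in the 8-byte vid field ends up as "Vendr   ".
 */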
static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
        struct aac_dev *dev, struct scsi_cmnd *scsicmd)
{
    int container;

    vpdpage83data->type3.codeset = 1;
    vpdpage83data->type3.identifiertype = 3;
    vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
                        - 4;

    for (container = 0; container < dev->maximum_num_containers;
            container++) {

        if (scmd_id(scsicmd) == container) {
            memcpy(vpdpage83data->type3.Identifier,
                    dev->fsa_dev[container].identifier,
                    16);
            break;
        }
    }
}

static void get_container_serial_callback(void *context, struct fib *fibptr)
{
    struct aac_get_serial_resp *get_serial_reply;
    struct scsi_cmnd *scsicmd;

    BUG_ON(fibptr == NULL);

    scsicmd = (struct scsi_cmnd *) context;
    if (!aac_valid_context(scsicmd, fibptr))
        return;

    get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
    /* Failure is irrelevant, using default value instead */
    if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
        /* Check to see if it's for VPD 0x83 or 0x80 */
        if (scsicmd->cmnd[2] == 0x83) {
            /* vpd page 0x83 - Device Identification Page */
            struct aac_dev *dev;
            int i;
            struct tvpd_page83 vpdpage83data;

            dev = (struct aac_dev *)scsicmd->device->host->hostdata;

            memset(((u8 *)&vpdpage83data), 0,
                sizeof(vpdpage83data));

            /* DIRECT_ACCESS_DEVICE */
            vpdpage83data.DeviceType = 0;
            /* DEVICE_CONNECTED */
            vpdpage83data.DeviceTypeQualifier = 0;
            /* VPD_DEVICE_IDENTIFIERS */
            vpdpage83data.PageCode = 0x83;
            vpdpage83data.reserved = 0;
            vpdpage83data.PageLength =
                sizeof(vpdpage83data.type1) +
                sizeof(vpdpage83data.type2);

            /* VPD 83 Type 3 is not supported for ARC */
            if (dev->sa_firmware)
                vpdpage83data.PageLength +=
                    sizeof(vpdpage83data.type3);

            /* T10 Vendor Identifier Field Format */
            /* VpdcodesetAscii */
            vpdpage83data.type1.codeset = 2;
            /* VpdIdentifierTypeVendorId */
            vpdpage83data.type1.identifiertype = 1;
            vpdpage83data.type1.identifierlength =
                sizeof(vpdpage83data.type1) - 4;

            /* "ADAPTEC " for adaptec */
            memcpy(vpdpage83data.type1.venid,
                "ADAPTEC ",
                sizeof(vpdpage83data.type1.venid));
            memcpy(vpdpage83data.type1.productid,
                "ARRAY           ",
                sizeof(
                vpdpage83data.type1.productid));

            /* Convert to ascii based serial number.
             * The LSB is at the end.
             */
            for (i = 0; i < 8; i++) {
                u8 temp =
                    (u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
                if (temp > 0x9) {
                    vpdpage83data.type1.serialnumber[i] =
                        'A' + (temp - 0xA);
                } else {
                    vpdpage83data.type1.serialnumber[i] =
                        '0' + temp;
                }
            }

            /* VpdCodeSetBinary */
            vpdpage83data.type2.codeset = 1;
            /* VpdidentifiertypeEUI64 */
            vpdpage83data.type2.identifiertype = 2;
            vpdpage83data.type2.identifierlength =
                sizeof(vpdpage83data.type2) - 4;

            vpdpage83data.type2.eu64id.venid[0] = 0xD0;
            vpdpage83data.type2.eu64id.venid[1] = 0;
            vpdpage83data.type2.eu64id.venid[2] = 0;

            vpdpage83data.type2.eu64id.Serial =
                get_serial_reply->uid;
            vpdpage83data.type2.eu64id.reserved = 0;

            /*
             * VpdIdentifierTypeFCPHName
             * VPD 0x83 Type 3 not supported for ARC
             */
            if (dev->sa_firmware) {
                build_vpd83_type3(&vpdpage83data,
                        dev, scsicmd);
            }

            /* Move the inquiry data to the response buffer. */
            scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
                        sizeof(vpdpage83data));
        } else {
            /* It must be for VPD 0x80 */
            char sp[13];
            /* EVPD bit set */
            sp[0] = INQD_PDT_DA;
            sp[1] = scsicmd->cmnd[2];
            sp[2] = 0;
            sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
                le32_to_cpu(get_serial_reply->uid));
            scsi_sg_copy_from_buffer(scsicmd, sp,
                        sizeof(sp));
        }
    }

    scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;

    aac_fib_complete(fibptr);
    scsicmd->scsi_done(scsicmd);
}
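/*
 * The serial-number loop above emits the 32-bit uid as eight uppercase
 * hex digits, most-significant nibble first: e.g. a uid of 0x1A2B3C4D
 * becomes the ASCII string "1A2B3C4D" in type1.serialnumber[].
 */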
/**
 * aac_get_container_serial - get container serial, non-blocking.
 */
static int aac_get_container_serial(struct scsi_cmnd *scsicmd)
{
    int status;
    struct aac_get_serial *dinfo;
    struct fib *cmd_fibcontext;
    struct aac_dev *dev;

    dev = (struct aac_dev *)scsicmd->device->host->hostdata;

    cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);

    aac_fib_init(cmd_fibcontext);
    dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);

    dinfo->command = cpu_to_le32(VM_ContainerConfig);
    dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
    dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
    scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;

    status = aac_fib_send(ContainerCommand,
                cmd_fibcontext,
                sizeof(struct aac_get_serial_resp),
                FsaNormal,
                0, 1,
                (fib_callback) get_container_serial_callback,
                (void *) scsicmd);

    /*
     * Check that the command was queued to the controller
     */
    if (status == -EINPROGRESS)
        return 0;

    printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
    aac_fib_complete(cmd_fibcontext);
    return -1;
}

/* Function: setinqserial
 *
 * Arguments: [1] pointer to void [1] int
 *
 * Purpose: Sets SCSI Unit Serial number.
 *          This is a fake. We should read a proper
 *          serial number from the container. <SuSE>But
 *          without docs it's quite hard to do it :-)
 *          So this will have to do in the meantime.</SuSE>
 */

static int setinqserial(struct aac_dev *dev, void *data, int cid)
{
    /*
     * This breaks array migration.
     */
    return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
            le32_to_cpu(dev->adapter_info.serial[0]), cid);
}

static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
    u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
{
    u8 *sense_buf = (u8 *)sense_data;
    /* Sense data valid, err code 70h */
    sense_buf[0] = 0x70; /* No info field */
    sense_buf[1] = 0;    /* Segment number, always zero */

    sense_buf[2] = sense_key;    /* Sense key */

    sense_buf[12] = sense_code;   /* Additional sense code */
    sense_buf[13] = a_sense_code; /* Additional sense code qualifier */

    if (sense_key == ILLEGAL_REQUEST) {
        sense_buf[7] = 10; /* Additional sense length */

        sense_buf[15] = bit_pointer;
        /* Illegal parameter is in the parameter block */
        if (sense_code == SENCODE_INVALID_CDB_FIELD)
            sense_buf[15] |= 0xc0; /* Std sense key specific field */
        /* Illegal parameter is in the CDB block */
        sense_buf[16] = field_pointer >> 8; /* MSB */
        sense_buf[17] = field_pointer;      /* LSB */
    } else
        sense_buf[7] = 6; /* Additional sense length */
}
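/*
 * Worked example of set_sense() output for an invalid CDB field at
 * byte 2 (fixed-format sense, with the SPC sense-key-specific bytes):
 *
 *	set_sense(sd, ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
 *		  ASENCODE_INVALID_CDB_FIELD, 0, 2);
 *
 *	buf[0]=0x70  buf[2]=0x05  buf[7]=10  buf[12]=0x24  buf[13]=0x00
 *	buf[15]=0xc0 (SKSV|C/D)   buf[16..17]=0x0002 (field pointer)
 */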
static int aac_bounds_32(struct aac_dev *dev, struct scsi_cmnd *cmd, u64 lba)
{
    if (lba & 0xffffffff00000000LL) {
        int cid = scmd_id(cmd);
        dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
        cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
            SAM_STAT_CHECK_CONDITION;
        set_sense(&dev->fsa_dev[cid].sense_data,
            HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
            ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
        memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
            min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
                SCSI_SENSE_BUFFERSIZE));
        cmd->scsi_done(cmd);
        return 1;
    }
    return 0;
}

static int aac_bounds_64(struct aac_dev *dev, struct scsi_cmnd *cmd, u64 lba)
{
    return 0;
}

static void io_callback(void *context, struct fib *fibptr);

static int aac_read_raw_io(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count)
{
    struct aac_dev *dev = fib->dev;
    u16 fibsize, command;
    long ret;

    aac_fib_init(fib);
    if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
        dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
        !dev->sync_mode) {
        struct aac_raw_io2 *readcmd2;
        readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
        memset(readcmd2, 0, sizeof(struct aac_raw_io2));
        readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
        readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
        readcmd2->byteCount = cpu_to_le32(count *
            dev->fsa_dev[scmd_id(cmd)].block_size);
        readcmd2->cid = cpu_to_le16(scmd_id(cmd));
        readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
        ret = aac_build_sgraw2(cmd, readcmd2,
            dev->scsi_host_ptr->sg_tablesize);
        if (ret < 0)
            return ret;
        command = ContainerRawIo2;
        fibsize = sizeof(struct aac_raw_io2) +
            ((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
    } else {
        struct aac_raw_io *readcmd;
        readcmd = (struct aac_raw_io *) fib_data(fib);
        readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
        readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
        readcmd->count = cpu_to_le32(count *
            dev->fsa_dev[scmd_id(cmd)].block_size);
        readcmd->cid = cpu_to_le16(scmd_id(cmd));
        readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
        readcmd->bpTotal = 0;
        readcmd->bpComplete = 0;
        ret = aac_build_sgraw(cmd, &readcmd->sg);
        if (ret < 0)
            return ret;
        command = ContainerRawIo;
        fibsize = sizeof(struct aac_raw_io) +
            ((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
    }

    BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(command,
              fib,
              fibsize,
              FsaNormal,
              0, 1,
              (fib_callback) io_callback,
              (void *) cmd);
}
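/*
 * Sizing note for the raw-I/O FIBs above: the request structs embed one
 * s/g element, so fibsize = sizeof(request) + (nr_sges - 1) * sizeof(sge).
 * E.g. an aac_raw_io2 carrying 4 IEEE-1212 SGEs occupies
 * sizeof(struct aac_raw_io2) + 3 * sizeof(struct sge_ieee1212) bytes,
 * and the BUG_ON() verifies this still fits inside the FIB payload.
 */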
static int aac_read_block64(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count)
{
    u16 fibsize;
    struct aac_read64 *readcmd;
    long ret;

    aac_fib_init(fib);
    readcmd = (struct aac_read64 *) fib_data(fib);
    readcmd->command = cpu_to_le32(VM_CtHostRead64);
    readcmd->cid = cpu_to_le16(scmd_id(cmd));
    readcmd->sector_count = cpu_to_le16(count);
    readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
    readcmd->pad = 0;
    readcmd->flags = 0;

    ret = aac_build_sg64(cmd, &readcmd->sg);
    if (ret < 0)
        return ret;
    fibsize = sizeof(struct aac_read64) +
        ((le32_to_cpu(readcmd->sg.count) - 1) *
         sizeof (struct sgentry64));
    BUG_ON (fibsize > (fib->dev->max_fib_size -
                sizeof(struct aac_fibhdr)));
    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(ContainerCommand64,
              fib,
              fibsize,
              FsaNormal,
              0, 1,
              (fib_callback) io_callback,
              (void *) cmd);
}

static int aac_read_block(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count)
{
    u16 fibsize;
    struct aac_read *readcmd;
    struct aac_dev *dev = fib->dev;
    long ret;

    aac_fib_init(fib);
    readcmd = (struct aac_read *) fib_data(fib);
    readcmd->command = cpu_to_le32(VM_CtBlockRead);
    readcmd->cid = cpu_to_le32(scmd_id(cmd));
    readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
    readcmd->count = cpu_to_le32(count *
        dev->fsa_dev[scmd_id(cmd)].block_size);

    ret = aac_build_sg(cmd, &readcmd->sg);
    if (ret < 0)
        return ret;
    fibsize = sizeof(struct aac_read) +
            ((le32_to_cpu(readcmd->sg.count) - 1) *
             sizeof (struct sgentry));
    BUG_ON (fibsize > (fib->dev->max_fib_size -
                sizeof(struct aac_fibhdr)));
    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(ContainerCommand,
              fib,
              fibsize,
              FsaNormal,
              0, 1,
              (fib_callback) io_callback,
              (void *) cmd);
}
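/*
 * The SUREWRITE decision in the write paths below honours the "cache"
 * module parameter: bit 0 suppresses FUA, and bit 2 (value 4) limits
 * that suppression to adapters whose cache is battery protected.  In
 * table form, for a WRITE with FUA set:
 *
 *	aac_cache & 5    cache_protected    SUREWRITE?
 *	      0                -               yes
 *	      1                -               no
 *	      4                -               yes
 *	      5               no               yes
 *	      5               yes              no
 */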
static int aac_write_raw_io(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count, int fua)
{
    struct aac_dev *dev = fib->dev;
    u16 fibsize, command;
    long ret;

    aac_fib_init(fib);
    if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
        dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
        !dev->sync_mode) {
        struct aac_raw_io2 *writecmd2;
        writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
        memset(writecmd2, 0, sizeof(struct aac_raw_io2));
        writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
        writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
        writecmd2->byteCount = cpu_to_le32(count *
            dev->fsa_dev[scmd_id(cmd)].block_size);
        writecmd2->cid = cpu_to_le16(scmd_id(cmd));
        writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
            (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
            cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
            cpu_to_le16(RIO2_IO_TYPE_WRITE);
        ret = aac_build_sgraw2(cmd, writecmd2,
            dev->scsi_host_ptr->sg_tablesize);
        if (ret < 0)
            return ret;
        command = ContainerRawIo2;
        fibsize = sizeof(struct aac_raw_io2) +
            ((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
    } else {
        struct aac_raw_io *writecmd;
        writecmd = (struct aac_raw_io *) fib_data(fib);
        writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
        writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
        writecmd->count = cpu_to_le32(count *
            dev->fsa_dev[scmd_id(cmd)].block_size);
        writecmd->cid = cpu_to_le16(scmd_id(cmd));
        writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
            (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
            cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
            cpu_to_le16(RIO_TYPE_WRITE);
        writecmd->bpTotal = 0;
        writecmd->bpComplete = 0;
        ret = aac_build_sgraw(cmd, &writecmd->sg);
        if (ret < 0)
            return ret;
        command = ContainerRawIo;
        fibsize = sizeof(struct aac_raw_io) +
            ((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
    }

    BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(command,
              fib,
              fibsize,
              FsaNormal,
              0, 1,
              (fib_callback) io_callback,
              (void *) cmd);
}

static int aac_write_block64(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count, int fua)
{
    u16 fibsize;
    struct aac_write64 *writecmd;
    long ret;

    aac_fib_init(fib);
    writecmd = (struct aac_write64 *) fib_data(fib);
    writecmd->command = cpu_to_le32(VM_CtHostWrite64);
    writecmd->cid = cpu_to_le16(scmd_id(cmd));
    writecmd->sector_count = cpu_to_le16(count);
    writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
    writecmd->pad = 0;
    writecmd->flags = 0;

    ret = aac_build_sg64(cmd, &writecmd->sg);
    if (ret < 0)
        return ret;
    fibsize = sizeof(struct aac_write64) +
        ((le32_to_cpu(writecmd->sg.count) - 1) *
         sizeof (struct sgentry64));
    BUG_ON (fibsize > (fib->dev->max_fib_size -
                sizeof(struct aac_fibhdr)));
    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(ContainerCommand64,
              fib,
              fibsize,
              FsaNormal,
              0, 1,
              (fib_callback) io_callback,
              (void *) cmd);
}

static int aac_write_block(struct fib *fib, struct scsi_cmnd *cmd, u64 lba, u32 count, int fua)
{
    u16 fibsize;
    struct aac_write *writecmd;
    struct aac_dev *dev = fib->dev;
    long ret;

    aac_fib_init(fib);
    writecmd = (struct aac_write *) fib_data(fib);
    writecmd->command = cpu_to_le32(VM_CtBlockWrite);
    writecmd->cid = cpu_to_le32(scmd_id(cmd));
    writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
    writecmd->count = cpu_to_le32(count *
        dev->fsa_dev[scmd_id(cmd)].block_size);
    writecmd->sg.count = cpu_to_le32(1);
    /* ->stable is not used - it did mean which type of write */

    ret = aac_build_sg(cmd, &writecmd->sg);
    if (ret < 0)
        return ret;
    fibsize = sizeof(struct aac_write) +
        ((le32_to_cpu(writecmd->sg.count) - 1) *
         sizeof (struct sgentry));
    BUG_ON (fibsize > (fib->dev->max_fib_size -
                sizeof(struct aac_fibhdr)));
    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(ContainerCommand,
              fib,
              fibsize,
              FsaNormal,
              0, 1,
              (fib_callback) io_callback,
              (void *) cmd);
}
static struct aac_srb *aac_scsi_common(struct fib *fib, struct scsi_cmnd *cmd)
{
    struct aac_srb *srbcmd;
    u32 flag;
    u32 timeout;

    aac_fib_init(fib);
    switch(cmd->sc_data_direction){
    case DMA_TO_DEVICE:
        flag = SRB_DataOut;
        break;
    case DMA_BIDIRECTIONAL:
        flag = SRB_DataIn | SRB_DataOut;
        break;
    case DMA_FROM_DEVICE:
        flag = SRB_DataIn;
        break;
    case DMA_NONE:
    default:    /* shuts up some versions of gcc */
        flag = SRB_NoDataXfer;
        break;
    }

    srbcmd = (struct aac_srb *) fib_data(fib);
    srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
    srbcmd->channel = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
    srbcmd->id = cpu_to_le32(scmd_id(cmd));
    srbcmd->lun = cpu_to_le32(cmd->device->lun);
    srbcmd->flags = cpu_to_le32(flag);
    timeout = cmd->request->timeout/HZ;
    if (timeout == 0)
        timeout = 1;
    srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
    srbcmd->retry_limit = 0; /* Obsolete parameter */
    srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
    return srbcmd;
}

static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
                            struct scsi_cmnd *cmd)
{
    struct aac_hba_cmd_req *hbacmd;
    struct aac_dev *dev;
    int bus, target;
    u64 address;

    dev = (struct aac_dev *)cmd->device->host->hostdata;

    hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
    memset(hbacmd, 0, 96);  /* sizeof(*hbacmd) is not necessary */
    /* iu_type is a parameter of aac_hba_send */
    switch (cmd->sc_data_direction) {
    case DMA_TO_DEVICE:
        hbacmd->byte1 = 2;
        break;
    case DMA_FROM_DEVICE:
    case DMA_BIDIRECTIONAL:
        hbacmd->byte1 = 1;
        break;
    case DMA_NONE:
    default:
        break;
    }
    hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);

    bus = aac_logical_to_phys(scmd_channel(cmd));
    target = scmd_id(cmd);
    hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;

    /* we fill in reply_qid later in aac_src_deliver_message */
    /* we fill in iu_type, request_id later in aac_hba_send */
    /* we fill in emb_data_desc_count later in aac_build_sghba */

    memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
    hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));

    address = (u64)fib->hw_error_pa;
    hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
    hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
    hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);

    return hbacmd;
}

static void aac_srb_callback(void *context, struct fib *fibptr);

static int aac_scsi_64(struct fib *fib, struct scsi_cmnd *cmd)
{
    u16 fibsize;
    struct aac_srb *srbcmd = aac_scsi_common(fib, cmd);
    long ret;

    ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
    if (ret < 0)
        return ret;
    srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));

    memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
    memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
    /*
     * Build Scatter/Gather list
     */
    fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
        ((le32_to_cpu(srbcmd->sg.count) & 0xff) *
         sizeof (struct sgentry64));
    BUG_ON (fibsize > (fib->dev->max_fib_size -
                sizeof(struct aac_fibhdr)));

    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(ScsiPortCommand64, fib,
                fibsize, FsaNormal, 0, 1,
                  (fib_callback) aac_srb_callback,
                  (void *) cmd);
}
static int aac_scsi_32(struct fib *fib, struct scsi_cmnd *cmd)
{
    u16 fibsize;
    struct aac_srb *srbcmd = aac_scsi_common(fib, cmd);
    long ret;

    ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
    if (ret < 0)
        return ret;
    srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));

    memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
    memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
    /*
     * Build Scatter/Gather list
     */
    fibsize = sizeof (struct aac_srb) +
        (((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
         sizeof (struct sgentry));
    BUG_ON (fibsize > (fib->dev->max_fib_size -
                sizeof(struct aac_fibhdr)));

    /*
     * Now send the Fib to the adapter
     */
    return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
                  (fib_callback) aac_srb_callback, (void *) cmd);
}

static int aac_scsi_32_64(struct fib *fib, struct scsi_cmnd *cmd)
{
    if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
        (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
        return FAILED;
    return aac_scsi_32(fib, cmd);
}

static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
{
    struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
    struct aac_dev *dev;
    long ret;

    dev = (struct aac_dev *)cmd->device->host->hostdata;

    ret = aac_build_sghba(cmd, hbacmd,
        dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
    if (ret < 0)
        return ret;

    /*
     * Now send the HBA command to the adapter
     */
    fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
        sizeof(struct aac_hba_sgl);

    return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
                  (fib_callback) aac_hba_callback,
                  (void *) cmd);
}

int aac_issue_bmic_identify(struct aac_dev *dev, u32 bus, u32 target)
{
    struct fib *fibptr;
    struct aac_srb *srbcmd;
    struct sgmap64 *sg64;
    struct aac_ciss_identify_pd *identify_resp;
    dma_addr_t addr;
    u32 vbus, vid;
    u16 fibsize, datasize;
    int rcode = -ENOMEM;


    fibptr = aac_fib_alloc(dev);
    if (!fibptr)
        goto out;

    fibsize = sizeof(struct aac_srb) -
        sizeof(struct sgentry) + sizeof(struct sgentry64);
    datasize = sizeof(struct aac_ciss_identify_pd);

    identify_resp = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
                       GFP_KERNEL);
    if (!identify_resp)
        goto fib_free_ptr;

    vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
    vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

    aac_fib_init(fibptr);

    srbcmd = (struct aac_srb *) fib_data(fibptr);
    srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
    srbcmd->channel = cpu_to_le32(vbus);
    srbcmd->id = cpu_to_le32(vid);
    srbcmd->lun = 0;
    srbcmd->flags = cpu_to_le32(SRB_DataIn);
    srbcmd->timeout = cpu_to_le32(10);
    srbcmd->retry_limit = 0;
    srbcmd->cdb_size = cpu_to_le32(12);
    srbcmd->count = cpu_to_le32(datasize);

    memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
    srbcmd->cdb[0] = 0x26;
    srbcmd->cdb[2] = (u8)((AAC_MAX_LUN + target) & 0x00FF);
    srbcmd->cdb[6] = CISS_IDENTIFY_PHYSICAL_DEVICE;

    sg64 = (struct sgmap64 *)&srbcmd->sg;
    sg64->count = cpu_to_le32(1);
    sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
    sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
    sg64->sg[0].count = cpu_to_le32(datasize);

    rcode = aac_fib_send(ScsiPortCommand64,
        fibptr, fibsize, FsaNormal, 1, 1, NULL, NULL);

    if (identify_resp->current_queue_depth_limit <= 0 ||
        identify_resp->current_queue_depth_limit > 32)
        dev->hba_map[bus][target].qd_limit = 32;
    else
        dev->hba_map[bus][target].qd_limit =
            identify_resp->current_queue_depth_limit;

    dma_free_coherent(&dev->pdev->dev, datasize, identify_resp, addr);

    aac_fib_complete(fibptr);

fib_free_ptr:
    aac_fib_free(fibptr);
out:
    return rcode;
}
/**
 * aac_update_hba_map - update current hba map with data from FW
 * @dev: aac_dev structure
 * @phys_luns: FW information from report phys luns
 * @rescan: rescan flag
 *
 * Update our hba map with the information gathered from the FW
 */
void aac_update_hba_map(struct aac_dev *dev,
        struct aac_ciss_phys_luns_resp *phys_luns, int rescan)
{
    /* ok and extended reporting */
    u32 lun_count, nexus;
    u32 i, bus, target;
    u8 expose_flag, attribs;
    u8 devtype;

    lun_count = ((phys_luns->list_length[0] << 24)
            + (phys_luns->list_length[1] << 16)
            + (phys_luns->list_length[2] << 8)
            + (phys_luns->list_length[3])) / 24;

    for (i = 0; i < lun_count; ++i) {

        bus = phys_luns->lun[i].level2[1] & 0x3f;
        target = phys_luns->lun[i].level2[0];
        expose_flag = phys_luns->lun[i].bus >> 6;
        attribs = phys_luns->lun[i].node_ident[9];
        nexus = *((u32 *) &phys_luns->lun[i].node_ident[12]);

        if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
            continue;

        dev->hba_map[bus][target].expose = expose_flag;

        if (expose_flag != 0) {
            devtype = AAC_DEVTYPE_RAID_MEMBER;
            goto update_devtype;
        }

        if (nexus != 0 && (attribs & 8)) {
            devtype = AAC_DEVTYPE_NATIVE_RAW;
            dev->hba_map[bus][target].rmw_nexus =
                    nexus;
        } else
            devtype = AAC_DEVTYPE_ARC_RAW;

        if (devtype != AAC_DEVTYPE_NATIVE_RAW)
            goto update_devtype;

        if (aac_issue_bmic_identify(dev, bus, target) < 0)
            dev->hba_map[bus][target].qd_limit = 32;

update_devtype:
        if (rescan == AAC_INIT)
            dev->hba_map[bus][target].devtype = devtype;
        else
            dev->hba_map[bus][target].new_devtype = devtype;
    }
}
1802 */ 1803 int aac_report_phys_luns(struct aac_dev *dev, struct fib *fibptr, int rescan) 1804 { 1805 int fibsize, datasize; 1806 struct aac_ciss_phys_luns_resp *phys_luns; 1807 struct aac_srb *srbcmd; 1808 struct sgmap64 *sg64; 1809 dma_addr_t addr; 1810 u32 vbus, vid; 1811 int rcode = 0; 1812 1813 /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ 1814 fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) 1815 + sizeof(struct sgentry64); 1816 datasize = sizeof(struct aac_ciss_phys_luns_resp) 1817 + (AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun); 1818 1819 phys_luns = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr, 1820 GFP_KERNEL); 1821 if (phys_luns == NULL) { 1822 rcode = -ENOMEM; 1823 goto err_out; 1824 } 1825 1826 vbus = (u32) le16_to_cpu( 1827 dev->supplement_adapter_info.virt_device_bus); 1828 vid = (u32) le16_to_cpu( 1829 dev->supplement_adapter_info.virt_device_target); 1830 1831 aac_fib_init(fibptr); 1832 1833 srbcmd = (struct aac_srb *) fib_data(fibptr); 1834 srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); 1835 srbcmd->channel = cpu_to_le32(vbus); 1836 srbcmd->id = cpu_to_le32(vid); 1837 srbcmd->lun = 0; 1838 srbcmd->flags = cpu_to_le32(SRB_DataIn); 1839 srbcmd->timeout = cpu_to_le32(10); 1840 srbcmd->retry_limit = 0; 1841 srbcmd->cdb_size = cpu_to_le32(12); 1842 srbcmd->count = cpu_to_le32(datasize); 1843 1844 memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb)); 1845 srbcmd->cdb[0] = CISS_REPORT_PHYSICAL_LUNS; 1846 srbcmd->cdb[1] = 2; /* extended reporting */ 1847 srbcmd->cdb[8] = (u8)(datasize >> 8); 1848 srbcmd->cdb[9] = (u8)(datasize); 1849 1850 sg64 = (struct sgmap64 *) &srbcmd->sg; 1851 sg64->count = cpu_to_le32(1); 1852 sg64->sg[0].addr[1] = cpu_to_le32(upper_32_bits(addr)); 1853 sg64->sg[0].addr[0] = cpu_to_le32(lower_32_bits(addr)); 1854 sg64->sg[0].count = cpu_to_le32(datasize); 1855 1856 rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, 1857 FsaNormal, 1, 1, NULL, NULL); 1858 1859 /* analyse data */ 1860 if (rcode >= 0 && phys_luns->resp_flag == 2) { 1861 /* ok and extended reporting */ 1862 aac_update_hba_map(dev, phys_luns, rescan); 1863 } 1864 1865 dma_free_coherent(&dev->pdev->dev, datasize, phys_luns, addr); 1866 err_out: 1867 return rcode; 1868 } 1869 1870 int aac_get_adapter_info(struct aac_dev* dev) 1871 { 1872 struct fib* fibptr; 1873 int rcode; 1874 u32 tmp, bus, target; 1875 struct aac_adapter_info *info; 1876 struct aac_bus_info *command; 1877 struct aac_bus_info_response *bus_info; 1878 1879 if (!(fibptr = aac_fib_alloc(dev))) 1880 return -ENOMEM; 1881 1882 aac_fib_init(fibptr); 1883 info = (struct aac_adapter_info *) fib_data(fibptr); 1884 memset(info,0,sizeof(*info)); 1885 1886 rcode = aac_fib_send(RequestAdapterInfo, 1887 fibptr, 1888 sizeof(*info), 1889 FsaNormal, 1890 -1, 1, /* First `interrupt' command uses special wait */ 1891 NULL, 1892 NULL); 1893 1894 if (rcode < 0) { 1895 /* FIB should be freed only after 1896 * getting the response from the F/W */ 1897 if (rcode != -ERESTARTSYS) { 1898 aac_fib_complete(fibptr); 1899 aac_fib_free(fibptr); 1900 } 1901 return rcode; 1902 } 1903 memcpy(&dev->adapter_info, info, sizeof(*info)); 1904 1905 dev->supplement_adapter_info.virt_device_bus = 0xffff; 1906 if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) { 1907 struct aac_supplement_adapter_info * sinfo; 1908 1909 aac_fib_init(fibptr); 1910 1911 sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr); 1912 1913 memset(sinfo,0,sizeof(*sinfo)); 1914 1915 rcode = aac_fib_send(RequestSupplementAdapterInfo, 1916 fibptr, 1917 
sizeof(*sinfo), 1918 FsaNormal, 1919 1, 1, 1920 NULL, 1921 NULL); 1922 1923 if (rcode >= 0) 1924 memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo)); 1925 if (rcode == -ERESTARTSYS) { 1926 fibptr = aac_fib_alloc(dev); 1927 if (!fibptr) 1928 return -ENOMEM; 1929 } 1930 1931 } 1932 1933 /* reset all previous mapped devices (i.e. for init. after IOP_RESET) */ 1934 for (bus = 0; bus < AAC_MAX_BUSES; bus++) { 1935 for (target = 0; target < AAC_MAX_TARGETS; target++) { 1936 dev->hba_map[bus][target].devtype = 0; 1937 dev->hba_map[bus][target].qd_limit = 0; 1938 } 1939 } 1940 1941 /* 1942 * GetBusInfo 1943 */ 1944 1945 aac_fib_init(fibptr); 1946 1947 bus_info = (struct aac_bus_info_response *) fib_data(fibptr); 1948 1949 memset(bus_info, 0, sizeof(*bus_info)); 1950 1951 command = (struct aac_bus_info *)bus_info; 1952 1953 command->Command = cpu_to_le32(VM_Ioctl); 1954 command->ObjType = cpu_to_le32(FT_DRIVE); 1955 command->MethodId = cpu_to_le32(1); 1956 command->CtlCmd = cpu_to_le32(GetBusInfo); 1957 1958 rcode = aac_fib_send(ContainerCommand, 1959 fibptr, 1960 sizeof (*bus_info), 1961 FsaNormal, 1962 1, 1, 1963 NULL, NULL); 1964 1965 /* reasoned default */ 1966 dev->maximum_num_physicals = 16; 1967 if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) { 1968 dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus); 1969 dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount); 1970 } 1971 1972 if (!dev->sync_mode && dev->sa_firmware && 1973 dev->supplement_adapter_info.virt_device_bus != 0xffff) { 1974 /* Thor SA Firmware -> CISS_REPORT_PHYSICAL_LUNS */ 1975 rcode = aac_report_phys_luns(dev, fibptr, AAC_INIT); 1976 } 1977 1978 if (!dev->in_reset) { 1979 char buffer[16]; 1980 tmp = le32_to_cpu(dev->adapter_info.kernelrev); 1981 printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n", 1982 dev->name, 1983 dev->id, 1984 tmp>>24, 1985 (tmp>>16)&0xff, 1986 tmp&0xff, 1987 le32_to_cpu(dev->adapter_info.kernelbuild), 1988 (int)sizeof(dev->supplement_adapter_info.build_date), 1989 dev->supplement_adapter_info.build_date); 1990 tmp = le32_to_cpu(dev->adapter_info.monitorrev); 1991 printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n", 1992 dev->name, dev->id, 1993 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 1994 le32_to_cpu(dev->adapter_info.monitorbuild)); 1995 tmp = le32_to_cpu(dev->adapter_info.biosrev); 1996 printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n", 1997 dev->name, dev->id, 1998 tmp>>24,(tmp>>16)&0xff,tmp&0xff, 1999 le32_to_cpu(dev->adapter_info.biosbuild)); 2000 buffer[0] = '\0'; 2001 if (aac_get_serial_number( 2002 shost_to_class(dev->scsi_host_ptr), buffer)) 2003 printk(KERN_INFO "%s%d: serial %s", 2004 dev->name, dev->id, buffer); 2005 if (dev->supplement_adapter_info.vpd_info.tsid[0]) { 2006 printk(KERN_INFO "%s%d: TSID %.*s\n", 2007 dev->name, dev->id, 2008 (int)sizeof(dev->supplement_adapter_info 2009 .vpd_info.tsid), 2010 dev->supplement_adapter_info.vpd_info.tsid); 2011 } 2012 if (!aac_check_reset || ((aac_check_reset == 1) && 2013 (dev->supplement_adapter_info.supported_options2 & 2014 AAC_OPTION_IGNORE_RESET))) { 2015 printk(KERN_INFO "%s%d: Reset Adapter Ignored\n", 2016 dev->name, dev->id); 2017 } 2018 } 2019 2020 dev->cache_protected = 0; 2021 dev->jbod = ((dev->supplement_adapter_info.feature_bits & 2022 AAC_FEATURE_JBOD) != 0); 2023 dev->nondasd_support = 0; 2024 dev->raid_scsi_mode = 0; 2025 if(dev->adapter_info.options & AAC_OPT_NONDASD) 2026 dev->nondasd_support = 1; 2027 2028 /* 2029 * If the firmware supports ROMB RAID/SCSI mode and we are currently 2030 * in RAID/SCSI 
mode, set the flag. For now if in this mode we will 2031 * force nondasd support on. If we decide to allow the non-dasd flag 2032 * additional changes will have to be made to support 2033 * RAID/SCSI. The function aac_scsi_cmd in this module will have to be 2034 * changed to support the new dev->raid_scsi_mode flag instead of 2035 * leeching off the dev->nondasd_support flag. Also in linit.c the 2036 * function aac_detect will have to be modified where it sets up the 2037 * max number of channels based on the aac->nondasd_support flag only. 2038 */ 2039 if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) && 2040 (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) { 2041 dev->nondasd_support = 1; 2042 dev->raid_scsi_mode = 1; 2043 } 2044 if (dev->raid_scsi_mode != 0) 2045 printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n", 2046 dev->name, dev->id); 2047 2048 if (nondasd != -1) 2049 dev->nondasd_support = (nondasd!=0); 2050 if (dev->nondasd_support && !dev->in_reset) 2051 printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id); 2052 2053 if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32)) 2054 dev->needs_dac = 1; 2055 dev->dac_support = 0; 2056 if ((sizeof(dma_addr_t) > 4) && dev->needs_dac && 2057 (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) { 2058 if (!dev->in_reset) 2059 printk(KERN_INFO "%s%d: 64bit support enabled.\n", 2060 dev->name, dev->id); 2061 dev->dac_support = 1; 2062 } 2063 2064 if(dacmode != -1) { 2065 dev->dac_support = (dacmode!=0); 2066 } 2067 2068 /* avoid problems with AAC_QUIRK_SCSI_32 controllers */ 2069 if (dev->dac_support && (aac_get_driver_ident(dev->cardtype)->quirks 2070 & AAC_QUIRK_SCSI_32)) { 2071 dev->nondasd_support = 0; 2072 dev->jbod = 0; 2073 expose_physicals = 0; 2074 } 2075 2076 if (dev->dac_support) { 2077 if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) { 2078 if (!dev->in_reset) 2079 dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n"); 2080 } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) { 2081 dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n"); 2082 dev->dac_support = 0; 2083 } else { 2084 dev_info(&dev->pdev->dev, "No suitable DMA available\n"); 2085 rcode = -ENOMEM; 2086 } 2087 } 2088 /* 2089 * Deal with configuring for the individualized limits of each packet 2090 * interface. 2091 */ 2092 dev->a_ops.adapter_scsi = (dev->dac_support) 2093 ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32) 2094 ? aac_scsi_32_64 2095 : aac_scsi_64) 2096 : aac_scsi_32; 2097 if (dev->raw_io_interface) { 2098 dev->a_ops.adapter_bounds = (dev->raw_io_64) 2099 ?
aac_bounds_64 2100 : aac_bounds_32; 2101 dev->a_ops.adapter_read = aac_read_raw_io; 2102 dev->a_ops.adapter_write = aac_write_raw_io; 2103 } else { 2104 dev->a_ops.adapter_bounds = aac_bounds_32; 2105 dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size - 2106 sizeof(struct aac_fibhdr) - 2107 sizeof(struct aac_write) + sizeof(struct sgentry)) / 2108 sizeof(struct sgentry); 2109 if (dev->dac_support) { 2110 dev->a_ops.adapter_read = aac_read_block64; 2111 dev->a_ops.adapter_write = aac_write_block64; 2112 /* 2113 * 38 scatter gather elements 2114 */ 2115 dev->scsi_host_ptr->sg_tablesize = 2116 (dev->max_fib_size - 2117 sizeof(struct aac_fibhdr) - 2118 sizeof(struct aac_write64) + 2119 sizeof(struct sgentry64)) / 2120 sizeof(struct sgentry64); 2121 } else { 2122 dev->a_ops.adapter_read = aac_read_block; 2123 dev->a_ops.adapter_write = aac_write_block; 2124 } 2125 dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT; 2126 if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) { 2127 /* 2128 * Worst case size that could cause sg overflow when 2129 * we break up SG elements that are larger than 64KB. 2130 * Would be nice if we could tell the SCSI layer what 2131 * the maximum SG element size can be. Worst case is 2132 * (sg_tablesize-1) 4KB elements with one 64KB 2133 * element. 2134 * 32bit -> 468 or 238KB 64bit -> 424 or 212KB 2135 */ 2136 dev->scsi_host_ptr->max_sectors = 2137 (dev->scsi_host_ptr->sg_tablesize * 8) + 112; 2138 } 2139 } 2140 if (!dev->sync_mode && dev->sa_firmware && 2141 dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE) 2142 dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize = 2143 HBA_MAX_SG_SEPARATE; 2144 2145 /* FIB should be freed only after getting the response from the F/W */ 2146 if (rcode != -ERESTARTSYS) { 2147 aac_fib_complete(fibptr); 2148 aac_fib_free(fibptr); 2149 } 2150 2151 return rcode; 2152 } 2153 2154 2155 static void io_callback(void *context, struct fib * fibptr) 2156 { 2157 struct aac_dev *dev; 2158 struct aac_read_reply *readreply; 2159 struct scsi_cmnd *scsicmd; 2160 u32 cid; 2161 2162 scsicmd = (struct scsi_cmnd *) context; 2163 2164 if (!aac_valid_context(scsicmd, fibptr)) 2165 return; 2166 2167 dev = fibptr->dev; 2168 cid = scmd_id(scsicmd); 2169 2170 if (nblank(dprintk(x))) { 2171 u64 lba; 2172 switch (scsicmd->cmnd[0]) { 2173 case WRITE_6: 2174 case READ_6: 2175 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | 2176 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 2177 break; 2178 case WRITE_16: 2179 case READ_16: 2180 lba = ((u64)scsicmd->cmnd[2] << 56) | 2181 ((u64)scsicmd->cmnd[3] << 48) | 2182 ((u64)scsicmd->cmnd[4] << 40) | 2183 ((u64)scsicmd->cmnd[5] << 32) | 2184 ((u64)scsicmd->cmnd[6] << 24) | 2185 (scsicmd->cmnd[7] << 16) | 2186 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 2187 break; 2188 case WRITE_12: 2189 case READ_12: 2190 lba = ((u64)scsicmd->cmnd[2] << 24) | 2191 (scsicmd->cmnd[3] << 16) | 2192 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2193 break; 2194 default: 2195 lba = ((u64)scsicmd->cmnd[2] << 24) | 2196 (scsicmd->cmnd[3] << 16) | 2197 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2198 break; 2199 } 2200 printk(KERN_DEBUG 2201 "io_callback[cpu %d]: lba = %llu, t = %ld.\n", 2202 smp_processor_id(), (unsigned long long)lba, jiffies); 2203 } 2204 2205 BUG_ON(fibptr == NULL); 2206 2207 scsi_dma_unmap(scsicmd); 2208 2209 readreply = (struct aac_read_reply *)fib_data(fibptr); 2210 switch (le32_to_cpu(readreply->status)) { 2211 case ST_OK: 2212 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2213 SAM_STAT_GOOD; 2214 
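/* successful I/O: clear any stale sense key for this container */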
dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE; 2215 break; 2216 case ST_NOT_READY: 2217 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2218 SAM_STAT_CHECK_CONDITION; 2219 set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY, 2220 SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0); 2221 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2222 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2223 SCSI_SENSE_BUFFERSIZE)); 2224 break; 2225 case ST_MEDERR: 2226 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2227 SAM_STAT_CHECK_CONDITION; 2228 set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR, 2229 SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0); 2230 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2231 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2232 SCSI_SENSE_BUFFERSIZE)); 2233 break; 2234 default: 2235 #ifdef AAC_DETAILED_STATUS_INFO 2236 printk(KERN_WARNING "io_callback: io failed, status = %d\n", 2237 le32_to_cpu(readreply->status)); 2238 #endif 2239 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2240 SAM_STAT_CHECK_CONDITION; 2241 set_sense(&dev->fsa_dev[cid].sense_data, 2242 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 2243 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 2244 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2245 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2246 SCSI_SENSE_BUFFERSIZE)); 2247 break; 2248 } 2249 aac_fib_complete(fibptr); 2250 2251 scsicmd->scsi_done(scsicmd); 2252 } 2253 2254 static int aac_read(struct scsi_cmnd * scsicmd) 2255 { 2256 u64 lba; 2257 u32 count; 2258 int status; 2259 struct aac_dev *dev; 2260 struct fib * cmd_fibcontext; 2261 int cid; 2262 2263 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2264 /* 2265 * Get block address and transfer length 2266 */ 2267 switch (scsicmd->cmnd[0]) { 2268 case READ_6: 2269 dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd))); 2270 2271 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | 2272 (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 2273 count = scsicmd->cmnd[4]; 2274 2275 if (count == 0) 2276 count = 256; 2277 break; 2278 case READ_16: 2279 dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd))); 2280 2281 lba = ((u64)scsicmd->cmnd[2] << 56) | 2282 ((u64)scsicmd->cmnd[3] << 48) | 2283 ((u64)scsicmd->cmnd[4] << 40) | 2284 ((u64)scsicmd->cmnd[5] << 32) | 2285 ((u64)scsicmd->cmnd[6] << 24) | 2286 (scsicmd->cmnd[7] << 16) | 2287 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 2288 count = (scsicmd->cmnd[10] << 24) | 2289 (scsicmd->cmnd[11] << 16) | 2290 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 2291 break; 2292 case READ_12: 2293 dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd))); 2294 2295 lba = ((u64)scsicmd->cmnd[2] << 24) | 2296 (scsicmd->cmnd[3] << 16) | 2297 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2298 count = (scsicmd->cmnd[6] << 24) | 2299 (scsicmd->cmnd[7] << 16) | 2300 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 2301 break; 2302 default: 2303 dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd))); 2304 2305 lba = ((u64)scsicmd->cmnd[2] << 24) | 2306 (scsicmd->cmnd[3] << 16) | 2307 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2308 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 2309 break; 2310 } 2311 2312 if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) { 2313 cid = scmd_id(scsicmd); 2314 dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); 2315 
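/* the request runs past the end of the container: fail it with a check condition */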
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2316 SAM_STAT_CHECK_CONDITION; 2317 set_sense(&dev->fsa_dev[cid].sense_data, 2318 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 2319 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 2320 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2321 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2322 SCSI_SENSE_BUFFERSIZE)); 2323 scsicmd->scsi_done(scsicmd); 2324 return 1; 2325 } 2326 2327 dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n", 2328 smp_processor_id(), (unsigned long long)lba, jiffies)); 2329 if (aac_adapter_bounds(dev,scsicmd,lba)) 2330 return 0; 2331 /* 2332 * Allocate and initialize a Fib 2333 */ 2334 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2335 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2336 status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); 2337 2338 /* 2339 * Check that the command queued to the controller 2340 */ 2341 if (status == -EINPROGRESS) 2342 return 0; 2343 2344 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); 2345 /* 2346 * For some reason, the Fib didn't queue, return QUEUE_FULL 2347 */ 2348 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 2349 scsicmd->scsi_done(scsicmd); 2350 aac_fib_complete(cmd_fibcontext); 2351 aac_fib_free(cmd_fibcontext); 2352 return 0; 2353 } 2354 2355 static int aac_write(struct scsi_cmnd * scsicmd) 2356 { 2357 u64 lba; 2358 u32 count; 2359 int fua; 2360 int status; 2361 struct aac_dev *dev; 2362 struct fib * cmd_fibcontext; 2363 int cid; 2364 2365 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 2366 /* 2367 * Get block address and transfer length 2368 */ 2369 if (scsicmd->cmnd[0] == WRITE_6) /* 6 byte command */ 2370 { 2371 lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3]; 2372 count = scsicmd->cmnd[4]; 2373 if (count == 0) 2374 count = 256; 2375 fua = 0; 2376 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ 2377 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd))); 2378 2379 lba = ((u64)scsicmd->cmnd[2] << 56) | 2380 ((u64)scsicmd->cmnd[3] << 48) | 2381 ((u64)scsicmd->cmnd[4] << 40) | 2382 ((u64)scsicmd->cmnd[5] << 32) | 2383 ((u64)scsicmd->cmnd[6] << 24) | 2384 (scsicmd->cmnd[7] << 16) | 2385 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 2386 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | 2387 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 2388 fua = scsicmd->cmnd[1] & 0x8; 2389 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ 2390 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd))); 2391 2392 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) 2393 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2394 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) 2395 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 2396 fua = scsicmd->cmnd[1] & 0x8; 2397 } else { 2398 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd))); 2399 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2400 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 2401 fua = scsicmd->cmnd[1] & 0x8; 2402 } 2403 2404 if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) { 2405 cid = scmd_id(scsicmd); 2406 dprintk((KERN_DEBUG "aacraid: Illegal lba\n")); 2407 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2408 SAM_STAT_CHECK_CONDITION;
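/* out-of-range write: same check-condition handling as in aac_read() */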
2409 set_sense(&dev->fsa_dev[cid].sense_data, 2410 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 2411 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 2412 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2413 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2414 SCSI_SENSE_BUFFERSIZE)); 2415 scsicmd->scsi_done(scsicmd); 2416 return 1; 2417 } 2418 2419 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", 2420 smp_processor_id(), (unsigned long long)lba, jiffies)); 2421 if (aac_adapter_bounds(dev,scsicmd,lba)) 2422 return 0; 2423 /* 2424 * Allocate and initialize a Fib then setup a BlockWrite command 2425 */ 2426 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2427 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2428 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); 2429 2430 /* 2431 * Check that the command queued to the controller 2432 */ 2433 if (status == -EINPROGRESS) 2434 return 0; 2435 2436 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); 2437 /* 2438 * For some reason, the Fib didn't queue, return QUEUE_FULL 2439 */ 2440 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL; 2441 scsicmd->scsi_done(scsicmd); 2442 2443 aac_fib_complete(cmd_fibcontext); 2444 aac_fib_free(cmd_fibcontext); 2445 return 0; 2446 } 2447 2448 static void synchronize_callback(void *context, struct fib *fibptr) 2449 { 2450 struct aac_synchronize_reply *synchronizereply; 2451 struct scsi_cmnd *cmd; 2452 2453 cmd = context; 2454 2455 if (!aac_valid_context(cmd, fibptr)) 2456 return; 2457 2458 dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n", 2459 smp_processor_id(), jiffies)); 2460 BUG_ON(fibptr == NULL); 2461 2462 2463 synchronizereply = fib_data(fibptr); 2464 if (le32_to_cpu(synchronizereply->status) == CT_OK) 2465 cmd->result = DID_OK << 16 | 2466 COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 2467 else { 2468 struct scsi_device *sdev = cmd->device; 2469 struct aac_dev *dev = fibptr->dev; 2470 u32 cid = sdev_id(sdev); 2471 printk(KERN_WARNING 2472 "synchronize_callback: synchronize failed, status = %d\n", 2473 le32_to_cpu(synchronizereply->status)); 2474 cmd->result = DID_OK << 16 | 2475 COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 2476 set_sense(&dev->fsa_dev[cid].sense_data, 2477 HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE, 2478 ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0); 2479 memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2480 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2481 SCSI_SENSE_BUFFERSIZE)); 2482 } 2483 2484 aac_fib_complete(fibptr); 2485 aac_fib_free(fibptr); 2486 cmd->scsi_done(cmd); 2487 } 2488 2489 static int aac_synchronize(struct scsi_cmnd *scsicmd) 2490 { 2491 int status; 2492 struct fib *cmd_fibcontext; 2493 struct aac_synchronize *synchronizecmd; 2494 struct scsi_cmnd *cmd; 2495 struct scsi_device *sdev = scsicmd->device; 2496 int active = 0; 2497 struct aac_dev *aac; 2498 u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | 2499 (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 2500 u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 2501 unsigned long flags; 2502 2503 /* 2504 * Wait for all outstanding queued commands to complete to this 2505 * specific target (block). 
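 * Only writes still owned by the firmware are considered, and only those
 * whose LBA range overlaps the range being flushed (a transfer length of
 * zero in the SYNCHRONIZE_CACHE CDB means "from lba to the end of the
 * unit", so in that case no upper bound is applied).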
2506 */ 2507 spin_lock_irqsave(&sdev->list_lock, flags); 2508 list_for_each_entry(cmd, &sdev->cmd_list, list) 2509 if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) { 2510 u64 cmnd_lba; 2511 u32 cmnd_count; 2512 2513 if (cmd->cmnd[0] == WRITE_6) { 2514 cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) | 2515 (cmd->cmnd[2] << 8) | 2516 cmd->cmnd[3]; 2517 cmnd_count = cmd->cmnd[4]; 2518 if (cmnd_count == 0) 2519 cmnd_count = 256; 2520 } else if (cmd->cmnd[0] == WRITE_16) { 2521 cmnd_lba = ((u64)cmd->cmnd[2] << 56) | 2522 ((u64)cmd->cmnd[3] << 48) | 2523 ((u64)cmd->cmnd[4] << 40) | 2524 ((u64)cmd->cmnd[5] << 32) | 2525 ((u64)cmd->cmnd[6] << 24) | 2526 (cmd->cmnd[7] << 16) | 2527 (cmd->cmnd[8] << 8) | 2528 cmd->cmnd[9]; 2529 cmnd_count = (cmd->cmnd[10] << 24) | 2530 (cmd->cmnd[11] << 16) | 2531 (cmd->cmnd[12] << 8) | 2532 cmd->cmnd[13]; 2533 } else if (cmd->cmnd[0] == WRITE_12) { 2534 cmnd_lba = ((u64)cmd->cmnd[2] << 24) | 2535 (cmd->cmnd[3] << 16) | 2536 (cmd->cmnd[4] << 8) | 2537 cmd->cmnd[5]; 2538 cmnd_count = (cmd->cmnd[6] << 24) | 2539 (cmd->cmnd[7] << 16) | 2540 (cmd->cmnd[8] << 8) | 2541 cmd->cmnd[9]; 2542 } else if (cmd->cmnd[0] == WRITE_10) { 2543 cmnd_lba = ((u64)cmd->cmnd[2] << 24) | 2544 (cmd->cmnd[3] << 16) | 2545 (cmd->cmnd[4] << 8) | 2546 cmd->cmnd[5]; 2547 cmnd_count = (cmd->cmnd[7] << 8) | 2548 cmd->cmnd[8]; 2549 } else 2550 continue; 2551 if (((cmnd_lba + cmnd_count) < lba) || 2552 (count && ((lba + count) < cmnd_lba))) 2553 continue; 2554 ++active; 2555 break; 2556 } 2557 2558 spin_unlock_irqrestore(&sdev->list_lock, flags); 2559 2560 /* 2561 * Yield the processor (requeue for later) 2562 */ 2563 if (active) 2564 return SCSI_MLQUEUE_DEVICE_BUSY; 2565 2566 aac = (struct aac_dev *)sdev->host->hostdata; 2567 if (aac->in_reset) 2568 return SCSI_MLQUEUE_HOST_BUSY; 2569 2570 /* 2571 * Allocate and initialize a Fib 2572 */ 2573 if (!(cmd_fibcontext = aac_fib_alloc(aac))) 2574 return SCSI_MLQUEUE_HOST_BUSY; 2575 2576 aac_fib_init(cmd_fibcontext); 2577 2578 synchronizecmd = fib_data(cmd_fibcontext); 2579 synchronizecmd->command = cpu_to_le32(VM_ContainerConfig); 2580 synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE); 2581 synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); 2582 synchronizecmd->count = 2583 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); 2584 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2585 2586 /* 2587 * Now send the Fib to the adapter 2588 */ 2589 status = aac_fib_send(ContainerCommand, 2590 cmd_fibcontext, 2591 sizeof(struct aac_synchronize), 2592 FsaNormal, 2593 0, 1, 2594 (fib_callback)synchronize_callback, 2595 (void *)scsicmd); 2596 2597 /* 2598 * Check that the command queued to the controller 2599 */ 2600 if (status == -EINPROGRESS) 2601 return 0; 2602 2603 printk(KERN_WARNING 2604 "aac_synchronize: aac_fib_send failed with status: %d.\n", status); 2605 aac_fib_complete(cmd_fibcontext); 2606 aac_fib_free(cmd_fibcontext); 2607 return SCSI_MLQUEUE_HOST_BUSY; 2608 } 2609 2610 static void aac_start_stop_callback(void *context, struct fib *fibptr) 2611 { 2612 struct scsi_cmnd *scsicmd = context; 2613 2614 if (!aac_valid_context(scsicmd, fibptr)) 2615 return; 2616 2617 BUG_ON(fibptr == NULL); 2618 2619 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 2620 2621 aac_fib_complete(fibptr); 2622 aac_fib_free(fibptr); 2623 scsicmd->scsi_done(scsicmd); 2624 } 2625 2626 static int aac_start_stop(struct scsi_cmnd *scsicmd) 2627 { 2628 int status; 2629 struct fib *cmd_fibcontext; 2630 struct aac_power_management *pmcmd; 2631 struct scsi_device *sdev = 
scsicmd->device; 2632 struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata; 2633 2634 if (!(aac->supplement_adapter_info.supported_options2 & 2635 AAC_OPTION_POWER_MANAGEMENT)) { 2636 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 2637 SAM_STAT_GOOD; 2638 scsicmd->scsi_done(scsicmd); 2639 return 0; 2640 } 2641 2642 if (aac->in_reset) 2643 return SCSI_MLQUEUE_HOST_BUSY; 2644 2645 /* 2646 * Allocate and initialize a Fib 2647 */ 2648 cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd); 2649 2650 aac_fib_init(cmd_fibcontext); 2651 2652 pmcmd = fib_data(cmd_fibcontext); 2653 pmcmd->command = cpu_to_le32(VM_ContainerConfig); 2654 pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT); 2655 /* Eject bit ignored, not relevant */ 2656 pmcmd->sub = (scsicmd->cmnd[4] & 1) ? 2657 cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT); 2658 pmcmd->cid = cpu_to_le32(sdev_id(sdev)); 2659 pmcmd->parm = (scsicmd->cmnd[1] & 1) ? 2660 cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; 2661 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2662 2663 /* 2664 * Now send the Fib to the adapter 2665 */ 2666 status = aac_fib_send(ContainerCommand, 2667 cmd_fibcontext, 2668 sizeof(struct aac_power_management), 2669 FsaNormal, 2670 0, 1, 2671 (fib_callback)aac_start_stop_callback, 2672 (void *)scsicmd); 2673 2674 /* 2675 * Check that the command queued to the controller 2676 */ 2677 if (status == -EINPROGRESS) 2678 return 0; 2679 2680 aac_fib_complete(cmd_fibcontext); 2681 aac_fib_free(cmd_fibcontext); 2682 return SCSI_MLQUEUE_HOST_BUSY; 2683 } 2684 2685 /** 2686 * aac_scsi_cmd() - Process SCSI command 2687 * @scsicmd: SCSI command block 2688 * 2689 * Emulate a SCSI command and queue the required request for the 2690 * aacraid firmware. 2691 */ 2692 2693 int aac_scsi_cmd(struct scsi_cmnd * scsicmd) 2694 { 2695 u32 cid, bus; 2696 struct Scsi_Host *host = scsicmd->device->host; 2697 struct aac_dev *dev = (struct aac_dev *)host->hostdata; 2698 struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev; 2699 2700 if (fsa_dev_ptr == NULL) 2701 return -1; 2702 /* 2703 * If the bus, id or lun is out of range, return fail 2704 * Test does not apply to ID 16, the pseudo id for the controller 2705 * itself. 
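 * (Container commands are validated against maximum_num_containers below;
 * physical pass-through targets are checked against the hba_map bounds.)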
2706 */ 2707 cid = scmd_id(scsicmd); 2708 if (cid != host->this_id) { 2709 if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) { 2710 if((cid >= dev->maximum_num_containers) || 2711 (scsicmd->device->lun != 0)) { 2712 scsicmd->result = DID_NO_CONNECT << 16; 2713 goto scsi_done_ret; 2714 } 2715 2716 /* 2717 * If the target container doesn't exist, it may have 2718 * been newly created 2719 */ 2720 if (((fsa_dev_ptr[cid].valid & 1) == 0) || 2721 (fsa_dev_ptr[cid].sense_data.sense_key == 2722 NOT_READY)) { 2723 switch (scsicmd->cmnd[0]) { 2724 case SERVICE_ACTION_IN_16: 2725 if (!(dev->raw_io_interface) || 2726 !(dev->raw_io_64) || 2727 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 2728 break; 2729 case INQUIRY: 2730 case READ_CAPACITY: 2731 case TEST_UNIT_READY: 2732 if (dev->in_reset) 2733 return -1; 2734 return _aac_probe_container(scsicmd, 2735 aac_probe_container_callback2); 2736 default: 2737 break; 2738 } 2739 } 2740 } else { /* check for physical non-dasd devices */ 2741 bus = aac_logical_to_phys(scmd_channel(scsicmd)); 2742 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && 2743 (dev->hba_map[bus][cid].expose 2744 == AAC_HIDE_DISK)){ 2745 if (scsicmd->cmnd[0] == INQUIRY) { 2746 scsicmd->result = DID_NO_CONNECT << 16; 2747 goto scsi_done_ret; 2748 } 2749 } 2750 2751 if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS && 2752 dev->hba_map[bus][cid].devtype 2753 == AAC_DEVTYPE_NATIVE_RAW) { 2754 if (dev->in_reset) 2755 return -1; 2756 return aac_send_hba_fib(scsicmd); 2757 } else if (dev->nondasd_support || expose_physicals || 2758 dev->jbod) { 2759 if (dev->in_reset) 2760 return -1; 2761 return aac_send_srb_fib(scsicmd); 2762 } else { 2763 scsicmd->result = DID_NO_CONNECT << 16; 2764 goto scsi_done_ret; 2765 } 2766 } 2767 } 2768 /* 2769 * else Command for the controller itself 2770 */ 2771 else if ((scsicmd->cmnd[0] != INQUIRY) && /* only INQUIRY & TUR cmnd supported for controller */ 2772 (scsicmd->cmnd[0] != TEST_UNIT_READY)) 2773 { 2774 dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0])); 2775 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION; 2776 set_sense(&dev->fsa_dev[cid].sense_data, 2777 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 2778 ASENCODE_INVALID_COMMAND, 0, 0); 2779 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 2780 min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), 2781 SCSI_SENSE_BUFFERSIZE)); 2782 goto scsi_done_ret; 2783 } 2784 2785 switch (scsicmd->cmnd[0]) { 2786 case READ_6: 2787 case READ_10: 2788 case READ_12: 2789 case READ_16: 2790 if (dev->in_reset) 2791 return -1; 2792 return aac_read(scsicmd); 2793 2794 case WRITE_6: 2795 case WRITE_10: 2796 case WRITE_12: 2797 case WRITE_16: 2798 if (dev->in_reset) 2799 return -1; 2800 return aac_write(scsicmd); 2801 2802 case SYNCHRONIZE_CACHE: 2803 if (((aac_cache & 6) == 6) && dev->cache_protected) { 2804 scsicmd->result = AAC_STAT_GOOD; 2805 break; 2806 } 2807 /* Issue FIB to tell Firmware to flush it's cache */ 2808 if ((aac_cache & 6) != 2) 2809 return aac_synchronize(scsicmd); 2810 case INQUIRY: 2811 { 2812 struct inquiry_data inq_data; 2813 2814 dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid)); 2815 memset(&inq_data, 0, sizeof (struct inquiry_data)); 2816 2817 if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) { 2818 char *arr = (char *)&inq_data; 2819 2820 /* EVPD bit set */ 2821 arr[0] = (scmd_id(scsicmd) == host->this_id) ? 
2822 INQD_PDT_PROC : INQD_PDT_DA; 2823 if (scsicmd->cmnd[2] == 0) { 2824 /* supported vital product data pages */ 2825 arr[3] = 3; 2826 arr[4] = 0x0; 2827 arr[5] = 0x80; 2828 arr[6] = 0x83; 2829 arr[1] = scsicmd->cmnd[2]; 2830 scsi_sg_copy_from_buffer(scsicmd, &inq_data, 2831 sizeof(inq_data)); 2832 scsicmd->result = AAC_STAT_GOOD; 2833 } else if (scsicmd->cmnd[2] == 0x80) { 2834 /* unit serial number page */ 2835 arr[3] = setinqserial(dev, &arr[4], 2836 scmd_id(scsicmd)); 2837 arr[1] = scsicmd->cmnd[2]; 2838 scsi_sg_copy_from_buffer(scsicmd, &inq_data, 2839 sizeof(inq_data)); 2840 if (aac_wwn != 2) 2841 return aac_get_container_serial( 2842 scsicmd); 2843 scsicmd->result = AAC_STAT_GOOD; 2844 } else if (scsicmd->cmnd[2] == 0x83) { 2845 /* vpd page 0x83 - Device Identification Page */ 2846 char *sno = (char *)&inq_data; 2847 sno[3] = setinqserial(dev, &sno[4], 2848 scmd_id(scsicmd)); 2849 if (aac_wwn != 2) 2850 return aac_get_container_serial( 2851 scsicmd); 2852 scsicmd->result = AAC_STAT_GOOD; 2853 } else { 2854 /* vpd page not implemented */ 2855 scsicmd->result = DID_OK << 16 | 2856 COMMAND_COMPLETE << 8 | 2857 SAM_STAT_CHECK_CONDITION; 2858 set_sense(&dev->fsa_dev[cid].sense_data, 2859 ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD, 2860 ASENCODE_NO_SENSE, 7, 2); 2861 memcpy(scsicmd->sense_buffer, 2862 &dev->fsa_dev[cid].sense_data, 2863 min_t(size_t, 2864 sizeof(dev->fsa_dev[cid].sense_data), 2865 SCSI_SENSE_BUFFERSIZE)); 2866 } 2867 break; 2868 } 2869 inq_data.inqd_ver = 2; /* claim compliance to SCSI-2 */ 2870 inq_data.inqd_rdf = 2; /* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */ 2871 inq_data.inqd_len = 31; 2872 /*Format for "pad2" is RelAdr | WBus32 | WBus16 | Sync | Linked |Reserved| CmdQue | SftRe */ 2873 inq_data.inqd_pad2= 0x32 ; /*WBus16|Sync|CmdQue */ 2874 /* 2875 * Set the Vendor, Product, and Revision Level 2876 * see: <vendor>.c i.e. 
aac.c 2877 */ 2878 if (cid == host->this_id) { 2879 setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types)); 2880 inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */ 2881 scsi_sg_copy_from_buffer(scsicmd, &inq_data, 2882 sizeof(inq_data)); 2883 scsicmd->result = AAC_STAT_GOOD; 2884 break; 2885 } 2886 if (dev->in_reset) 2887 return -1; 2888 setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type); 2889 inq_data.inqd_pdt = INQD_PDT_DA; /* Direct/random access device */ 2890 scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data)); 2891 return aac_get_container_name(scsicmd); 2892 } 2893 case SERVICE_ACTION_IN_16: 2894 if (!(dev->raw_io_interface) || 2895 !(dev->raw_io_64) || 2896 ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16)) 2897 break; 2898 { 2899 u64 capacity; 2900 char cp[13]; 2901 unsigned int alloc_len; 2902 2903 dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n")); 2904 capacity = fsa_dev_ptr[cid].size - 1; 2905 cp[0] = (capacity >> 56) & 0xff; 2906 cp[1] = (capacity >> 48) & 0xff; 2907 cp[2] = (capacity >> 40) & 0xff; 2908 cp[3] = (capacity >> 32) & 0xff; 2909 cp[4] = (capacity >> 24) & 0xff; 2910 cp[5] = (capacity >> 16) & 0xff; 2911 cp[6] = (capacity >> 8) & 0xff; 2912 cp[7] = (capacity >> 0) & 0xff; 2913 cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; 2914 cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; 2915 cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; 2916 cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff; 2917 cp[12] = 0; 2918 2919 alloc_len = ((scsicmd->cmnd[10] << 24) 2920 + (scsicmd->cmnd[11] << 16) 2921 + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]); 2922 2923 alloc_len = min_t(size_t, alloc_len, sizeof(cp)); 2924 scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len); 2925 if (alloc_len < scsi_bufflen(scsicmd)) 2926 scsi_set_resid(scsicmd, 2927 scsi_bufflen(scsicmd) - alloc_len); 2928 2929 /* Do not cache partition table for arrays */ 2930 scsicmd->device->removable = 1; 2931 2932 scsicmd->result = AAC_STAT_GOOD; 2933 break; 2934 } 2935 2936 case READ_CAPACITY: 2937 { 2938 u32 capacity; 2939 char cp[8]; 2940 2941 dprintk((KERN_DEBUG "READ CAPACITY command.\n")); 2942 if (fsa_dev_ptr[cid].size <= 0x100000000ULL) 2943 capacity = fsa_dev_ptr[cid].size - 1; 2944 else 2945 capacity = (u32)-1; 2946 2947 cp[0] = (capacity >> 24) & 0xff; 2948 cp[1] = (capacity >> 16) & 0xff; 2949 cp[2] = (capacity >> 8) & 0xff; 2950 cp[3] = (capacity >> 0) & 0xff; 2951 cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff; 2952 cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff; 2953 cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff; 2954 cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff; 2955 scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp)); 2956 /* Do not cache partition table for arrays */ 2957 scsicmd->device->removable = 1; 2958 scsicmd->result = AAC_STAT_GOOD; 2959 break; 2960 } 2961 2962 case MODE_SENSE: 2963 { 2964 int mode_buf_length = 4; 2965 u32 capacity; 2966 aac_modep_data mpd; 2967 2968 if (fsa_dev_ptr[cid].size <= 0x100000000ULL) 2969 capacity = fsa_dev_ptr[cid].size - 1; 2970 else 2971 capacity = (u32)-1; 2972 2973 dprintk((KERN_DEBUG "MODE SENSE command.\n")); 2974 memset((char *)&mpd, 0, sizeof(aac_modep_data)); 2975 2976 /* Mode data length */ 2977 mpd.hd.data_length = sizeof(mpd.hd) - 1; 2978 /* Medium type - default */ 2979 mpd.hd.med_type = 0; 2980 /* Device-specific param, 2981 bit 8: 0/1 = write enabled/protected 2982 bit 4: 0/1 = FUA enabled */ 2983 mpd.hd.dev_par = 0; 2984 2985 if (dev->raw_io_interface && ((aac_cache & 5) != 
1)) 2986 mpd.hd.dev_par = 0x10; 2987 if (scsicmd->cmnd[1] & 0x8) 2988 mpd.hd.bd_length = 0; /* Block descriptor length */ 2989 else { 2990 mpd.hd.bd_length = sizeof(mpd.bd); 2991 mpd.hd.data_length += mpd.hd.bd_length; 2992 mpd.bd.block_length[0] = 2993 (fsa_dev_ptr[cid].block_size >> 16) & 0xff; 2994 mpd.bd.block_length[1] = 2995 (fsa_dev_ptr[cid].block_size >> 8) & 0xff; 2996 mpd.bd.block_length[2] = 2997 fsa_dev_ptr[cid].block_size & 0xff; 2998 2999 mpd.mpc_buf[0] = scsicmd->cmnd[2]; 3000 if (scsicmd->cmnd[2] == 0x1C) { 3001 /* page length */ 3002 mpd.mpc_buf[1] = 0xa; 3003 /* Mode data length */ 3004 mpd.hd.data_length = 23; 3005 } else { 3006 /* Mode data length */ 3007 mpd.hd.data_length = 15; 3008 } 3009 3010 if (capacity > 0xffffff) { 3011 mpd.bd.block_count[0] = 0xff; 3012 mpd.bd.block_count[1] = 0xff; 3013 mpd.bd.block_count[2] = 0xff; 3014 } else { 3015 mpd.bd.block_count[0] = (capacity >> 16) & 0xff; 3016 mpd.bd.block_count[1] = (capacity >> 8) & 0xff; 3017 mpd.bd.block_count[2] = capacity & 0xff; 3018 } 3019 } 3020 if (((scsicmd->cmnd[2] & 0x3f) == 8) || 3021 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { 3022 mpd.hd.data_length += 3; 3023 mpd.mpc_buf[0] = 8; 3024 mpd.mpc_buf[1] = 1; 3025 mpd.mpc_buf[2] = ((aac_cache & 6) == 2) 3026 ? 0 : 0x04; /* WCE */ 3027 mode_buf_length = sizeof(mpd); 3028 } 3029 3030 if (mode_buf_length > scsicmd->cmnd[4]) 3031 mode_buf_length = scsicmd->cmnd[4]; 3032 else 3033 mode_buf_length = sizeof(mpd); 3034 scsi_sg_copy_from_buffer(scsicmd, 3035 (char *)&mpd, 3036 mode_buf_length); 3037 scsicmd->result = AAC_STAT_GOOD; 3038 break; 3039 } 3040 case MODE_SENSE_10: 3041 { 3042 u32 capacity; 3043 int mode_buf_length = 8; 3044 aac_modep10_data mpd10; 3045 3046 if (fsa_dev_ptr[cid].size <= 0x100000000ULL) 3047 capacity = fsa_dev_ptr[cid].size - 1; 3048 else 3049 capacity = (u32)-1; 3050 3051 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); 3052 memset((char *)&mpd10, 0, sizeof(aac_modep10_data)); 3053 /* Mode data length (MSB) */ 3054 mpd10.hd.data_length[0] = 0; 3055 /* Mode data length (LSB) */ 3056 mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; 3057 /* Medium type - default */ 3058 mpd10.hd.med_type = 0; 3059 /* Device-specific param, 3060 bit 8: 0/1 = write enabled/protected 3061 bit 4: 0/1 = FUA enabled */ 3062 mpd10.hd.dev_par = 0; 3063 3064 if (dev->raw_io_interface && ((aac_cache & 5) != 1)) 3065 mpd10.hd.dev_par = 0x10; 3066 mpd10.hd.rsrvd[0] = 0; /* reserved */ 3067 mpd10.hd.rsrvd[1] = 0; /* reserved */ 3068 if (scsicmd->cmnd[1] & 0x8) { 3069 /* Block descriptor length (MSB) */ 3070 mpd10.hd.bd_length[0] = 0; 3071 /* Block descriptor length (LSB) */ 3072 mpd10.hd.bd_length[1] = 0; 3073 } else { 3074 mpd10.hd.bd_length[0] = 0; 3075 mpd10.hd.bd_length[1] = sizeof(mpd10.bd); 3076 3077 mpd10.hd.data_length[1] += mpd10.hd.bd_length[1]; 3078 3079 mpd10.bd.block_length[0] = 3080 (fsa_dev_ptr[cid].block_size >> 16) & 0xff; 3081 mpd10.bd.block_length[1] = 3082 (fsa_dev_ptr[cid].block_size >> 8) & 0xff; 3083 mpd10.bd.block_length[2] = 3084 fsa_dev_ptr[cid].block_size & 0xff; 3085 3086 if (capacity > 0xffffff) { 3087 mpd10.bd.block_count[0] = 0xff; 3088 mpd10.bd.block_count[1] = 0xff; 3089 mpd10.bd.block_count[2] = 0xff; 3090 } else { 3091 mpd10.bd.block_count[0] = 3092 (capacity >> 16) & 0xff; 3093 mpd10.bd.block_count[1] = 3094 (capacity >> 8) & 0xff; 3095 mpd10.bd.block_count[2] = 3096 capacity & 0xff; 3097 } 3098 } 3099 if (((scsicmd->cmnd[2] & 0x3f) == 8) || 3100 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) { 3101 mpd10.hd.data_length[1] += 3; 3102 
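/* mode page 8 (caching), page length 1; WCE is reported unless write caching is disabled via the aac_cache setting */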
mpd10.mpc_buf[0] = 8; 3103 mpd10.mpc_buf[1] = 1; 3104 mpd10.mpc_buf[2] = ((aac_cache & 6) == 2) 3105 ? 0 : 0x04; /* WCE */ 3106 mode_buf_length = sizeof(mpd10); 3107 if (mode_buf_length > scsicmd->cmnd[8]) 3108 mode_buf_length = scsicmd->cmnd[8]; 3109 } 3110 scsi_sg_copy_from_buffer(scsicmd, 3111 (char *)&mpd10, 3112 mode_buf_length); 3113 3114 scsicmd->result = AAC_STAT_GOOD; 3115 break; 3116 } 3117 case REQUEST_SENSE: 3118 dprintk((KERN_DEBUG "REQUEST SENSE command.\n")); 3119 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 3120 sizeof(struct sense_data)); 3121 memset(&dev->fsa_dev[cid].sense_data, 0, 3122 sizeof(struct sense_data)); 3123 scsicmd->result = AAC_STAT_GOOD; 3124 break; 3125 3126 case ALLOW_MEDIUM_REMOVAL: 3127 dprintk((KERN_DEBUG "LOCK command.\n")); 3128 if (scsicmd->cmnd[4]) 3129 fsa_dev_ptr[cid].locked = 1; 3130 else 3131 fsa_dev_ptr[cid].locked = 0; 3132 3133 scsicmd->result = AAC_STAT_GOOD; 3134 break; 3135 /* 3136 * These commands are all No-Ops 3137 */ 3138 case TEST_UNIT_READY: 3139 if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) { 3140 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 3141 SAM_STAT_CHECK_CONDITION; 3142 set_sense(&dev->fsa_dev[cid].sense_data, 3143 NOT_READY, SENCODE_BECOMING_READY, 3144 ASENCODE_BECOMING_READY, 0, 0); 3145 memcpy(scsicmd->sense_buffer, 3146 &dev->fsa_dev[cid].sense_data, 3147 min_t(size_t, 3148 sizeof(dev->fsa_dev[cid].sense_data), 3149 SCSI_SENSE_BUFFERSIZE)); 3150 break; 3151 } 3152 case RESERVE: 3153 case RELEASE: 3154 case REZERO_UNIT: 3155 case REASSIGN_BLOCKS: 3156 case SEEK_10: 3157 scsicmd->result = AAC_STAT_GOOD; 3158 break; 3159 3160 case START_STOP: 3161 return aac_start_stop(scsicmd); 3162 3163 /* FALLTHRU */ 3164 default: 3165 /* 3166 * Unhandled commands 3167 */ 3168 dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n", 3169 scsicmd->cmnd[0])); 3170 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | 3171 SAM_STAT_CHECK_CONDITION; 3172 set_sense(&dev->fsa_dev[cid].sense_data, 3173 ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND, 3174 ASENCODE_INVALID_COMMAND, 0, 0); 3175 memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, 3176 min_t(size_t, 3177 sizeof(dev->fsa_dev[cid].sense_data), 3178 SCSI_SENSE_BUFFERSIZE)); 3179 } 3180 3181 scsi_done_ret: 3182 3183 scsicmd->scsi_done(scsicmd); 3184 return 0; 3185 } 3186 3187 static int query_disk(struct aac_dev *dev, void __user *arg) 3188 { 3189 struct aac_query_disk qd; 3190 struct fsa_dev_info *fsa_dev_ptr; 3191 3192 fsa_dev_ptr = dev->fsa_dev; 3193 if (!fsa_dev_ptr) 3194 return -EBUSY; 3195 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 3196 return -EFAULT; 3197 if (qd.cnum == -1) { 3198 if (qd.id < 0 || qd.id >= dev->maximum_num_containers) 3199 return -EINVAL; 3200 qd.cnum = qd.id; 3201 } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { 3202 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) 3203 return -EINVAL; 3204 qd.instance = dev->scsi_host_ptr->host_no; 3205 qd.bus = 0; 3206 qd.id = CONTAINER_TO_ID(qd.cnum); 3207 qd.lun = CONTAINER_TO_LUN(qd.cnum); 3208 } 3209 else return -EINVAL; 3210 3211 qd.valid = fsa_dev_ptr[qd.cnum].valid != 0; 3212 qd.locked = fsa_dev_ptr[qd.cnum].locked; 3213 qd.deleted = fsa_dev_ptr[qd.cnum].deleted; 3214 3215 if (fsa_dev_ptr[qd.cnum].devname[0] == '\0') 3216 qd.unmapped = 1; 3217 else 3218 qd.unmapped = 0; 3219 3220 strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname, 3221 min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1)); 3222 3223 if (copy_to_user(arg, &qd, 
sizeof (struct aac_query_disk))) 3224 return -EFAULT; 3225 return 0; 3226 } 3227 3228 static int force_delete_disk(struct aac_dev *dev, void __user *arg) 3229 { 3230 struct aac_delete_disk dd; 3231 struct fsa_dev_info *fsa_dev_ptr; 3232 3233 fsa_dev_ptr = dev->fsa_dev; 3234 if (!fsa_dev_ptr) 3235 return -EBUSY; 3236 3237 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) 3238 return -EFAULT; 3239 3240 if (dd.cnum >= dev->maximum_num_containers) 3241 return -EINVAL; 3242 /* 3243 * Mark this container as being deleted. 3244 */ 3245 fsa_dev_ptr[dd.cnum].deleted = 1; 3246 /* 3247 * Mark the container as no longer valid 3248 */ 3249 fsa_dev_ptr[dd.cnum].valid = 0; 3250 return 0; 3251 } 3252 3253 static int delete_disk(struct aac_dev *dev, void __user *arg) 3254 { 3255 struct aac_delete_disk dd; 3256 struct fsa_dev_info *fsa_dev_ptr; 3257 3258 fsa_dev_ptr = dev->fsa_dev; 3259 if (!fsa_dev_ptr) 3260 return -EBUSY; 3261 3262 if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk))) 3263 return -EFAULT; 3264 3265 if (dd.cnum >= dev->maximum_num_containers) 3266 return -EINVAL; 3267 /* 3268 * If the container is locked, it cannot be deleted by the API. 3269 */ 3270 if (fsa_dev_ptr[dd.cnum].locked) 3271 return -EBUSY; 3272 else { 3273 /* 3274 * Mark the container as no longer being valid. 3275 */ 3276 fsa_dev_ptr[dd.cnum].valid = 0; 3277 fsa_dev_ptr[dd.cnum].devname[0] = '\0'; 3278 return 0; 3279 } 3280 } 3281 3282 int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg) 3283 { 3284 switch (cmd) { 3285 case FSACTL_QUERY_DISK: 3286 return query_disk(dev, arg); 3287 case FSACTL_DELETE_DISK: 3288 return delete_disk(dev, arg); 3289 case FSACTL_FORCE_DELETE_DISK: 3290 return force_delete_disk(dev, arg); 3291 case FSACTL_GET_CONTAINERS: 3292 return aac_get_containers(dev); 3293 default: 3294 return -ENOTTY; 3295 } 3296 } 3297 3298 /** 3299 * 3300 * aac_srb_callback 3301 * @context: the context set in the fib - here it is scsi cmd 3302 * @fibptr: pointer to the fib 3303 * 3304 * Handles the completion of a scsi command to a non-dasd device 3305 * 3306 */ 3307 3308 static void aac_srb_callback(void *context, struct fib * fibptr) 3309 { 3310 struct aac_dev *dev; 3311 struct aac_srb_reply *srbreply; 3312 struct scsi_cmnd *scsicmd; 3313 3314 scsicmd = (struct scsi_cmnd *) context; 3315 3316 if (!aac_valid_context(scsicmd, fibptr)) 3317 return; 3318 3319 BUG_ON(fibptr == NULL); 3320 3321 dev = fibptr->dev; 3322 3323 srbreply = (struct aac_srb_reply *) fib_data(fibptr); 3324 3325 scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */ 3326 3327 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { 3328 /* fast response */ 3329 srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS); 3330 srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD); 3331 } else { 3332 /* 3333 * Calculate resid for sg 3334 */ 3335 scsi_set_resid(scsicmd, scsi_bufflen(scsicmd) 3336 - le32_to_cpu(srbreply->data_xfer_length)); 3337 } 3338 3339 3340 scsi_dma_unmap(scsicmd); 3341 3342 /* expose physical device if expose_physicals flag is on */ 3343 if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01) 3344 && expose_physicals > 0) 3345 aac_expose_phy_device(scsicmd); 3346 3347 /* 3348 * First check the fib status 3349 */ 3350 3351 if (le32_to_cpu(srbreply->status) != ST_OK) { 3352 int len; 3353 3354 pr_warn("aac_srb_callback: srb failed, status = %d\n", 3355 le32_to_cpu(srbreply->status)); 3356 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), 3357 SCSI_SENSE_BUFFERSIZE); 3358
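/* FIB-level failure: return a check condition with the adapter's sense data */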
scsicmd->result = DID_ERROR << 16 3359 | COMMAND_COMPLETE << 8 3360 | SAM_STAT_CHECK_CONDITION; 3361 memcpy(scsicmd->sense_buffer, 3362 srbreply->sense_data, len); 3363 } 3364 3365 /* 3366 * Next check the srb status 3367 */ 3368 switch ((le32_to_cpu(srbreply->srb_status))&0x3f) { 3369 case SRB_STATUS_ERROR_RECOVERY: 3370 case SRB_STATUS_PENDING: 3371 case SRB_STATUS_SUCCESS: 3372 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3373 break; 3374 case SRB_STATUS_DATA_OVERRUN: 3375 switch (scsicmd->cmnd[0]) { 3376 case READ_6: 3377 case WRITE_6: 3378 case READ_10: 3379 case WRITE_10: 3380 case READ_12: 3381 case WRITE_12: 3382 case READ_16: 3383 case WRITE_16: 3384 if (le32_to_cpu(srbreply->data_xfer_length) 3385 < scsicmd->underflow) 3386 pr_warn("aacraid: SCSI CMD underflow\n"); 3387 else 3388 pr_warn("aacraid: SCSI CMD Data Overrun\n"); 3389 scsicmd->result = DID_ERROR << 16 3390 | COMMAND_COMPLETE << 8; 3391 break; 3392 case INQUIRY: 3393 scsicmd->result = DID_OK << 16 3394 | COMMAND_COMPLETE << 8; 3395 break; 3396 default: 3397 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3398 break; 3399 } 3400 break; 3401 case SRB_STATUS_ABORTED: 3402 scsicmd->result = DID_ABORT << 16 | ABORT << 8; 3403 break; 3404 case SRB_STATUS_ABORT_FAILED: 3405 /* 3406 * Not sure about this one - but assuming the 3407 * hba was trying to abort for some reason 3408 */ 3409 scsicmd->result = DID_ERROR << 16 | ABORT << 8; 3410 break; 3411 case SRB_STATUS_PARITY_ERROR: 3412 scsicmd->result = DID_PARITY << 16 3413 | MSG_PARITY_ERROR << 8; 3414 break; 3415 case SRB_STATUS_NO_DEVICE: 3416 case SRB_STATUS_INVALID_PATH_ID: 3417 case SRB_STATUS_INVALID_TARGET_ID: 3418 case SRB_STATUS_INVALID_LUN: 3419 case SRB_STATUS_SELECTION_TIMEOUT: 3420 scsicmd->result = DID_NO_CONNECT << 16 3421 | COMMAND_COMPLETE << 8; 3422 break; 3423 3424 case SRB_STATUS_COMMAND_TIMEOUT: 3425 case SRB_STATUS_TIMEOUT: 3426 scsicmd->result = DID_TIME_OUT << 16 3427 | COMMAND_COMPLETE << 8; 3428 break; 3429 3430 case SRB_STATUS_BUSY: 3431 scsicmd->result = DID_BUS_BUSY << 16 3432 | COMMAND_COMPLETE << 8; 3433 break; 3434 3435 case SRB_STATUS_BUS_RESET: 3436 scsicmd->result = DID_RESET << 16 3437 | COMMAND_COMPLETE << 8; 3438 break; 3439 3440 case SRB_STATUS_MESSAGE_REJECTED: 3441 scsicmd->result = DID_ERROR << 16 3442 | MESSAGE_REJECT << 8; 3443 break; 3444 case SRB_STATUS_REQUEST_FLUSHED: 3445 case SRB_STATUS_ERROR: 3446 case SRB_STATUS_INVALID_REQUEST: 3447 case SRB_STATUS_REQUEST_SENSE_FAILED: 3448 case SRB_STATUS_NO_HBA: 3449 case SRB_STATUS_UNEXPECTED_BUS_FREE: 3450 case SRB_STATUS_PHASE_SEQUENCE_FAILURE: 3451 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH: 3452 case SRB_STATUS_DELAYED_RETRY: 3453 case SRB_STATUS_BAD_FUNCTION: 3454 case SRB_STATUS_NOT_STARTED: 3455 case SRB_STATUS_NOT_IN_USE: 3456 case SRB_STATUS_FORCE_ABORT: 3457 case SRB_STATUS_DOMAIN_VALIDATION_FAIL: 3458 default: 3459 #ifdef AAC_DETAILED_STATUS_INFO 3460 pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n", 3461 le32_to_cpu(srbreply->srb_status) & 0x3F, 3462 aac_get_status_string( 3463 le32_to_cpu(srbreply->srb_status) & 0x3F), 3464 scsicmd->cmnd[0], 3465 le32_to_cpu(srbreply->scsi_status)); 3466 #endif 3467 /* 3468 * When the CC bit is SET by the host in ATA pass thru CDB, 3469 * driver is supposed to return DID_OK 3470 * 3471 * When the CC bit is RESET by the host, driver should 3472 * return DID_ERROR 3473 */ 3474 if ((scsicmd->cmnd[0] == ATA_12) 3475 || (scsicmd->cmnd[0] == ATA_16)) { 3476 3477 if (scsicmd->cmnd[2] & (0x01 << 5)) { 3478 
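/* CC bit set in the pass-through CDB: report DID_OK */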
scsicmd->result = DID_OK << 16 3479 | COMMAND_COMPLETE << 8; 3480 break; 3481 } else { 3482 scsicmd->result = DID_ERROR << 16 3483 | COMMAND_COMPLETE << 8; 3484 break; 3485 } 3486 } else { 3487 scsicmd->result = DID_ERROR << 16 3488 | COMMAND_COMPLETE << 8; 3489 break; 3490 } 3491 } 3492 if (le32_to_cpu(srbreply->scsi_status) 3493 == SAM_STAT_CHECK_CONDITION) { 3494 int len; 3495 3496 scsicmd->result |= SAM_STAT_CHECK_CONDITION; 3497 len = min_t(u32, le32_to_cpu(srbreply->sense_data_size), 3498 SCSI_SENSE_BUFFERSIZE); 3499 #ifdef AAC_DETAILED_STATUS_INFO 3500 pr_warn("aac_srb_callback: check condition, status = %d len=%d\n", 3501 le32_to_cpu(srbreply->status), len); 3502 #endif 3503 memcpy(scsicmd->sense_buffer, 3504 srbreply->sense_data, len); 3505 } 3506 3507 /* 3508 * OR in the scsi status (already shifted up a bit) 3509 */ 3510 scsicmd->result |= le32_to_cpu(srbreply->scsi_status); 3511 3512 aac_fib_complete(fibptr); 3513 scsicmd->scsi_done(scsicmd); 3514 } 3515 3516 static void hba_resp_task_complete(struct aac_dev *dev, 3517 struct scsi_cmnd *scsicmd, 3518 struct aac_hba_resp *err) { 3519 3520 scsicmd->result = err->status; 3521 /* set residual count */ 3522 scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count)); 3523 3524 switch (err->status) { 3525 case SAM_STAT_GOOD: 3526 scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8; 3527 break; 3528 case SAM_STAT_CHECK_CONDITION: 3529 { 3530 int len; 3531 3532 len = min_t(u8, err->sense_response_data_len, 3533 SCSI_SENSE_BUFFERSIZE); 3534 if (len) 3535 memcpy(scsicmd->sense_buffer, 3536 err->sense_response_buf, len); 3537 scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8; 3538 break; 3539 } 3540 case SAM_STAT_BUSY: 3541 scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8; 3542 break; 3543 case SAM_STAT_TASK_ABORTED: 3544 scsicmd->result |= DID_ABORT << 16 | ABORT << 8; 3545 break; 3546 case SAM_STAT_RESERVATION_CONFLICT: 3547 case SAM_STAT_TASK_SET_FULL: 3548 default: 3549 scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8; 3550 break; 3551 } 3552 } 3553 3554 static void hba_resp_task_failure(struct aac_dev *dev, 3555 struct scsi_cmnd *scsicmd, 3556 struct aac_hba_resp *err) 3557 { 3558 switch (err->status) { 3559 case HBA_RESP_STAT_HBAMODE_DISABLED: 3560 { 3561 u32 bus, cid; 3562 3563 bus = aac_logical_to_phys(scmd_channel(scsicmd)); 3564 cid = scmd_id(scsicmd); 3565 if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) { 3566 dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW; 3567 dev->hba_map[bus][cid].rmw_nexus = 0xffffffff; 3568 } 3569 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; 3570 break; 3571 } 3572 case HBA_RESP_STAT_IO_ERROR: 3573 case HBA_RESP_STAT_NO_PATH_TO_DEVICE: 3574 scsicmd->result = DID_OK << 16 | 3575 COMMAND_COMPLETE << 8 | SAM_STAT_BUSY; 3576 break; 3577 case HBA_RESP_STAT_IO_ABORTED: 3578 scsicmd->result = DID_ABORT << 16 | ABORT << 8; 3579 break; 3580 case HBA_RESP_STAT_INVALID_DEVICE: 3581 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; 3582 break; 3583 case HBA_RESP_STAT_UNDERRUN: 3584 /* UNDERRUN is OK */ 3585 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3586 break; 3587 case HBA_RESP_STAT_OVERRUN: 3588 default: 3589 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 3590 break; 3591 } 3592 } 3593 3594 /** 3595 * 3596 * aac_hba_callback 3597 * @context: the context set in the fib - here it is scsi cmd 3598 * @fibptr: pointer to the fib 3599 * 3600 * Handles the completion of a native HBA scsi command 3601 * 3602 */ 
*/
3603 void aac_hba_callback(void *context, struct fib *fibptr) 3604 { 3605 struct aac_dev *dev; 3606 struct scsi_cmnd *scsicmd; 3607 3608 struct aac_hba_resp *err = 3609 &((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err; 3610 3611 scsicmd = (struct scsi_cmnd *) context; 3612 3613 if (!aac_valid_context(scsicmd, fibptr)) 3614 return; 3615 3616 WARN_ON(fibptr == NULL); 3617 dev = fibptr->dev; 3618 3619 if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)) 3620 scsi_dma_unmap(scsicmd); 3621 3622 if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) { 3623 /* fast response */ 3624 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3625 goto out; 3626 } 3627 3628 switch (err->service_response) { 3629 case HBA_RESP_SVCRES_TASK_COMPLETE: 3630 hba_resp_task_complete(dev, scsicmd, err); 3631 break; 3632 case HBA_RESP_SVCRES_FAILURE: 3633 hba_resp_task_failure(dev, scsicmd, err); 3634 break; 3635 case HBA_RESP_SVCRES_TMF_REJECTED: 3636 scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8; 3637 break; 3638 case HBA_RESP_SVCRES_TMF_LUN_INVALID: 3639 scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8; 3640 break; 3641 case HBA_RESP_SVCRES_TMF_COMPLETE: 3642 case HBA_RESP_SVCRES_TMF_SUCCEEDED: 3643 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8; 3644 break; 3645 default: 3646 scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8; 3647 break; 3648 } 3649 3650 out: 3651 aac_fib_complete(fibptr); 3652 3653 if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) 3654 scsicmd->SCp.sent_command = 1; 3655 else 3656 scsicmd->scsi_done(scsicmd); 3657 } 3658 3659 /** 3660 * 3661 * aac_send_srb_fib 3662 * @scsicmd: the scsi command block 3663 * 3664 * This routine will form a FIB and fill in the aac_srb from the 3665 * scsicmd passed in. 3666 */ 3667 3668 static int aac_send_srb_fib(struct scsi_cmnd* scsicmd) 3669 { 3670 struct fib* cmd_fibcontext; 3671 struct aac_dev* dev; 3672 int status; 3673 3674 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 3675 if (scmd_id(scsicmd) >= dev->maximum_num_physicals || 3676 scsicmd->device->lun > 7) { 3677 scsicmd->result = DID_NO_CONNECT << 16; 3678 scsicmd->scsi_done(scsicmd); 3679 return 0; 3680 } 3681 3682 /* 3683 * Allocate and initialize a Fib, then set up the SRB command 3684 */ 3685 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 3686 scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 3687 status = aac_adapter_scsi(cmd_fibcontext, scsicmd); 3688 3689 /* 3690 * Check that the command queued to the controller 3691 */ 3692 if (status == -EINPROGRESS) 3693 return 0; 3694 3695 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); 3696 aac_fib_complete(cmd_fibcontext); 3697 aac_fib_free(cmd_fibcontext); 3698 3699 return -1; 3700 } 3701 3702 /** 3703 * 3704 * aac_send_hba_fib 3705 * @scsicmd: the scsi command block 3706 * 3707 * This routine will form a FIB and fill in the aac_hba_cmd_req from the 3708 * scsicmd passed in.
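 *
 * Return: 0 if the command was completed locally or queued to the adapter,
 * -1 if the FIB could not be allocated or sent.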
/**
 * aac_send_hba_fib - form and send a native HBA FIB
 * @scsicmd: the scsi command block
 *
 * This routine will form a FIB and fill in the aac_hba_cmd_req from the
 * scsicmd passed in.
 */
static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
{
	struct fib *cmd_fibcontext;
	struct aac_dev *dev;
	int status;

	dev = shost_priv(scsicmd->device->host);
	if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
	    scsicmd->device->lun > AAC_MAX_LUN - 1) {
		scsicmd->result = DID_NO_CONNECT << 16;
		scsicmd->scsi_done(scsicmd);
		return 0;
	}

	/*
	 * Allocate and initialize a Fib, then set up the native HBA command
	 */
	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
	if (!cmd_fibcontext)
		return -1;

	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
	status = aac_adapter_hba(cmd_fibcontext, scsicmd);

	/*
	 * Check that the command was queued to the controller
	 */
	if (status == -EINPROGRESS)
		return 0;

	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
		status);
	aac_fib_complete(cmd_fibcontext);
	aac_fib_free(cmd_fibcontext);

	return -1;
}


static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
	unsigned long byte_count = 0;
	int nseg;
	struct scatterlist *sg;
	int i;

	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].addr = 0;
	psg->sg[0].count = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg <= 0)
		return nseg;

	psg->count = cpu_to_le32(nseg);

	scsi_for_each_sg(scsicmd, sg, nseg, i) {
		psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
		psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
		byte_count += sg_dma_len(sg);
	}
	/* hba wants the size to be exact */
	if (byte_count > scsi_bufflen(scsicmd)) {
		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
			(byte_count - scsi_bufflen(scsicmd));
		psg->sg[i-1].count = cpu_to_le32(temp);
		byte_count = scsi_bufflen(scsicmd);
	}
	/* Check for command underflow */
	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
		printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
		       byte_count, scsicmd->underflow);
	}

	return byte_count;
}
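
/*
 * aac_build_sg() above and the wider-format builders below all apply
 * the same trimming rule: the firmware wants the SG list to describe
 * exactly scsi_bufflen() bytes.  If the DMA mapping rounds the total
 * up (say the mapped segments sum to 0x1200 bytes for a 0x1000-byte
 * request), the last element is shrunk by the 0x200-byte difference
 * and byte_count is clamped to the request size.
 */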
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
	unsigned long byte_count = 0;
	u64 addr;
	int nseg;
	struct scatterlist *sg;
	int i;

	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(scsicmd, sg, nseg, i) {
		int count = sg_dma_len(sg);

		addr = sg_dma_address(sg);
		psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
		psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);
		psg->sg[i].count = cpu_to_le32(count);
		byte_count += count;
	}
	psg->count = cpu_to_le32(nseg);
	/* hba wants the size to be exact */
	if (byte_count > scsi_bufflen(scsicmd)) {
		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
			(byte_count - scsi_bufflen(scsicmd));
		psg->sg[i-1].count = cpu_to_le32(temp);
		byte_count = scsi_bufflen(scsicmd);
	}
	/* Check for command underflow */
	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
		printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
		       byte_count, scsicmd->underflow);
	}

	return byte_count;
}

static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
{
	unsigned long byte_count = 0;
	int nseg;
	struct scatterlist *sg;
	int i;

	/* Get rid of old data */
	psg->count = 0;
	psg->sg[0].next = 0;
	psg->sg[0].prev = 0;
	psg->sg[0].addr[0] = 0;
	psg->sg[0].addr[1] = 0;
	psg->sg[0].count = 0;
	psg->sg[0].flags = 0;

	nseg = scsi_dma_map(scsicmd);
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(scsicmd, sg, nseg, i) {
		int count = sg_dma_len(sg);
		u64 addr = sg_dma_address(sg);

		psg->sg[i].next = 0;
		psg->sg[i].prev = 0;
		psg->sg[i].addr[1] = cpu_to_le32((u32)(addr >> 32));
		psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
		psg->sg[i].count = cpu_to_le32(count);
		psg->sg[i].flags = 0;
		byte_count += count;
	}
	psg->count = cpu_to_le32(nseg);
	/* hba wants the size to be exact */
	if (byte_count > scsi_bufflen(scsicmd)) {
		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
			(byte_count - scsi_bufflen(scsicmd));
		psg->sg[i-1].count = cpu_to_le32(temp);
		byte_count = scsi_bufflen(scsicmd);
	}
	/* Check for command underflow */
	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
		printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
		       byte_count, scsicmd->underflow);
	}

	return byte_count;
}
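
/*
 * RAW IO v2 SG lists can be flagged RIO2_SGL_CONFORMANT when every
 * element except the first and last has the same nominal length,
 * which presumably lets the firmware locate elements by index rather
 * than by walking the list.  aac_build_sgraw2() below first checks
 * whether the mapped segments already conform; if not, it asks
 * aac_convert_sgraw2() to re-split the middle elements into equal
 * page-multiple chunks, provided the result still fits in sg_max
 * elements.
 */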
static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
			     struct aac_raw_io2 *rio2, int sg_max)
{
	unsigned long byte_count = 0;
	int nseg;
	struct scatterlist *sg;
	int i, conformable = 0;
	u32 min_size = PAGE_SIZE, cur_size;

	nseg = scsi_dma_map(scsicmd);
	if (nseg <= 0)
		return nseg;

	scsi_for_each_sg(scsicmd, sg, nseg, i) {
		int count = sg_dma_len(sg);
		u64 addr = sg_dma_address(sg);

		BUG_ON(i >= sg_max);
		rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr >> 32));
		rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
		/* keep cur_size in cpu order; convert only when storing */
		cur_size = count;
		rio2->sge[i].length = cpu_to_le32(cur_size);
		rio2->sge[i].flags = 0;
		if (i == 0) {
			conformable = 1;
			rio2->sgeFirstSize = cpu_to_le32(cur_size);
		} else if (i == 1) {
			rio2->sgeNominalSize = cpu_to_le32(cur_size);
			min_size = cur_size;
		} else if ((i+1) < nseg &&
			   cur_size != le32_to_cpu(rio2->sgeNominalSize)) {
			conformable = 0;
			if (cur_size < min_size)
				min_size = cur_size;
		}
		byte_count += count;
	}

	/* hba wants the size to be exact */
	if (byte_count > scsi_bufflen(scsicmd)) {
		u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
			(byte_count - scsi_bufflen(scsicmd));
		rio2->sge[i-1].length = cpu_to_le32(temp);
		byte_count = scsi_bufflen(scsicmd);
	}

	rio2->sgeCnt = cpu_to_le32(nseg);
	rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
	/* not conformable: evaluate required sg elements */
	if (!conformable) {
		int j, nseg_new = nseg, err_found;

		for (i = min_size / PAGE_SIZE; i >= 1; --i) {
			err_found = 0;
			nseg_new = 2;
			for (j = 1; j < nseg - 1; ++j) {
				u32 len = le32_to_cpu(rio2->sge[j].length);

				if (len % (i*PAGE_SIZE)) {
					err_found = 1;
					break;
				}
				nseg_new += (len / (i*PAGE_SIZE));
			}
			if (!err_found)
				break;
		}
		if (i > 0 && nseg_new <= sg_max) {
			int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);

			if (ret < 0)
				return ret;
		}
	} else
		rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);

	/* Check for command underflow */
	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
		printk(KERN_WARNING "aacraid: cmd len %08lX cmd underflow %08X\n",
		       byte_count, scsicmd->underflow);
	}

	return byte_count;
}

static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg,
			      int nseg_new)
{
	struct sge_ieee1212 *sge;
	int i, j, pos;
	u32 addr_low;

	if (aac_convert_sgl == 0)
		return 0;

	sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
	if (sge == NULL)
		return -ENOMEM;

	/* split every middle element into pages*PAGE_SIZE sized chunks */
	for (i = 1, pos = 1; i < nseg-1; ++i) {
		for (j = 0; j < le32_to_cpu(rio2->sge[i].length) /
			    (pages * PAGE_SIZE); ++j) {
			addr_low = le32_to_cpu(rio2->sge[i].addrLow) +
				   j * pages * PAGE_SIZE;
			sge[pos].addrLow = cpu_to_le32(addr_low);
			sge[pos].addrHigh = rio2->sge[i].addrHigh;
			/* carry into the high dword on 32-bit overflow */
			if (addr_low < le32_to_cpu(rio2->sge[i].addrLow))
				sge[pos].addrHigh = cpu_to_le32(
				    le32_to_cpu(rio2->sge[i].addrHigh) + 1);
			sge[pos].length = cpu_to_le32(pages * PAGE_SIZE);
			sge[pos].flags = 0;
			pos++;
		}
	}
	sge[pos] = rio2->sge[nseg-1];
	memcpy(&rio2->sge[1], &sge[1],
	       (nseg_new-1) * sizeof(struct sge_ieee1212));

	kfree(sge);
	rio2->sgeCnt = cpu_to_le32(nseg_new);
	rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
	rio2->sgeNominalSize = cpu_to_le32(pages * PAGE_SIZE);
	return 0;
}

static long aac_build_sghba(struct scsi_cmnd *scsicmd,
			    struct aac_hba_cmd_req *hbacmd,
			    int sg_max, u64 sg_address)
{
	unsigned long byte_count = 0;
	int nseg;
	struct scatterlist *sg;
	int i;
	u32 cur_size;
	struct aac_hba_sgl *sge;

	nseg = scsi_dma_map(scsicmd);
	if (nseg <= 0) {
		byte_count = nseg;
		goto out;
	}

	if (nseg > HBA_MAX_SG_EMBEDDED)
		sge = &hbacmd->sge[2];
	else
		sge = &hbacmd->sge[0];

	scsi_for_each_sg(scsicmd, sg, nseg, i) {
		int count = sg_dma_len(sg);
		u64 addr = sg_dma_address(sg);

		WARN_ON(i >= sg_max);
		sge->addr_hi = cpu_to_le32((u32)(addr >> 32));
		sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
		cur_size = cpu_to_le32(count);
		sge->len = cur_size;
		sge->flags = 0;
		byte_count += count;
		sge++;
	}

	sge--;
	/* hba wants the size to be exact */
	if (byte_count > scsi_bufflen(scsicmd)) {
		u32 temp;

		temp = le32_to_cpu(sge->len) -
			(byte_count - scsi_bufflen(scsicmd));
		sge->len = cpu_to_le32(temp);
		byte_count = scsi_bufflen(scsicmd);
	}

	if (nseg <= HBA_MAX_SG_EMBEDDED) {
		hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
		sge->flags = cpu_to_le32(0x40000000);
	} else {
		/* not embedded */
		hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
		hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
		hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
		hbacmd->sge[0].addr_lo =
			cpu_to_le32((u32)(sg_address & 0xffffffff));
	}

	/* Check for command underflow */
	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
		pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
			byte_count, scsicmd->underflow);
	}
out:
	return byte_count;
}
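
/*
 * In aac_build_sghba() above, the SG list is either embedded in the
 * command frame (up to HBA_MAX_SG_EMBEDDED elements, with the last
 * element flagged 0x40000000, which appears to mark the final
 * descriptor) or kept in a separate buffer at sg_address, in which
 * case sge[0] is rewritten as a single descriptor flagged 0x80000000
 * that points at the external list.
 */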
#ifdef AAC_DETAILED_STATUS_INFO

struct aac_srb_status_info {
	u32	status;
	char	*str;
};

static struct aac_srb_status_info srb_status_info[] = {
	{ SRB_STATUS_PENDING,		"Pending Status"},
	{ SRB_STATUS_SUCCESS,		"Success"},
	{ SRB_STATUS_ABORTED,		"Aborted Command"},
	{ SRB_STATUS_ABORT_FAILED,	"Abort Failed"},
	{ SRB_STATUS_ERROR,		"Error Event"},
	{ SRB_STATUS_BUSY,		"Device Busy"},
	{ SRB_STATUS_INVALID_REQUEST,	"Invalid Request"},
	{ SRB_STATUS_INVALID_PATH_ID,	"Invalid Path ID"},
	{ SRB_STATUS_NO_DEVICE,		"No Device"},
	{ SRB_STATUS_TIMEOUT,		"Timeout"},
	{ SRB_STATUS_SELECTION_TIMEOUT,	"Selection Timeout"},
	{ SRB_STATUS_COMMAND_TIMEOUT,	"Command Timeout"},
	{ SRB_STATUS_MESSAGE_REJECTED,	"Message Rejected"},
	{ SRB_STATUS_BUS_RESET,		"Bus Reset"},
	{ SRB_STATUS_PARITY_ERROR,	"Parity Error"},
	{ SRB_STATUS_REQUEST_SENSE_FAILED, "Request Sense Failed"},
	{ SRB_STATUS_NO_HBA,		"No HBA"},
	{ SRB_STATUS_DATA_OVERRUN,	"Data Overrun/Data Underrun"},
	{ SRB_STATUS_UNEXPECTED_BUS_FREE, "Unexpected Bus Free"},
	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE, "Phase Error"},
	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH, "Bad Srb Block Length"},
	{ SRB_STATUS_REQUEST_FLUSHED,	"Request Flushed"},
	{ SRB_STATUS_DELAYED_RETRY,	"Delayed Retry"},
	{ SRB_STATUS_INVALID_LUN,	"Invalid LUN"},
	{ SRB_STATUS_INVALID_TARGET_ID,	"Invalid TARGET ID"},
	{ SRB_STATUS_BAD_FUNCTION,	"Bad Function"},
	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL, "Domain Validation Failure"},
	{ 0xff,				"Unknown Error"}
};

char *aac_get_status_string(u32 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
		if (srb_status_info[i].status == status)
			return srb_status_info[i].str;

	return "Bad Status Code";
}

#endif