hpsa.c (d4e0045c4ed300781d2d4cbab57d05ed5e665a37) | hpsa.c (ba82d91b7567774242534460910530289192d212) |
---|---|
1/* 2 * Disk Array driver for HP Smart Array SAS controllers 3 * Copyright 2016 Microsemi Corporation 4 * Copyright 2014-2015 PMC-Sierra, Inc. 5 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by --- 46 unchanged lines hidden (view full) --- 55#include <asm/div64.h> 56#include "hpsa_cmd.h" 57#include "hpsa.h" 58 59/* 60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 61 * with an optional trailing '-' followed by a byte value (0-255). 62 */ | 1/* 2 * Disk Array driver for HP Smart Array SAS controllers 3 * Copyright 2016 Microsemi Corporation 4 * Copyright 2014-2015 PMC-Sierra, Inc. 5 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P. 6 * 7 * This program is free software; you can redistribute it and/or modify 8 * it under the terms of the GNU General Public License as published by --- 46 unchanged lines hidden (view full) --- 55#include <asm/div64.h> 56#include "hpsa_cmd.h" 57#include "hpsa.h" 58 59/* 60 * HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' 61 * with an optional trailing '-' followed by a byte value (0-255). 62 */ |
63#define HPSA_DRIVER_VERSION "3.4.18-0" | 63#define HPSA_DRIVER_VERSION "3.4.20-0" |
64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 65#define HPSA "hpsa" 66 67/* How long to wait for CISS doorbell communication */ 68#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ 69#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ 70#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ 71#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ --- 181 unchanged lines hidden (view full) --- 253 254static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 255static void hpsa_scan_start(struct Scsi_Host *); 256static int hpsa_scan_finished(struct Scsi_Host *sh, 257 unsigned long elapsed_time); 258static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); 259 260static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); | 64#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" 65#define HPSA "hpsa" 66 67/* How long to wait for CISS doorbell communication */ 68#define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */ 69#define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */ 70#define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */ 71#define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */ --- 181 unchanged lines hidden (view full) --- 253 254static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); 255static void hpsa_scan_start(struct Scsi_Host *); 256static int hpsa_scan_finished(struct Scsi_Host *sh, 257 unsigned long elapsed_time); 258static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth); 259 260static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
261static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd); | |
262static int hpsa_slave_alloc(struct scsi_device *sdev); 263static int hpsa_slave_configure(struct scsi_device *sdev); 264static void hpsa_slave_destroy(struct scsi_device *sdev); 265 266static void hpsa_update_scsi_devices(struct ctlr_info *h); 267static int check_for_unit_attention(struct ctlr_info *h, 268 struct CommandList *c); 269static void check_ioctl_unit_attention(struct ctlr_info *h, --- 51 unchanged lines hidden (view full) --- 321 322static inline bool hpsa_is_cmd_idle(struct CommandList *c) 323{ 324 return c->scsi_cmd == SCSI_CMD_IDLE; 325} 326 327static inline bool hpsa_is_pending_event(struct CommandList *c) 328{ | 261static int hpsa_slave_alloc(struct scsi_device *sdev); 262static int hpsa_slave_configure(struct scsi_device *sdev); 263static void hpsa_slave_destroy(struct scsi_device *sdev); 264 265static void hpsa_update_scsi_devices(struct ctlr_info *h); 266static int check_for_unit_attention(struct ctlr_info *h, 267 struct CommandList *c); 268static void check_ioctl_unit_attention(struct ctlr_info *h, --- 51 unchanged lines hidden (view full) --- 320 321static inline bool hpsa_is_cmd_idle(struct CommandList *c) 322{ 323 return c->scsi_cmd == SCSI_CMD_IDLE; 324} 325 326static inline bool hpsa_is_pending_event(struct CommandList *c) 327{ |
329 return c->abort_pending || c->reset_pending; | 328 return c->reset_pending; |
330} 331 332/* extract sense key, asc, and ascq from sense data. -1 means invalid. */ 333static void decode_sense_data(const u8 *sense_data, int sense_data_len, 334 u8 *sense_key, u8 *asc, u8 *ascq) 335{ 336 struct scsi_sense_hdr sshdr; 337 bool rc; --- 238 unchanged lines hidden (view full) --- 576 * it. If we reset the one controlling the cache, the other will 577 * likely not be happy. Just forbid resetting this conjoined mess. 578 * The 640x isn't really supported by hpsa anyway. 579 */ 580 0x409C0E11, /* Smart Array 6400 */ 581 0x409D0E11, /* Smart Array 6400 EM */ 582}; 583 | 329} 330 331/* extract sense key, asc, and ascq from sense data. -1 means invalid. */ 332static void decode_sense_data(const u8 *sense_data, int sense_data_len, 333 u8 *sense_key, u8 *asc, u8 *ascq) 334{ 335 struct scsi_sense_hdr sshdr; 336 bool rc; --- 238 unchanged lines hidden (view full) --- 575 * it. If we reset the one controlling the cache, the other will 576 * likely not be happy. Just forbid resetting this conjoined mess. 577 * The 640x isn't really supported by hpsa anyway. 578 */ 579 0x409C0E11, /* Smart Array 6400 */ 580 0x409D0E11, /* Smart Array 6400 EM */ 581}; 582 |
584static u32 needs_abort_tags_swizzled[] = { 585 0x323D103C, /* Smart Array P700m */ 586 0x324a103C, /* Smart Array P712m */ 587 0x324b103C, /* SmartArray P711m */ 588}; 589 | |
590static int board_id_in_array(u32 a[], int nelems, u32 board_id) 591{ 592 int i; 593 594 for (i = 0; i < nelems; i++) 595 if (a[i] == board_id) 596 return 1; 597 return 0; --- 12 unchanged lines hidden (view full) --- 610} 611 612static int ctlr_is_resettable(u32 board_id) 613{ 614 return ctlr_is_hard_resettable(board_id) || 615 ctlr_is_soft_resettable(board_id); 616} 617 | 583static int board_id_in_array(u32 a[], int nelems, u32 board_id) 584{ 585 int i; 586 587 for (i = 0; i < nelems; i++) 588 if (a[i] == board_id) 589 return 1; 590 return 0; --- 12 unchanged lines hidden (view full) --- 603} 604 605static int ctlr_is_resettable(u32 board_id) 606{ 607 return ctlr_is_hard_resettable(board_id) || 608 ctlr_is_soft_resettable(board_id); 609} 610 |
618static int ctlr_needs_abort_tags_swizzled(u32 board_id) 619{ 620 return board_id_in_array(needs_abort_tags_swizzled, 621 ARRAY_SIZE(needs_abort_tags_swizzled), board_id); 622} 623 | |
624static ssize_t host_show_resettable(struct device *dev, 625 struct device_attribute *attr, char *buf) 626{ 627 struct ctlr_info *h; 628 struct Scsi_Host *shost = class_to_shost(dev); 629 630 h = shost_to_hba(shost); 631 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); --- 291 unchanged lines hidden (view full) --- 923 &dev_attr_resettable, 924 &dev_attr_hp_ssd_smart_path_status, 925 &dev_attr_raid_offload_debug, 926 &dev_attr_lockup_detected, 927 &dev_attr_ctlr_num, 928 NULL, 929}; 930 | 611static ssize_t host_show_resettable(struct device *dev, 612 struct device_attribute *attr, char *buf) 613{ 614 struct ctlr_info *h; 615 struct Scsi_Host *shost = class_to_shost(dev); 616 617 h = shost_to_hba(shost); 618 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id)); --- 291 unchanged lines hidden (view full) --- 910 &dev_attr_resettable, 911 &dev_attr_hp_ssd_smart_path_status, 912 &dev_attr_raid_offload_debug, 913 &dev_attr_lockup_detected, 914 &dev_attr_ctlr_num, 915 NULL, 916}; 917 |
931#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \ 932 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS) | 918#define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\ 919 HPSA_MAX_CONCURRENT_PASSTHRUS) |
933 934static struct scsi_host_template hpsa_driver_template = { 935 .module = THIS_MODULE, 936 .name = HPSA, 937 .proc_name = HPSA, 938 .queuecommand = hpsa_scsi_queue_command, 939 .scan_start = hpsa_scan_start, 940 .scan_finished = hpsa_scan_finished, 941 .change_queue_depth = hpsa_change_queue_depth, 942 .this_id = -1, 943 .use_clustering = ENABLE_CLUSTERING, | 920 921static struct scsi_host_template hpsa_driver_template = { 922 .module = THIS_MODULE, 923 .name = HPSA, 924 .proc_name = HPSA, 925 .queuecommand = hpsa_scsi_queue_command, 926 .scan_start = hpsa_scan_start, 927 .scan_finished = hpsa_scan_finished, 928 .change_queue_depth = hpsa_change_queue_depth, 929 .this_id = -1, 930 .use_clustering = ENABLE_CLUSTERING, |
944 .eh_abort_handler = hpsa_eh_abort_handler, | |
945 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 946 .ioctl = hpsa_ioctl, 947 .slave_alloc = hpsa_slave_alloc, 948 .slave_configure = hpsa_slave_configure, 949 .slave_destroy = hpsa_slave_destroy, 950#ifdef CONFIG_COMPAT 951 .compat_ioctl = hpsa_compat_ioctl, 952#endif --- 152 unchanged lines hidden (view full) --- 1105 1106/* 1107 * During firmware flash, the heartbeat register may not update as frequently 1108 * as it should. So we dial down lockup detection during firmware flash. and 1109 * dial it back up when firmware flash completes. 1110 */ 1111#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) 1112#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) | 931 .eh_device_reset_handler = hpsa_eh_device_reset_handler, 932 .ioctl = hpsa_ioctl, 933 .slave_alloc = hpsa_slave_alloc, 934 .slave_configure = hpsa_slave_configure, 935 .slave_destroy = hpsa_slave_destroy, 936#ifdef CONFIG_COMPAT 937 .compat_ioctl = hpsa_compat_ioctl, 938#endif --- 152 unchanged lines hidden (view full) --- 1091 1092/* 1093 * During firmware flash, the heartbeat register may not update as frequently 1094 * as it should. So we dial down lockup detection during firmware flash. and 1095 * dial it back up when firmware flash completes. 1096 */ 1097#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ) 1098#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ) |
1099#define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ) |
|
1113static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, 1114 struct CommandList *c) 1115{ 1116 if (!is_firmware_flash_cmd(c->Request.CDB)) 1117 return; 1118 atomic_inc(&h->firmware_flash_in_progress); 1119 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; 1120} --- 733 unchanged lines hidden (view full) --- 1854 unsigned long flags; 1855 struct hpsa_scsi_dev_t **added, **removed; 1856 int nadded, nremoved; 1857 1858 /* 1859 * A reset can cause a device status to change 1860 * re-schedule the scan to see what happened. 1861 */ | 1100static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h, 1101 struct CommandList *c) 1102{ 1103 if (!is_firmware_flash_cmd(c->Request.CDB)) 1104 return; 1105 atomic_inc(&h->firmware_flash_in_progress); 1106 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH; 1107} --- 733 unchanged lines hidden (view full) --- 1841 unsigned long flags; 1842 struct hpsa_scsi_dev_t **added, **removed; 1843 int nadded, nremoved; 1844 1845 /* 1846 * A reset can cause a device status to change 1847 * re-schedule the scan to see what happened. 1848 */ |
1849 spin_lock_irqsave(&h->reset_lock, flags); |
|
1862 if (h->reset_in_progress) { 1863 h->drv_req_rescan = 1; | 1850 if (h->reset_in_progress) { 1851 h->drv_req_rescan = 1; |
1852 spin_unlock_irqrestore(&h->reset_lock, flags); |
|
1864 return; 1865 } | 1853 return; 1854 } |
1855 spin_unlock_irqrestore(&h->reset_lock, flags); |
|
1866 1867 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); 1868 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); 1869 1870 if (!added || !removed) { 1871 dev_warn(&h->pdev->dev, "out of memory in " 1872 "adjust_hpsa_scsi_table\n"); 1873 goto free_and_out; --- 187 unchanged lines hidden (view full) --- 2061static int hpsa_slave_configure(struct scsi_device *sdev) 2062{ 2063 struct hpsa_scsi_dev_t *sd; 2064 int queue_depth; 2065 2066 sd = sdev->hostdata; 2067 sdev->no_uld_attach = !sd || !sd->expose_device; 2068 | 1856 1857 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL); 1858 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL); 1859 1860 if (!added || !removed) { 1861 dev_warn(&h->pdev->dev, "out of memory in " 1862 "adjust_hpsa_scsi_table\n"); 1863 goto free_and_out; --- 187 unchanged lines hidden (view full) --- 2051static int hpsa_slave_configure(struct scsi_device *sdev) 2052{ 2053 struct hpsa_scsi_dev_t *sd; 2054 int queue_depth; 2055 2056 sd = sdev->hostdata; 2057 sdev->no_uld_attach = !sd || !sd->expose_device; 2058 |
2069 if (sd) 2070 queue_depth = sd->queue_depth != 0 ? 2071 sd->queue_depth : sdev->host->can_queue; 2072 else | 2059 if (sd) { 2060 if (sd->external) 2061 queue_depth = EXTERNAL_QD; 2062 else 2063 queue_depth = sd->queue_depth != 0 ? 2064 sd->queue_depth : sdev->host->can_queue; 2065 } else |
2073 queue_depth = sdev->host->can_queue; 2074 2075 scsi_change_queue_depth(sdev, queue_depth); 2076 2077 return 0; 2078} 2079 2080static void hpsa_slave_destroy(struct scsi_device *sdev) --- 268 unchanged lines hidden (view full) --- 2349} 2350 2351static void hpsa_cmd_resolve_events(struct ctlr_info *h, 2352 struct CommandList *c) 2353{ 2354 bool do_wake = false; 2355 2356 /* | 2066 queue_depth = sdev->host->can_queue; 2067 2068 scsi_change_queue_depth(sdev, queue_depth); 2069 2070 return 0; 2071} 2072 2073static void hpsa_slave_destroy(struct scsi_device *sdev) --- 268 unchanged lines hidden (view full) --- 2342} 2343 2344static void hpsa_cmd_resolve_events(struct ctlr_info *h, 2345 struct CommandList *c) 2346{ 2347 bool do_wake = false; 2348 2349 /* |
2357 * Prevent the following race in the abort handler: 2358 * 2359 * 1. LLD is requested to abort a SCSI command 2360 * 2. The SCSI command completes 2361 * 3. The struct CommandList associated with step 2 is made available 2362 * 4. New I/O request to LLD to another LUN re-uses struct CommandList 2363 * 5. Abort handler follows scsi_cmnd->host_scribble and 2364 * finds struct CommandList and tries to aborts it 2365 * Now we have aborted the wrong command. 2366 * 2367 * Reset c->scsi_cmd here so that the abort or reset handler will know | 2350 * Reset c->scsi_cmd here so that the reset handler will know |
2368 * this command has completed. Then, check to see if the handler is 2369 * waiting for this command, and, if so, wake it. 2370 */ 2371 c->scsi_cmd = SCSI_CMD_IDLE; 2372 mb(); /* Declare command idle before checking for pending events. */ | 2351 * this command has completed. Then, check to see if the handler is 2352 * waiting for this command, and, if so, wake it. 2353 */ 2354 c->scsi_cmd = SCSI_CMD_IDLE; 2355 mb(); /* Declare command idle before checking for pending events. */ |
2373 if (c->abort_pending) { 2374 do_wake = true; 2375 c->abort_pending = false; 2376 } | |
2377 if (c->reset_pending) { 2378 unsigned long flags; 2379 struct hpsa_scsi_dev_t *dev; 2380 2381 /* 2382 * There appears to be a reset pending; lock the lock and 2383 * reconfirm. If so, then decrement the count of outstanding 2384 * commands and wake the reset command if this is the last one. --- 26 unchanged lines hidden (view full) --- 2411} 2412 2413static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) 2414{ 2415 INIT_WORK(&c->work, hpsa_command_resubmit_worker); 2416 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); 2417} 2418 | 2356 if (c->reset_pending) { 2357 unsigned long flags; 2358 struct hpsa_scsi_dev_t *dev; 2359 2360 /* 2361 * There appears to be a reset pending; lock the lock and 2362 * reconfirm. If so, then decrement the count of outstanding 2363 * commands and wake the reset command if this is the last one. --- 26 unchanged lines hidden (view full) --- 2390} 2391 2392static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c) 2393{ 2394 INIT_WORK(&c->work, hpsa_command_resubmit_worker); 2395 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work); 2396} 2397 |
2419static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd) 2420{ 2421 cmd->result = DID_ABORT << 16; 2422} 2423 2424static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c, 2425 struct scsi_cmnd *cmd) 2426{ 2427 hpsa_set_scsi_cmd_aborted(cmd); 2428 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n", 2429 c->Request.CDB, c->err_info->ScsiStatus); 2430 hpsa_cmd_resolve_and_free(h, c); 2431} 2432 | |
2433static void process_ioaccel2_completion(struct ctlr_info *h, 2434 struct CommandList *c, struct scsi_cmnd *cmd, 2435 struct hpsa_scsi_dev_t *dev) 2436{ 2437 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 2438 2439 /* check for good status */ 2440 if (likely(c2->error_data.serv_response == 0 && --- 108 unchanged lines hidden (view full) --- 2549 * fail_all_oustanding_cmds() 2550 */ 2551 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { 2552 /* DID_NO_CONNECT will prevent a retry */ 2553 cmd->result = DID_NO_CONNECT << 16; 2554 return hpsa_cmd_free_and_done(h, cp, cmd); 2555 } 2556 | 2398static void process_ioaccel2_completion(struct ctlr_info *h, 2399 struct CommandList *c, struct scsi_cmnd *cmd, 2400 struct hpsa_scsi_dev_t *dev) 2401{ 2402 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 2403 2404 /* check for good status */ 2405 if (likely(c2->error_data.serv_response == 0 && --- 108 unchanged lines hidden (view full) --- 2514 * fail_all_oustanding_cmds() 2515 */ 2516 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) { 2517 /* DID_NO_CONNECT will prevent a retry */ 2518 cmd->result = DID_NO_CONNECT << 16; 2519 return hpsa_cmd_free_and_done(h, cp, cmd); 2520 } 2521 |
2557 if ((unlikely(hpsa_is_pending_event(cp)))) { | 2522 if ((unlikely(hpsa_is_pending_event(cp)))) |
2558 if (cp->reset_pending) 2559 return hpsa_cmd_free_and_done(h, cp, cmd); | 2523 if (cp->reset_pending) 2524 return hpsa_cmd_free_and_done(h, cp, cmd); |
2560 if (cp->abort_pending) 2561 return hpsa_cmd_abort_and_free(h, cp, cmd); 2562 } | |
2563 2564 if (cp->cmd_type == CMD_IOACCEL2) 2565 return process_ioaccel2_completion(h, cp, cmd, dev); 2566 2567 scsi_set_resid(cmd, ei->ResidualCnt); 2568 if (ei->CommandStatus == 0) 2569 return hpsa_cmd_free_and_done(h, cp, cmd); 2570 --- 103 unchanged lines hidden (view full) --- 2674 cp->Request.CDB); 2675 break; 2676 case CMD_CONNECTION_LOST: 2677 cmd->result = DID_ERROR << 16; 2678 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", 2679 cp->Request.CDB); 2680 break; 2681 case CMD_ABORTED: | 2525 2526 if (cp->cmd_type == CMD_IOACCEL2) 2527 return process_ioaccel2_completion(h, cp, cmd, dev); 2528 2529 scsi_set_resid(cmd, ei->ResidualCnt); 2530 if (ei->CommandStatus == 0) 2531 return hpsa_cmd_free_and_done(h, cp, cmd); 2532 --- 103 unchanged lines hidden (view full) --- 2636 cp->Request.CDB); 2637 break; 2638 case CMD_CONNECTION_LOST: 2639 cmd->result = DID_ERROR << 16; 2640 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n", 2641 cp->Request.CDB); 2642 break; 2643 case CMD_ABORTED: |
2682 /* Return now to avoid calling scsi_done(). */ 2683 return hpsa_cmd_abort_and_free(h, cp, cmd); | 2644 cmd->result = DID_ABORT << 16; 2645 break; |
2684 case CMD_ABORT_FAILED: 2685 cmd->result = DID_ERROR << 16; 2686 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", 2687 cp->Request.CDB); 2688 break; 2689 case CMD_UNSOLICITED_ABORT: 2690 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 2691 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", --- 393 unchanged lines hidden (view full) --- 3085 dev_warn(&h->pdev->dev, 3086 "Controller lockup detected during reset wait\n"); 3087 rc = -ENODEV; 3088 } 3089 3090 if (unlikely(rc)) 3091 atomic_set(&dev->reset_cmds_out, 0); 3092 else | 2646 case CMD_ABORT_FAILED: 2647 cmd->result = DID_ERROR << 16; 2648 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n", 2649 cp->Request.CDB); 2650 break; 2651 case CMD_UNSOLICITED_ABORT: 2652 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */ 2653 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n", --- 393 unchanged lines hidden (view full) --- 3047 dev_warn(&h->pdev->dev, 3048 "Controller lockup detected during reset wait\n"); 3049 rc = -ENODEV; 3050 } 3051 3052 if (unlikely(rc)) 3053 atomic_set(&dev->reset_cmds_out, 0); 3054 else |
3093 wait_for_device_to_become_ready(h, scsi3addr, 0); | 3055 rc = wait_for_device_to_become_ready(h, scsi3addr, 0); |
3094 3095 mutex_unlock(&h->reset_mutex); 3096 return rc; 3097} 3098 3099static void hpsa_get_raid_level(struct ctlr_info *h, 3100 unsigned char *scsi3addr, unsigned char *raid_level) 3101{ --- 58 unchanged lines hidden (view full) --- 3160 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 3161 le16_to_cpu(map_buff->metadata_disks_per_row)); 3162 dev_info(&h->pdev->dev, "row_cnt = %u\n", 3163 le16_to_cpu(map_buff->row_cnt)); 3164 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 3165 le16_to_cpu(map_buff->layout_map_count)); 3166 dev_info(&h->pdev->dev, "flags = 0x%x\n", 3167 le16_to_cpu(map_buff->flags)); | 3056 3057 mutex_unlock(&h->reset_mutex); 3058 return rc; 3059} 3060 3061static void hpsa_get_raid_level(struct ctlr_info *h, 3062 unsigned char *scsi3addr, unsigned char *raid_level) 3063{ --- 58 unchanged lines hidden (view full) --- 3122 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", 3123 le16_to_cpu(map_buff->metadata_disks_per_row)); 3124 dev_info(&h->pdev->dev, "row_cnt = %u\n", 3125 le16_to_cpu(map_buff->row_cnt)); 3126 dev_info(&h->pdev->dev, "layout_map_count = %u\n", 3127 le16_to_cpu(map_buff->layout_map_count)); 3128 dev_info(&h->pdev->dev, "flags = 0x%x\n", 3129 le16_to_cpu(map_buff->flags)); |
3168 dev_info(&h->pdev->dev, "encrypytion = %s\n", | 3130 dev_info(&h->pdev->dev, "encryption = %s\n", |
3169 le16_to_cpu(map_buff->flags) & 3170 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF"); 3171 dev_info(&h->pdev->dev, "dekindex = %u\n", 3172 le16_to_cpu(map_buff->dekindex)); 3173 map_cnt = le16_to_cpu(map_buff->layout_map_count); 3174 for (map = 0; map < map_cnt; map++) { 3175 dev_info(&h->pdev->dev, "Map%u:\n", map); 3176 row_cnt = le16_to_cpu(map_buff->row_cnt); --- 171 unchanged lines hidden (view full) --- 3348 struct ErrorInfo *ei = NULL; 3349 struct bmic_sense_storage_box_params *bssbp = NULL; 3350 struct bmic_identify_physical_device *id_phys = NULL; 3351 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; 3352 u16 bmic_device_index = 0; 3353 3354 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); 3355 | 3131 le16_to_cpu(map_buff->flags) & 3132 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF"); 3133 dev_info(&h->pdev->dev, "dekindex = %u\n", 3134 le16_to_cpu(map_buff->dekindex)); 3135 map_cnt = le16_to_cpu(map_buff->layout_map_count); 3136 for (map = 0; map < map_cnt; map++) { 3137 dev_info(&h->pdev->dev, "Map%u:\n", map); 3138 row_cnt = le16_to_cpu(map_buff->row_cnt); --- 171 unchanged lines hidden (view full) --- 3310 struct ErrorInfo *ei = NULL; 3311 struct bmic_sense_storage_box_params *bssbp = NULL; 3312 struct bmic_identify_physical_device *id_phys = NULL; 3313 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index]; 3314 u16 bmic_device_index = 0; 3315 3316 bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]); 3317 |
3318 if (encl_dev->target == -1 || encl_dev->lun == -1) { 3319 rc = IO_OK; 3320 goto out; 3321 } 3322 |
|
3356 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { 3357 rc = IO_OK; 3358 goto out; 3359 } 3360 3361 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); 3362 if (!bssbp) 3363 goto out; --- 412 unchanged lines hidden (view full) --- 3776 return ldstat; 3777 break; 3778 default: 3779 break; 3780 } 3781 return HPSA_LV_OK; 3782} 3783 | 3323 if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) { 3324 rc = IO_OK; 3325 goto out; 3326 } 3327 3328 bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL); 3329 if (!bssbp) 3330 goto out; --- 412 unchanged lines hidden (view full) --- 3743 return ldstat; 3744 break; 3745 default: 3746 break; 3747 } 3748 return HPSA_LV_OK; 3749} 3750 |
3784/* 3785 * Find out if a logical device supports aborts by simply trying one. 3786 * Smart Array may claim not to support aborts on logical drives, but 3787 * if a MSA2000 * is connected, the drives on that will be presented 3788 * by the Smart Array as logical drives, and aborts may be sent to 3789 * those devices successfully. So the simplest way to find out is 3790 * to simply try an abort and see how the device responds. 3791 */ 3792static int hpsa_device_supports_aborts(struct ctlr_info *h, 3793 unsigned char *scsi3addr) 3794{ 3795 struct CommandList *c; 3796 struct ErrorInfo *ei; 3797 int rc = 0; 3798 3799 u64 tag = (u64) -1; /* bogus tag */ 3800 3801 /* Assume that physical devices support aborts */ 3802 if (!is_logical_dev_addr_mode(scsi3addr)) 3803 return 1; 3804 3805 c = cmd_alloc(h); 3806 3807 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG); 3808 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, 3809 DEFAULT_TIMEOUT); 3810 /* no unmap needed here because no data xfer. */ 3811 ei = c->err_info; 3812 switch (ei->CommandStatus) { 3813 case CMD_INVALID: 3814 rc = 0; 3815 break; 3816 case CMD_UNABORTABLE: 3817 case CMD_ABORT_FAILED: 3818 rc = 1; 3819 break; 3820 case CMD_TMF_STATUS: 3821 rc = hpsa_evaluate_tmf_status(h, c); 3822 break; 3823 default: 3824 rc = 0; 3825 break; 3826 } 3827 cmd_free(h, c); 3828 return rc; 3829} 3830 | |
3831static int hpsa_update_device_info(struct ctlr_info *h, 3832 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 3833 unsigned char *is_OBDR_device) 3834{ 3835 3836#define OBDR_SIG_OFFSET 43 3837#define OBDR_TAPE_SIG "$DR-10" 3838#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) --- 63 unchanged lines hidden (view full) --- 3902 this_device->offload_config = 0; 3903 this_device->offload_enabled = 0; 3904 this_device->offload_to_be_enabled = 0; 3905 this_device->hba_ioaccel_enabled = 0; 3906 this_device->volume_offline = 0; 3907 this_device->queue_depth = h->nr_cmds; 3908 } 3909 | 3751static int hpsa_update_device_info(struct ctlr_info *h, 3752 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device, 3753 unsigned char *is_OBDR_device) 3754{ 3755 3756#define OBDR_SIG_OFFSET 43 3757#define OBDR_TAPE_SIG "$DR-10" 3758#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1) --- 63 unchanged lines hidden (view full) --- 3822 this_device->offload_config = 0; 3823 this_device->offload_enabled = 0; 3824 this_device->offload_to_be_enabled = 0; 3825 this_device->hba_ioaccel_enabled = 0; 3826 this_device->volume_offline = 0; 3827 this_device->queue_depth = h->nr_cmds; 3828 } 3829 |
3830 if (this_device->external) 3831 this_device->queue_depth = EXTERNAL_QD; 3832 |
|
3910 if (is_OBDR_device) { 3911 /* See if this is a One-Button-Disaster-Recovery device 3912 * by looking for "$DR-10" at offset 43 in inquiry data. 3913 */ 3914 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 3915 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 3916 strncmp(obdr_sig, OBDR_TAPE_SIG, 3917 OBDR_SIG_LEN) == 0); 3918 } 3919 kfree(inq_buff); 3920 return 0; 3921 3922bail_out: 3923 kfree(inq_buff); 3924 return rc; 3925} 3926 | 3833 if (is_OBDR_device) { 3834 /* See if this is a One-Button-Disaster-Recovery device 3835 * by looking for "$DR-10" at offset 43 in inquiry data. 3836 */ 3837 obdr_sig = &inq_buff[OBDR_SIG_OFFSET]; 3838 *is_OBDR_device = (this_device->devtype == TYPE_ROM && 3839 strncmp(obdr_sig, OBDR_TAPE_SIG, 3840 OBDR_SIG_LEN) == 0); 3841 } 3842 kfree(inq_buff); 3843 return 0; 3844 3845bail_out: 3846 kfree(inq_buff); 3847 return rc; 3848} 3849 |
3927static void hpsa_update_device_supports_aborts(struct ctlr_info *h, 3928 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr) 3929{ 3930 unsigned long flags; 3931 int rc, entry; 3932 /* 3933 * See if this device supports aborts. If we already know 3934 * the device, we already know if it supports aborts, otherwise 3935 * we have to find out if it supports aborts by trying one. 3936 */ 3937 spin_lock_irqsave(&h->devlock, flags); 3938 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry); 3939 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) && 3940 entry >= 0 && entry < h->ndevices) { 3941 dev->supports_aborts = h->dev[entry]->supports_aborts; 3942 spin_unlock_irqrestore(&h->devlock, flags); 3943 } else { 3944 spin_unlock_irqrestore(&h->devlock, flags); 3945 dev->supports_aborts = 3946 hpsa_device_supports_aborts(h, scsi3addr); 3947 if (dev->supports_aborts < 0) 3948 dev->supports_aborts = 0; 3949 } 3950} 3951 | |
3952/* 3953 * Helper function to assign bus, target, lun mapping of devices. 3954 * Logical drive target and lun are assigned at this time, but 3955 * physical device lun and target assignment are deferred (assigned 3956 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 3957*/ 3958static void figure_bus_target_lun(struct ctlr_info *h, 3959 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) --- 21 unchanged lines hidden (view full) --- 3981 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, 3982 lunid & 0x00ff); 3983 return; 3984 } 3985 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, 3986 0, lunid & 0x3fff); 3987} 3988 | 3850/* 3851 * Helper function to assign bus, target, lun mapping of devices. 3852 * Logical drive target and lun are assigned at this time, but 3853 * physical device lun and target assignment are deferred (assigned 3854 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 3855*/ 3856static void figure_bus_target_lun(struct ctlr_info *h, 3857 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device) --- 21 unchanged lines hidden (view full) --- 3879 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff, 3880 lunid & 0x00ff); 3881 return; 3882 } 3883 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS, 3884 0, lunid & 0x3fff); 3885} 3886 |
3989 3990/* 3991 * Get address of physical disk used for an ioaccel2 mode command: 3992 * 1. Extract ioaccel2 handle from the command. 3993 * 2. Find a matching ioaccel2 handle from list of physical disks. 3994 * 3. Return: 3995 * 1 and set scsi3addr to address of matching physical 3996 * 0 if no matching physical disk was found. 3997 */ 3998static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, 3999 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) 4000{ 4001 struct io_accel2_cmd *c2 = 4002 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; 4003 unsigned long flags; 4004 int i; 4005 4006 spin_lock_irqsave(&h->devlock, flags); 4007 for (i = 0; i < h->ndevices; i++) 4008 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) { 4009 memcpy(scsi3addr, h->dev[i]->scsi3addr, 4010 sizeof(h->dev[i]->scsi3addr)); 4011 spin_unlock_irqrestore(&h->devlock, flags); 4012 return 1; 4013 } 4014 spin_unlock_irqrestore(&h->devlock, flags); 4015 return 0; 4016} 4017 | |
4018static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, 4019 int i, int nphysicals, int nlocal_logicals) 4020{ 4021 /* In report logicals, local logicals are listed first, 4022 * then any externals. 4023 */ 4024 int logicals_start = nphysicals + (raid_ctlr_position == 0); 4025 --- 84 unchanged lines hidden (view full) --- 4110static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, 4111 struct hpsa_scsi_dev_t *dev, 4112 struct ReportExtendedLUNdata *rlep, int rle_index, 4113 struct bmic_identify_physical_device *id_phys) 4114{ 4115 int rc; 4116 struct ext_report_lun_entry *rle; 4117 | 3887static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position, 3888 int i, int nphysicals, int nlocal_logicals) 3889{ 3890 /* In report logicals, local logicals are listed first, 3891 * then any externals. 3892 */ 3893 int logicals_start = nphysicals + (raid_ctlr_position == 0); 3894 --- 84 unchanged lines hidden (view full) --- 3979static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h, 3980 struct hpsa_scsi_dev_t *dev, 3981 struct ReportExtendedLUNdata *rlep, int rle_index, 3982 struct bmic_identify_physical_device *id_phys) 3983{ 3984 int rc; 3985 struct ext_report_lun_entry *rle; 3986 |
4118 /* 4119 * external targets don't support BMIC 4120 */ 4121 if (dev->external) { 4122 dev->queue_depth = 7; 4123 return; 4124 } 4125 | |
4126 rle = &rlep->LUN[rle_index]; 4127 4128 dev->ioaccel_handle = rle->ioaccel_handle; 4129 if ((rle->device_flags & 0x08) && dev->ioaccel_handle) 4130 dev->hba_ioaccel_enabled = 1; 4131 memset(id_phys, 0, sizeof(*id_phys)); 4132 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], 4133 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, --- 248 unchanged lines hidden (view full) --- 4382 goto out; 4383 } 4384 if (rc) { 4385 h->drv_req_rescan = 1; 4386 continue; 4387 } 4388 4389 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); | 3987 rle = &rlep->LUN[rle_index]; 3988 3989 dev->ioaccel_handle = rle->ioaccel_handle; 3990 if ((rle->device_flags & 0x08) && dev->ioaccel_handle) 3991 dev->hba_ioaccel_enabled = 1; 3992 memset(id_phys, 0, sizeof(*id_phys)); 3993 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0], 3994 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys, --- 248 unchanged lines hidden (view full) --- 4243 goto out; 4244 } 4245 if (rc) { 4246 h->drv_req_rescan = 1; 4247 continue; 4248 } 4249 4250 figure_bus_target_lun(h, lunaddrbytes, tmpdevice); |
4390 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes); | |
4391 this_device = currentsd[ncurrent]; 4392 4393 /* Turn on discovery_polling if there are ext target devices. 4394 * Event-based change notification is unreliable for those. 4395 */ 4396 if (!h->discovery_polling) { 4397 if (tmpdevice->external) { 4398 h->discovery_polling = 1; --- 180 unchanged lines hidden (view full) --- 4579 4580sglist_finished: 4581 4582 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 4583 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ 4584 return 0; 4585} 4586 | 4251 this_device = currentsd[ncurrent]; 4252 4253 /* Turn on discovery_polling if there are ext target devices. 4254 * Event-based change notification is unreliable for those. 4255 */ 4256 if (!h->discovery_polling) { 4257 if (tmpdevice->external) { 4258 h->discovery_polling = 1; --- 180 unchanged lines hidden (view full) --- 4439 4440sglist_finished: 4441 4442 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ 4443 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */ 4444 return 0; 4445} 4446 |
/*
 * Nonzero status returned by the ioaccel queueing/fixup helpers when a
 * command cannot be handled on the accelerated (ioaccel) path.
 */
#define IO_ACCEL_INELIGIBLE (1)
4588static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 4589{ 4590 int is_write = 0; 4591 u32 block; 4592 u32 block_cnt; 4593 4594 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 4595 switch (cdb[0]) { --- 50 unchanged lines hidden (view full) --- 4646 /* TODO: implement chaining support */ 4647 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 4648 atomic_dec(&phys_disk->ioaccel_cmds_out); 4649 return IO_ACCEL_INELIGIBLE; 4650 } 4651 4652 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 4653 | 4496static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) 4497{ 4498 int is_write = 0; 4499 u32 block; 4500 u32 block_cnt; 4501 4502 /* Perform some CDB fixups if needed using 10 byte reads/writes only */ 4503 switch (cdb[0]) { --- 50 unchanged lines hidden (view full) --- 4554 /* TODO: implement chaining support */ 4555 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) { 4556 atomic_dec(&phys_disk->ioaccel_cmds_out); 4557 return IO_ACCEL_INELIGIBLE; 4558 } 4559 4560 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); 4561 |
4562 if (is_zero_length_transfer(cdb)) { 4563 warn_zero_length_transfer(h, cdb, cdb_len, __func__); 4564 atomic_dec(&phys_disk->ioaccel_cmds_out); 4565 return IO_ACCEL_INELIGIBLE; 4566 } 4567 |
|
4654 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4655 atomic_dec(&phys_disk->ioaccel_cmds_out); 4656 return IO_ACCEL_INELIGIBLE; 4657 } 4658 4659 c->cmd_type = CMD_IOACCEL1; 4660 4661 /* Adjust the DMA address to point to the accelerated command buffer */ --- 148 unchanged lines hidden (view full) --- 4810 if (!cmd->device) 4811 return -1; 4812 4813 if (!cmd->device->hostdata) 4814 return -1; 4815 4816 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4817 | 4568 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4569 atomic_dec(&phys_disk->ioaccel_cmds_out); 4570 return IO_ACCEL_INELIGIBLE; 4571 } 4572 4573 c->cmd_type = CMD_IOACCEL1; 4574 4575 /* Adjust the DMA address to point to the accelerated command buffer */ --- 148 unchanged lines hidden (view full) --- 4724 if (!cmd->device) 4725 return -1; 4726 4727 if (!cmd->device->hostdata) 4728 return -1; 4729 4730 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); 4731 |
4732 if (is_zero_length_transfer(cdb)) { 4733 warn_zero_length_transfer(h, cdb, cdb_len, __func__); 4734 atomic_dec(&phys_disk->ioaccel_cmds_out); 4735 return IO_ACCEL_INELIGIBLE; 4736 } 4737 |
|
4818 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4819 atomic_dec(&phys_disk->ioaccel_cmds_out); 4820 return IO_ACCEL_INELIGIBLE; 4821 } 4822 4823 c->cmd_type = CMD_IOACCEL2; 4824 /* Adjust the DMA address to point to the accelerated command buffer */ 4825 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + --- 629 unchanged lines hidden (view full) --- 5455 5456 cmd = c->scsi_cmd; 5457 dev = cmd->device->hostdata; 5458 if (!dev) { 5459 cmd->result = DID_NO_CONNECT << 16; 5460 return hpsa_cmd_free_and_done(c->h, c, cmd); 5461 } 5462 if (c->reset_pending) | 4738 if (fixup_ioaccel_cdb(cdb, &cdb_len)) { 4739 atomic_dec(&phys_disk->ioaccel_cmds_out); 4740 return IO_ACCEL_INELIGIBLE; 4741 } 4742 4743 c->cmd_type = CMD_IOACCEL2; 4744 /* Adjust the DMA address to point to the accelerated command buffer */ 4745 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + --- 629 unchanged lines hidden (view full) --- 5375 5376 cmd = c->scsi_cmd; 5377 dev = cmd->device->hostdata; 5378 if (!dev) { 5379 cmd->result = DID_NO_CONNECT << 16; 5380 return hpsa_cmd_free_and_done(c->h, c, cmd); 5381 } 5382 if (c->reset_pending) |
5463 return hpsa_cmd_resolve_and_free(c->h, c); 5464 if (c->abort_pending) 5465 return hpsa_cmd_abort_and_free(c->h, c, cmd); | 5383 return hpsa_cmd_free_and_done(c->h, c, cmd); |
5466 if (c->cmd_type == CMD_IOACCEL2) { 5467 struct ctlr_info *h = c->h; 5468 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5469 int rc; 5470 5471 if (c2->error_data.serv_response == 5472 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { 5473 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); --- 134 unchanged lines hidden (view full) --- 5608 spin_unlock_irqrestore(&h->scan_lock, flags); 5609 5610 if (unlikely(lockup_detected(h))) 5611 return hpsa_scan_complete(h); 5612 5613 /* 5614 * Do the scan after a reset completion 5615 */ | 5384 if (c->cmd_type == CMD_IOACCEL2) { 5385 struct ctlr_info *h = c->h; 5386 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5387 int rc; 5388 5389 if (c2->error_data.serv_response == 5390 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) { 5391 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr); --- 134 unchanged lines hidden (view full) --- 5526 spin_unlock_irqrestore(&h->scan_lock, flags); 5527 5528 if (unlikely(lockup_detected(h))) 5529 return hpsa_scan_complete(h); 5530 5531 /* 5532 * Do the scan after a reset completion 5533 */ |
5534 spin_lock_irqsave(&h->reset_lock, flags); |
|
5616 if (h->reset_in_progress) { 5617 h->drv_req_rescan = 1; | 5535 if (h->reset_in_progress) { 5536 h->drv_req_rescan = 1; |
5537 spin_unlock_irqrestore(&h->reset_lock, flags); 5538 hpsa_scan_complete(h); |
|
5618 return; 5619 } | 5539 return; 5540 } |
5541 spin_unlock_irqrestore(&h->reset_lock, flags); |
|
5620 5621 hpsa_update_scsi_devices(h); 5622 5623 hpsa_scan_complete(h); 5624} 5625 5626static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 5627{ --- 195 unchanged lines hidden (view full) --- 5823 return rc; 5824} 5825 5826/* Need at least one of these error handlers to keep ../scsi/hosts.c from 5827 * complaining. Doing a host- or bus-reset can't do anything good here. 5828 */ 5829static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 5830{ | 5542 5543 hpsa_update_scsi_devices(h); 5544 5545 hpsa_scan_complete(h); 5546} 5547 5548static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth) 5549{ --- 195 unchanged lines hidden (view full) --- 5745 return rc; 5746} 5747 5748/* Need at least one of these error handlers to keep ../scsi/hosts.c from 5749 * complaining. Doing a host- or bus-reset can't do anything good here. 5750 */ 5751static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) 5752{ |
5831 int rc; | 5753 int rc = SUCCESS; |
5832 struct ctlr_info *h; 5833 struct hpsa_scsi_dev_t *dev; 5834 u8 reset_type; 5835 char msg[48]; | 5754 struct ctlr_info *h; 5755 struct hpsa_scsi_dev_t *dev; 5756 u8 reset_type; 5757 char msg[48]; |
5758 unsigned long flags; |
|
5836 5837 /* find the controller to which the command to be aborted was sent */ 5838 h = sdev_to_hba(scsicmd->device); 5839 if (h == NULL) /* paranoia */ 5840 return FAILED; 5841 | 5759 5760 /* find the controller to which the command to be aborted was sent */ 5761 h = sdev_to_hba(scsicmd->device); 5762 if (h == NULL) /* paranoia */ 5763 return FAILED; 5764 |
5842 if (lockup_detected(h)) 5843 return FAILED; | 5765 spin_lock_irqsave(&h->reset_lock, flags); 5766 h->reset_in_progress = 1; 5767 spin_unlock_irqrestore(&h->reset_lock, flags); |
5844 | 5768 |
5769 if (lockup_detected(h)) { 5770 rc = FAILED; 5771 goto return_reset_status; 5772 } 5773 |
|
5845 dev = scsicmd->device->hostdata; 5846 if (!dev) { 5847 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); | 5774 dev = scsicmd->device->hostdata; 5775 if (!dev) { 5776 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__); |
5848 return FAILED; | 5777 rc = FAILED; 5778 goto return_reset_status; |
5849 } 5850 | 5779 } 5780 |
5781 if (dev->devtype == TYPE_ENCLOSURE) { 5782 rc = SUCCESS; 5783 goto return_reset_status; 5784 } 5785 |
|
5851 /* if controller locked up, we can guarantee command won't complete */ 5852 if (lockup_detected(h)) { 5853 snprintf(msg, sizeof(msg), 5854 "cmd %d RESET FAILED, lockup detected", 5855 hpsa_get_cmd_index(scsicmd)); 5856 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); | 5786 /* if controller locked up, we can guarantee command won't complete */ 5787 if (lockup_detected(h)) { 5788 snprintf(msg, sizeof(msg), 5789 "cmd %d RESET FAILED, lockup detected", 5790 hpsa_get_cmd_index(scsicmd)); 5791 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); |
5857 return FAILED; | 5792 rc = FAILED; 5793 goto return_reset_status; |
5858 } 5859 5860 /* this reset request might be the result of a lockup; check */ 5861 if (detect_controller_lockup(h)) { 5862 snprintf(msg, sizeof(msg), 5863 "cmd %d RESET FAILED, new lockup detected", 5864 hpsa_get_cmd_index(scsicmd)); 5865 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); | 5794 } 5795 5796 /* this reset request might be the result of a lockup; check */ 5797 if (detect_controller_lockup(h)) { 5798 snprintf(msg, sizeof(msg), 5799 "cmd %d RESET FAILED, new lockup detected", 5800 hpsa_get_cmd_index(scsicmd)); 5801 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); |
5866 return FAILED; | 5802 rc = FAILED; 5803 goto return_reset_status; |
5867 } 5868 5869 /* Do not attempt on controller */ | 5804 } 5805 5806 /* Do not attempt on controller */ |
5870 if (is_hba_lunid(dev->scsi3addr)) 5871 return SUCCESS; | 5807 if (is_hba_lunid(dev->scsi3addr)) { 5808 rc = SUCCESS; 5809 goto return_reset_status; 5810 } |
5872 5873 if (is_logical_dev_addr_mode(dev->scsi3addr)) 5874 reset_type = HPSA_DEVICE_RESET_MSG; 5875 else 5876 reset_type = HPSA_PHYS_TARGET_RESET; 5877 5878 sprintf(msg, "resetting %s", 5879 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); 5880 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5881 | 5811 5812 if (is_logical_dev_addr_mode(dev->scsi3addr)) 5813 reset_type = HPSA_DEVICE_RESET_MSG; 5814 else 5815 reset_type = HPSA_PHYS_TARGET_RESET; 5816 5817 sprintf(msg, "resetting %s", 5818 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical "); 5819 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); 5820 |
5882 h->reset_in_progress = 1; 5883 | |
5884 /* send a reset to the SCSI LUN which the command was sent to */ 5885 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, 5886 DEFAULT_REPLY_QUEUE); | 5821 /* send a reset to the SCSI LUN which the command was sent to */ 5822 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type, 5823 DEFAULT_REPLY_QUEUE); |
5824 if (rc == 0) 5825 rc = SUCCESS; 5826 else 5827 rc = FAILED; 5828 |
|
5887 sprintf(msg, "reset %s %s", 5888 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", | 5829 sprintf(msg, "reset %s %s", 5830 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ", |
5889 rc == 0 ? "completed successfully" : "failed"); | 5831 rc == SUCCESS ? "completed successfully" : "failed"); |
5890 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); | 5832 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg); |
5891 h->reset_in_progress = 0; 5892 return rc == 0 ? SUCCESS : FAILED; 5893} | |
5894 | 5833 |
5895static void swizzle_abort_tag(u8 *tag) 5896{ 5897 u8 original_tag[8]; 5898 5899 memcpy(original_tag, tag, 8); 5900 tag[0] = original_tag[3]; 5901 tag[1] = original_tag[2]; 5902 tag[2] = original_tag[1]; 5903 tag[3] = original_tag[0]; 5904 tag[4] = original_tag[7]; 5905 tag[5] = original_tag[6]; 5906 tag[6] = original_tag[5]; 5907 tag[7] = original_tag[4]; 5908} 5909 5910static void hpsa_get_tag(struct ctlr_info *h, 5911 struct CommandList *c, __le32 *taglower, __le32 *tagupper) 5912{ 5913 u64 tag; 5914 if (c->cmd_type == CMD_IOACCEL1) { 5915 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) 5916 &h->ioaccel_cmd_pool[c->cmdindex]; 5917 tag = le64_to_cpu(cm1->tag); 5918 *tagupper = cpu_to_le32(tag >> 32); 5919 *taglower = cpu_to_le32(tag); 5920 return; 5921 } 5922 if (c->cmd_type == CMD_IOACCEL2) { 5923 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) 5924 &h->ioaccel2_cmd_pool[c->cmdindex]; 5925 /* upper tag not used in ioaccel2 mode */ 5926 memset(tagupper, 0, sizeof(*tagupper)); 5927 *taglower = cm2->Tag; 5928 return; 5929 } 5930 tag = le64_to_cpu(c->Header.tag); 5931 *tagupper = cpu_to_le32(tag >> 32); 5932 *taglower = cpu_to_le32(tag); 5933} 5934 5935static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr, 5936 struct CommandList *abort, int reply_queue) 5937{ 5938 int rc = IO_OK; 5939 struct CommandList *c; 5940 struct ErrorInfo *ei; 5941 __le32 tagupper, taglower; 5942 5943 c = cmd_alloc(h); 5944 5945 /* fill_cmd can't fail here, no buffer to map */ 5946 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag, 5947 0, 0, scsi3addr, TYPE_MSG); 5948 if (h->needs_abort_tags_swizzled) 5949 swizzle_abort_tag(&c->Request.CDB[4]); 5950 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 5951 hpsa_get_tag(h, abort, &taglower, &tagupper); 5952 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n", 5953 __func__, tagupper, taglower); 5954 /* no unmap needed here because no data xfer. 
*/ 5955 5956 ei = c->err_info; 5957 switch (ei->CommandStatus) { 5958 case CMD_SUCCESS: 5959 break; 5960 case CMD_TMF_STATUS: 5961 rc = hpsa_evaluate_tmf_status(h, c); 5962 break; 5963 case CMD_UNABORTABLE: /* Very common, don't make noise. */ 5964 rc = -1; 5965 break; 5966 default: 5967 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", 5968 __func__, tagupper, taglower); 5969 hpsa_scsi_interpret_error(h, c); 5970 rc = -1; 5971 break; 5972 } 5973 cmd_free(h, c); 5974 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", 5975 __func__, tagupper, taglower); | 5834return_reset_status: 5835 spin_lock_irqsave(&h->reset_lock, flags); 5836 h->reset_in_progress = 0; 5837 spin_unlock_irqrestore(&h->reset_lock, flags); |
5976 return rc; 5977} 5978 | 5838 return rc; 5839} 5840 |
5979static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h, 5980 struct CommandList *command_to_abort, int reply_queue) 5981{ 5982 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 5983 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2; 5984 struct io_accel2_cmd *c2a = 5985 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex]; 5986 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd; 5987 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata; 5988 5989 if (!dev) 5990 return; 5991 5992 /* 5993 * We're overlaying struct hpsa_tmf_struct on top of something which 5994 * was allocated as a struct io_accel2_cmd, so we better be sure it 5995 * actually fits, and doesn't overrun the error info space. 5996 */ 5997 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) > 5998 sizeof(struct io_accel2_cmd)); 5999 BUG_ON(offsetof(struct io_accel2_cmd, error_data) < 6000 offsetof(struct hpsa_tmf_struct, error_len) + 6001 sizeof(ac->error_len)); 6002 6003 c->cmd_type = IOACCEL2_TMF; 6004 c->scsi_cmd = SCSI_CMD_BUSY; 6005 6006 /* Adjust the DMA address to point to the accelerated command buffer */ 6007 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + 6008 (c->cmdindex * sizeof(struct io_accel2_cmd)); 6009 BUG_ON(c->busaddr & 0x0000007F); 6010 6011 memset(ac, 0, sizeof(*c2)); /* yes this is correct */ 6012 ac->iu_type = IOACCEL2_IU_TMF_TYPE; 6013 ac->reply_queue = reply_queue; 6014 ac->tmf = IOACCEL2_TMF_ABORT; 6015 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle); 6016 memset(ac->lun_id, 0, sizeof(ac->lun_id)); 6017 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT); 6018 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag)); 6019 ac->error_ptr = cpu_to_le64(c->busaddr + 6020 offsetof(struct io_accel2_cmd, error_data)); 6021 ac->error_len = cpu_to_le32(sizeof(c2->error_data)); 6022} 6023 6024/* ioaccel2 path firmware cannot handle abort task requests. 
6025 * Change abort requests to physical target reset, and send to the 6026 * address of the physical disk used for the ioaccel 2 command. 6027 * Return 0 on success (IO_OK) 6028 * -1 on failure 6029 */ 6030 6031static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, 6032 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue) 6033{ 6034 int rc = IO_OK; 6035 struct scsi_cmnd *scmd; /* scsi command within request being aborted */ 6036 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ 6037 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ 6038 unsigned char *psa = &phys_scsi3addr[0]; 6039 6040 /* Get a pointer to the hpsa logical device. */ 6041 scmd = abort->scsi_cmd; 6042 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); 6043 if (dev == NULL) { 6044 dev_warn(&h->pdev->dev, 6045 "Cannot abort: no device pointer for command.\n"); 6046 return -1; /* not abortable */ 6047 } 6048 6049 if (h->raid_offload_debug > 0) 6050 dev_info(&h->pdev->dev, 6051 "scsi %d:%d:%d:%d %s scsi3addr 0x%8phN\n", 6052 h->scsi_host->host_no, dev->bus, dev->target, dev->lun, 6053 "Reset as abort", scsi3addr); 6054 6055 if (!dev->offload_enabled) { 6056 dev_warn(&h->pdev->dev, 6057 "Can't abort: device is not operating in HP SSD Smart Path mode.\n"); 6058 return -1; /* not abortable */ 6059 } 6060 6061 /* Incoming scsi3addr is logical addr. We need physical disk addr. 
*/ 6062 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { 6063 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); 6064 return -1; /* not abortable */ 6065 } 6066 6067 /* send the reset */ 6068 if (h->raid_offload_debug > 0) 6069 dev_info(&h->pdev->dev, 6070 "Reset as abort: Resetting physical device at scsi3addr 0x%8phN\n", 6071 psa); 6072 rc = hpsa_do_reset(h, dev, psa, HPSA_PHYS_TARGET_RESET, reply_queue); 6073 if (rc != 0) { 6074 dev_warn(&h->pdev->dev, 6075 "Reset as abort: Failed on physical device at scsi3addr 0x%8phN\n", 6076 psa); 6077 return rc; /* failed to reset */ 6078 } 6079 6080 /* wait for device to recover */ 6081 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) { 6082 dev_warn(&h->pdev->dev, 6083 "Reset as abort: Failed: Device never recovered from reset: 0x%8phN\n", 6084 psa); 6085 return -1; /* failed to recover */ 6086 } 6087 6088 /* device recovered */ 6089 dev_info(&h->pdev->dev, 6090 "Reset as abort: Device recovered from reset: scsi3addr 0x%8phN\n", 6091 psa); 6092 6093 return rc; /* success */ 6094} 6095 6096static int hpsa_send_abort_ioaccel2(struct ctlr_info *h, 6097 struct CommandList *abort, int reply_queue) 6098{ 6099 int rc = IO_OK; 6100 struct CommandList *c; 6101 __le32 taglower, tagupper; 6102 struct hpsa_scsi_dev_t *dev; 6103 struct io_accel2_cmd *c2; 6104 6105 dev = abort->scsi_cmd->device->hostdata; 6106 if (!dev) 6107 return -1; 6108 6109 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled) 6110 return -1; 6111 6112 c = cmd_alloc(h); 6113 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue); 6114 c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; 6115 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); 6116 hpsa_get_tag(h, abort, &taglower, &tagupper); 6117 dev_dbg(&h->pdev->dev, 6118 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n", 6119 __func__, tagupper, taglower); 6120 /* no unmap needed here because no data xfer. 
*/ 6121 6122 dev_dbg(&h->pdev->dev, 6123 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n", 6124 __func__, tagupper, taglower, c2->error_data.serv_response); 6125 switch (c2->error_data.serv_response) { 6126 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: 6127 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: 6128 rc = 0; 6129 break; 6130 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: 6131 case IOACCEL2_SERV_RESPONSE_FAILURE: 6132 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: 6133 rc = -1; 6134 break; 6135 default: 6136 dev_warn(&h->pdev->dev, 6137 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n", 6138 __func__, tagupper, taglower, 6139 c2->error_data.serv_response); 6140 rc = -1; 6141 } 6142 cmd_free(h, c); 6143 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, 6144 tagupper, taglower); 6145 return rc; 6146} 6147 6148static int hpsa_send_abort_both_ways(struct ctlr_info *h, 6149 struct hpsa_scsi_dev_t *dev, struct CommandList *abort, int reply_queue) 6150{ 6151 /* 6152 * ioccelerator mode 2 commands should be aborted via the 6153 * accelerated path, since RAID path is unaware of these commands, 6154 * but not all underlying firmware can handle abort TMF. 6155 * Change abort to physical device reset when abort TMF is unsupported. 6156 */ 6157 if (abort->cmd_type == CMD_IOACCEL2) { 6158 if ((HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags) || 6159 dev->physical_device) 6160 return hpsa_send_abort_ioaccel2(h, abort, 6161 reply_queue); 6162 else 6163 return hpsa_send_reset_as_abort_ioaccel2(h, 6164 dev->scsi3addr, 6165 abort, reply_queue); 6166 } 6167 return hpsa_send_abort(h, dev->scsi3addr, abort, reply_queue); 6168} 6169 6170/* Find out which reply queue a command was meant to return on */ 6171static int hpsa_extract_reply_queue(struct ctlr_info *h, 6172 struct CommandList *c) 6173{ 6174 if (c->cmd_type == CMD_IOACCEL2) 6175 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue; 6176 return c->Header.ReplyQueue; 6177} 6178 | |
6179/* | 5841/* |
6180 * Limit concurrency of abort commands to prevent 6181 * over-subscription of commands 6182 */ 6183static inline int wait_for_available_abort_cmd(struct ctlr_info *h) 6184{ 6185#define ABORT_CMD_WAIT_MSECS 5000 6186 return !wait_event_timeout(h->abort_cmd_wait_queue, 6187 atomic_dec_if_positive(&h->abort_cmds_available) >= 0, 6188 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS)); 6189} 6190 6191/* Send an abort for the specified command. 6192 * If the device and controller support it, 6193 * send a task abort request. 6194 */ 6195static int hpsa_eh_abort_handler(struct scsi_cmnd *sc) 6196{ 6197 6198 int rc; 6199 struct ctlr_info *h; 6200 struct hpsa_scsi_dev_t *dev; 6201 struct CommandList *abort; /* pointer to command to be aborted */ 6202 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */ 6203 char msg[256]; /* For debug messaging. */ 6204 int ml = 0; 6205 __le32 tagupper, taglower; 6206 int refcount, reply_queue; 6207 6208 if (sc == NULL) 6209 return FAILED; 6210 6211 if (sc->device == NULL) 6212 return FAILED; 6213 6214 /* Find the controller of the command to be aborted */ 6215 h = sdev_to_hba(sc->device); 6216 if (h == NULL) 6217 return FAILED; 6218 6219 /* Find the device of the command to be aborted */ 6220 dev = sc->device->hostdata; 6221 if (!dev) { 6222 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n", 6223 msg); 6224 return FAILED; 6225 } 6226 6227 /* If controller locked up, we can guarantee command won't complete */ 6228 if (lockup_detected(h)) { 6229 hpsa_show_dev_msg(KERN_WARNING, h, dev, 6230 "ABORT FAILED, lockup detected"); 6231 return FAILED; 6232 } 6233 6234 /* This is a good time to check if controller lockup has occurred */ 6235 if (detect_controller_lockup(h)) { 6236 hpsa_show_dev_msg(KERN_WARNING, h, dev, 6237 "ABORT FAILED, new lockup detected"); 6238 return FAILED; 6239 } 6240 6241 /* Check that controller supports some kind of task abort */ 6242 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) && 6243 
!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) 6244 return FAILED; 6245 6246 memset(msg, 0, sizeof(msg)); 6247 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p", 6248 h->scsi_host->host_no, sc->device->channel, 6249 sc->device->id, sc->device->lun, 6250 "Aborting command", sc); 6251 6252 /* Get SCSI command to be aborted */ 6253 abort = (struct CommandList *) sc->host_scribble; 6254 if (abort == NULL) { 6255 /* This can happen if the command already completed. */ 6256 return SUCCESS; 6257 } 6258 refcount = atomic_inc_return(&abort->refcount); 6259 if (refcount == 1) { /* Command is done already. */ 6260 cmd_free(h, abort); 6261 return SUCCESS; 6262 } 6263 6264 /* Don't bother trying the abort if we know it won't work. */ 6265 if (abort->cmd_type != CMD_IOACCEL2 && 6266 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) { 6267 cmd_free(h, abort); 6268 return FAILED; 6269 } 6270 6271 /* 6272 * Check that we're aborting the right command. 6273 * It's possible the CommandList already completed and got re-used. 6274 */ 6275 if (abort->scsi_cmd != sc) { 6276 cmd_free(h, abort); 6277 return SUCCESS; 6278 } 6279 6280 abort->abort_pending = true; 6281 hpsa_get_tag(h, abort, &taglower, &tagupper); 6282 reply_queue = hpsa_extract_reply_queue(h, abort); 6283 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower); 6284 as = abort->scsi_cmd; 6285 if (as != NULL) 6286 ml += sprintf(msg+ml, 6287 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ", 6288 as->cmd_len, as->cmnd[0], as->cmnd[1], 6289 as->serial_number); 6290 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg); 6291 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command"); 6292 6293 /* 6294 * Command is in flight, or possibly already completed 6295 * by the firmware (but not to the scsi mid layer) but we can't 6296 * distinguish which. Send the abort down. 
6297 */ 6298 if (wait_for_available_abort_cmd(h)) { 6299 dev_warn(&h->pdev->dev, 6300 "%s FAILED, timeout waiting for an abort command to become available.\n", 6301 msg); 6302 cmd_free(h, abort); 6303 return FAILED; 6304 } 6305 rc = hpsa_send_abort_both_ways(h, dev, abort, reply_queue); 6306 atomic_inc(&h->abort_cmds_available); 6307 wake_up_all(&h->abort_cmd_wait_queue); 6308 if (rc != 0) { 6309 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg); 6310 hpsa_show_dev_msg(KERN_WARNING, h, dev, 6311 "FAILED to abort command"); 6312 cmd_free(h, abort); 6313 return FAILED; 6314 } 6315 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg); 6316 wait_event(h->event_sync_wait_queue, 6317 abort->scsi_cmd != sc || lockup_detected(h)); 6318 cmd_free(h, abort); 6319 return !lockup_detected(h) ? SUCCESS : FAILED; 6320} 6321 6322/* | |
6323 * For operations with an associated SCSI command, a command block is allocated 6324 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the 6325 * block request tag as an index into a table of entries. cmd_tagged_free() is 6326 * the complement, although cmd_free() may be called instead. 6327 */ 6328static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, 6329 struct scsi_cmnd *scmd) 6330{ --- 28 unchanged lines hidden (view full) --- 6359 hpsa_cmd_partial_init(h, idx, c); 6360 return c; 6361} 6362 6363static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) 6364{ 6365 /* 6366 * Release our reference to the block. We don't need to do anything | 5842 * For operations with an associated SCSI command, a command block is allocated 5843 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the 5844 * block request tag as an index into a table of entries. cmd_tagged_free() is 5845 * the complement, although cmd_free() may be called instead. 5846 */ 5847static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h, 5848 struct scsi_cmnd *scmd) 5849{ --- 28 unchanged lines hidden (view full) --- 5878 hpsa_cmd_partial_init(h, idx, c); 5879 return c; 5880} 5881 5882static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c) 5883{ 5884 /* 5885 * Release our reference to the block. We don't need to do anything |
6367 * else to free it, because it is accessed by index. (There's no point 6368 * in checking the result of the decrement, since we cannot guarantee 6369 * that there isn't a concurrent abort which is also accessing it.) | 5886 * else to free it, because it is accessed by index. |
6370 */ 6371 (void)atomic_dec(&c->refcount); 6372} 6373 6374/* 6375 * For operations that cannot sleep, a command block is allocated at init, 6376 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 6377 * which ones are free or in use. Lock must be held when calling this. --- 522 unchanged lines hidden (view full) --- 6900 return; 6901} 6902 6903static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 6904 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 6905 int cmd_type) 6906{ 6907 int pci_dir = XFER_NONE; | 5887 */ 5888 (void)atomic_dec(&c->refcount); 5889} 5890 5891/* 5892 * For operations that cannot sleep, a command block is allocated at init, 5893 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 5894 * which ones are free or in use. Lock must be held when calling this. --- 522 unchanged lines hidden (view full) --- 6417 return; 6418} 6419 6420static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, 6421 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr, 6422 int cmd_type) 6423{ 6424 int pci_dir = XFER_NONE; |
6908 u64 tag; /* for commands to be aborted */ | |
6909 6910 c->cmd_type = CMD_IOCTL_PEND; 6911 c->scsi_cmd = SCSI_CMD_BUSY; 6912 c->Header.ReplyQueue = 0; 6913 if (buff != NULL && size > 0) { 6914 c->Header.SGList = 1; 6915 c->Header.SGTotal = cpu_to_le16(1); 6916 } else { --- 167 unchanged lines hidden (view full) --- 7084 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 7085 /* If bytes 4-7 are zero, it means reset the */ 7086 /* LunID device */ 7087 c->Request.CDB[4] = 0x00; 7088 c->Request.CDB[5] = 0x00; 7089 c->Request.CDB[6] = 0x00; 7090 c->Request.CDB[7] = 0x00; 7091 break; | 6425 6426 c->cmd_type = CMD_IOCTL_PEND; 6427 c->scsi_cmd = SCSI_CMD_BUSY; 6428 c->Header.ReplyQueue = 0; 6429 if (buff != NULL && size > 0) { 6430 c->Header.SGList = 1; 6431 c->Header.SGTotal = cpu_to_le16(1); 6432 } else { --- 167 unchanged lines hidden (view full) --- 6600 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN; 6601 /* If bytes 4-7 are zero, it means reset the */ 6602 /* LunID device */ 6603 c->Request.CDB[4] = 0x00; 6604 c->Request.CDB[5] = 0x00; 6605 c->Request.CDB[6] = 0x00; 6606 c->Request.CDB[7] = 0x00; 6607 break; |
7092 case HPSA_ABORT_MSG: 7093 memcpy(&tag, buff, sizeof(tag)); 7094 dev_dbg(&h->pdev->dev, 7095 "Abort Tag:0x%016llx using rqst Tag:0x%016llx", 7096 tag, c->Header.tag); 7097 c->Request.CDBLen = 16; 7098 c->Request.type_attr_dir = 7099 TYPE_ATTR_DIR(cmd_type, 7100 ATTR_SIMPLE, XFER_WRITE); 7101 c->Request.Timeout = 0; /* Don't time out */ 7102 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT; 7103 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK; 7104 c->Request.CDB[2] = 0x00; /* reserved */ 7105 c->Request.CDB[3] = 0x00; /* reserved */ 7106 /* Tag to abort goes in CDB[4]-CDB[11] */ 7107 memcpy(&c->Request.CDB[4], &tag, sizeof(tag)); 7108 c->Request.CDB[12] = 0x00; /* reserved */ 7109 c->Request.CDB[13] = 0x00; /* reserved */ 7110 c->Request.CDB[14] = 0x00; /* reserved */ 7111 c->Request.CDB[15] = 0x00; /* reserved */ 7112 break; | |
7113 default: 7114 dev_warn(&h->pdev->dev, "unknown message type %d\n", 7115 cmd); 7116 BUG(); 7117 } 7118 } else { 7119 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 7120 BUG(); --- 941 unchanged lines hidden (view full) --- 8062 int prod_index, err; 8063 8064 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 8065 if (prod_index < 0) 8066 return prod_index; 8067 h->product_name = products[prod_index].product_name; 8068 h->access = *(products[prod_index].access); 8069 | 6608 default: 6609 dev_warn(&h->pdev->dev, "unknown message type %d\n", 6610 cmd); 6611 BUG(); 6612 } 6613 } else { 6614 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 6615 BUG(); --- 941 unchanged lines hidden (view full) --- 7557 int prod_index, err; 7558 7559 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id); 7560 if (prod_index < 0) 7561 return prod_index; 7562 h->product_name = products[prod_index].product_name; 7563 h->access = *(products[prod_index].access); 7564 |
8070 h->needs_abort_tags_swizzled = 8071 ctlr_needs_abort_tags_swizzled(h->board_id); 8072 | |
8073 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 8074 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 8075 8076 err = pci_enable_device(h->pdev); 8077 if (err) { 8078 dev_err(&h->pdev->dev, "failed to enable PCI device\n"); 8079 pci_disable_device(h->pdev); 8080 return err; --- 541 unchanged lines hidden (view full) --- 8622 goto out; 8623 } else 8624 rc = 0; /* no changes detected. */ 8625out: 8626 kfree(logdev); 8627 return rc; 8628} 8629 | 7565 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S | 7566 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); 7567 7568 err = pci_enable_device(h->pdev); 7569 if (err) { 7570 dev_err(&h->pdev->dev, "failed to enable PCI device\n"); 7571 pci_disable_device(h->pdev); 7572 return err; --- 541 unchanged lines hidden (view full) --- 8114 goto out; 8115 } else 8116 rc = 0; /* no changes detected. */ 8117out: 8118 kfree(logdev); 8119 return rc; 8120} 8121 |
8630static void hpsa_rescan_ctlr_worker(struct work_struct *work) | 8122static void hpsa_perform_rescan(struct ctlr_info *h) |
8631{ | 8123{ |
8124 struct Scsi_Host *sh = NULL; |
|
8632 unsigned long flags; | 8125 unsigned long flags; |
8633 struct ctlr_info *h = container_of(to_delayed_work(work), 8634 struct ctlr_info, rescan_ctlr_work); | |
8635 | 8126 |
8636 8637 if (h->remove_in_progress) 8638 return; 8639 | |
8640 /* 8641 * Do the scan after the reset 8642 */ | 8127 /* 8128 * Do the scan after the reset 8129 */ |
8130 spin_lock_irqsave(&h->reset_lock, flags); |
|
8643 if (h->reset_in_progress) { 8644 h->drv_req_rescan = 1; | 8131 if (h->reset_in_progress) { 8132 h->drv_req_rescan = 1; |
8133 spin_unlock_irqrestore(&h->reset_lock, flags); |
|
8645 return; 8646 } | 8134 return; 8135 } |
8136 spin_unlock_irqrestore(&h->reset_lock, flags); |
|
8647 | 8137 |
8648 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { 8649 scsi_host_get(h->scsi_host); | 8138 sh = scsi_host_get(h->scsi_host); 8139 if (sh != NULL) { 8140 hpsa_scan_start(sh); 8141 scsi_host_put(sh); 8142 h->drv_req_rescan = 0; 8143 } 8144} 8145 8146/* 8147 * watch for controller events 8148 */ 8149static void hpsa_event_monitor_worker(struct work_struct *work) 8150{ 8151 struct ctlr_info *h = container_of(to_delayed_work(work), 8152 struct ctlr_info, event_monitor_work); 8153 unsigned long flags; 8154 8155 spin_lock_irqsave(&h->lock, flags); 8156 if (h->remove_in_progress) { 8157 spin_unlock_irqrestore(&h->lock, flags); 8158 return; 8159 } 8160 spin_unlock_irqrestore(&h->lock, flags); 8161 8162 if (hpsa_ctlr_needs_rescan(h)) { |
8650 hpsa_ack_ctlr_events(h); | 8163 hpsa_ack_ctlr_events(h); |
8651 hpsa_scan_start(h->scsi_host); 8652 scsi_host_put(h->scsi_host); | 8164 hpsa_perform_rescan(h); 8165 } 8166 8167 spin_lock_irqsave(&h->lock, flags); 8168 if (!h->remove_in_progress) 8169 schedule_delayed_work(&h->event_monitor_work, 8170 HPSA_EVENT_MONITOR_INTERVAL); 8171 spin_unlock_irqrestore(&h->lock, flags); 8172} 8173 8174static void hpsa_rescan_ctlr_worker(struct work_struct *work) 8175{ 8176 unsigned long flags; 8177 struct ctlr_info *h = container_of(to_delayed_work(work), 8178 struct ctlr_info, rescan_ctlr_work); 8179 8180 spin_lock_irqsave(&h->lock, flags); 8181 if (h->remove_in_progress) { 8182 spin_unlock_irqrestore(&h->lock, flags); 8183 return; 8184 } 8185 spin_unlock_irqrestore(&h->lock, flags); 8186 8187 if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) { 8188 hpsa_perform_rescan(h); |
8653 } else if (h->discovery_polling) { 8654 hpsa_disable_rld_caching(h); 8655 if (hpsa_luns_changed(h)) { | 8189 } else if (h->discovery_polling) { 8190 hpsa_disable_rld_caching(h); 8191 if (hpsa_luns_changed(h)) { |
8656 struct Scsi_Host *sh = NULL; 8657 | |
8658 dev_info(&h->pdev->dev, 8659 "driver discovery polling rescan.\n"); | 8192 dev_info(&h->pdev->dev, 8193 "driver discovery polling rescan.\n"); |
8660 sh = scsi_host_get(h->scsi_host); 8661 if (sh != NULL) { 8662 hpsa_scan_start(sh); 8663 scsi_host_put(sh); 8664 } | 8194 hpsa_perform_rescan(h); |
8665 } 8666 } 8667 spin_lock_irqsave(&h->lock, flags); 8668 if (!h->remove_in_progress) 8669 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8670 h->heartbeat_sample_interval); 8671 spin_unlock_irqrestore(&h->lock, flags); 8672} --- 72 unchanged lines hidden (view full) --- 8745 8746 h->pdev = pdev; 8747 8748 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 8749 INIT_LIST_HEAD(&h->offline_device_list); 8750 spin_lock_init(&h->lock); 8751 spin_lock_init(&h->offline_device_lock); 8752 spin_lock_init(&h->scan_lock); | 8195 } 8196 } 8197 spin_lock_irqsave(&h->lock, flags); 8198 if (!h->remove_in_progress) 8199 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8200 h->heartbeat_sample_interval); 8201 spin_unlock_irqrestore(&h->lock, flags); 8202} --- 72 unchanged lines hidden (view full) --- 8275 8276 h->pdev = pdev; 8277 8278 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT; 8279 INIT_LIST_HEAD(&h->offline_device_list); 8280 spin_lock_init(&h->lock); 8281 spin_lock_init(&h->offline_device_lock); 8282 spin_lock_init(&h->scan_lock); |
8283 spin_lock_init(&h->reset_lock); |
|
8753 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); | 8284 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS); |
8754 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS); | |
8755 8756 /* Allocate and clear per-cpu variable lockup_detected */ 8757 h->lockup_detected = alloc_percpu(u32); 8758 if (!h->lockup_detected) { 8759 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); 8760 rc = -ENOMEM; 8761 goto clean1; /* aer/h */ 8762 } --- 35 unchanged lines hidden (view full) --- 8798 goto clean3; /* shost, pci, lu, aer/h */ 8799 rc = hpsa_alloc_cmd_pool(h); 8800 if (rc) 8801 goto clean4; /* irq, shost, pci, lu, aer/h */ 8802 rc = hpsa_alloc_sg_chain_blocks(h); 8803 if (rc) 8804 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ 8805 init_waitqueue_head(&h->scan_wait_queue); | 8285 8286 /* Allocate and clear per-cpu variable lockup_detected */ 8287 h->lockup_detected = alloc_percpu(u32); 8288 if (!h->lockup_detected) { 8289 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n"); 8290 rc = -ENOMEM; 8291 goto clean1; /* aer/h */ 8292 } --- 35 unchanged lines hidden (view full) --- 8328 goto clean3; /* shost, pci, lu, aer/h */ 8329 rc = hpsa_alloc_cmd_pool(h); 8330 if (rc) 8331 goto clean4; /* irq, shost, pci, lu, aer/h */ 8332 rc = hpsa_alloc_sg_chain_blocks(h); 8333 if (rc) 8334 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */ 8335 init_waitqueue_head(&h->scan_wait_queue); |
8806 init_waitqueue_head(&h->abort_cmd_wait_queue); | |
8807 init_waitqueue_head(&h->event_sync_wait_queue); 8808 mutex_init(&h->reset_mutex); 8809 h->scan_finished = 1; /* no scan currently in progress */ 8810 h->scan_waiting = 0; 8811 8812 pci_set_drvdata(pdev, h); 8813 h->ndevices = 0; 8814 --- 106 unchanged lines hidden (view full) --- 8921 /* Monitor the controller for firmware lockups */ 8922 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 8923 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 8924 schedule_delayed_work(&h->monitor_ctlr_work, 8925 h->heartbeat_sample_interval); 8926 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); 8927 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8928 h->heartbeat_sample_interval); | 8336 init_waitqueue_head(&h->event_sync_wait_queue); 8337 mutex_init(&h->reset_mutex); 8338 h->scan_finished = 1; /* no scan currently in progress */ 8339 h->scan_waiting = 0; 8340 8341 pci_set_drvdata(pdev, h); 8342 h->ndevices = 0; 8343 --- 106 unchanged lines hidden (view full) --- 8450 /* Monitor the controller for firmware lockups */ 8451 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; 8452 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); 8453 schedule_delayed_work(&h->monitor_ctlr_work, 8454 h->heartbeat_sample_interval); 8455 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker); 8456 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work, 8457 h->heartbeat_sample_interval); |
8458 INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker); 8459 schedule_delayed_work(&h->event_monitor_work, 8460 HPSA_EVENT_MONITOR_INTERVAL); |
|
8929 return 0; 8930 8931clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8932 hpsa_free_performant_mode(h); 8933 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8934clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ 8935 hpsa_free_sg_chain_blocks(h); 8936clean5: /* cmd, irq, shost, pci, lu, aer/h */ --- 152 unchanged lines hidden (view full) --- 9089 h = pci_get_drvdata(pdev); 9090 9091 /* Get rid of any controller monitoring work items */ 9092 spin_lock_irqsave(&h->lock, flags); 9093 h->remove_in_progress = 1; 9094 spin_unlock_irqrestore(&h->lock, flags); 9095 cancel_delayed_work_sync(&h->monitor_ctlr_work); 9096 cancel_delayed_work_sync(&h->rescan_ctlr_work); | 8461 return 0; 8462 8463clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ 8464 hpsa_free_performant_mode(h); 8465 h->access.set_intr_mask(h, HPSA_INTR_OFF); 8466clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */ 8467 hpsa_free_sg_chain_blocks(h); 8468clean5: /* cmd, irq, shost, pci, lu, aer/h */ --- 152 unchanged lines hidden (view full) --- 8621 h = pci_get_drvdata(pdev); 8622 8623 /* Get rid of any controller monitoring work items */ 8624 spin_lock_irqsave(&h->lock, flags); 8625 h->remove_in_progress = 1; 8626 spin_unlock_irqrestore(&h->lock, flags); 8627 cancel_delayed_work_sync(&h->monitor_ctlr_work); 8628 cancel_delayed_work_sync(&h->rescan_ctlr_work); |
8629 cancel_delayed_work_sync(&h->event_monitor_work); |
|
9097 destroy_workqueue(h->rescan_ctlr_wq); 9098 destroy_workqueue(h->resubmit_wq); 9099 9100 /* 9101 * Call before disabling interrupts. 9102 * scsi_remove_host can trigger I/O operations especially 9103 * when multipath is enabled. There can be SYNCHRONIZE CACHE 9104 * operations which cannot complete and will hang the system. --- 935 unchanged lines hidden --- | 8630 destroy_workqueue(h->rescan_ctlr_wq); 8631 destroy_workqueue(h->resubmit_wq); 8632 8633 /* 8634 * Call before disabling interrupts. 8635 * scsi_remove_host can trigger I/O operations especially 8636 * when multipath is enabled. There can be SYNCHRONIZE CACHE 8637 * operations which cannot complete and will hang the system. --- 935 unchanged lines hidden --- |