hpsa.c: changes between commit 41b3cf08cd5e7915293f3784ab649d48bb142153 (before) and commit 094963dad88c86f8f480c78992df03d916774c18 (after); lines marked '-' are removed, lines marked '+' are added.
/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *

--- 34 unchanged lines hidden ---

#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
+#include <linux/percpu.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"
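
The only functional change in this first hunk is the new #include <linux/percpu.h>, which supplies the dynamic per-cpu allocator API (alloc_percpu(), per_cpu_ptr(), free_percpu()) used by the rest of the diff. For the later hunks to compile, the lockup_detected member of struct ctlr_info, declared in hpsa.h and not shown here, presumably becomes a per-cpu pointer rather than a plain u32. A minimal sketch of such a declaration (hypothetical struct name, not taken from hpsa.h):

#include <linux/percpu.h>
#include <linux/types.h>

struct example_ctlr_info {
	/* one u32 slot per CPU, allocated later with alloc_percpu(u32) */
	u32 __percpu *lockup_detected;
	/* ... remaining driver state ... */
};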

--- 1927 unchanged lines hidden ---

{
	DECLARE_COMPLETION_ONSTACK(wait);

	c->waiting = &wait;
	enqueue_cmd_and_start_io(h, c);
	wait_for_completion(&wait);
}

+static u32 lockup_detected(struct ctlr_info *h)
+{
+	int cpu;
+	u32 rc, *lockup_detected;
+
+	cpu = get_cpu();
+	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+	rc = *lockup_detected;
+	put_cpu();
+	return rc;
+}
+
static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
	struct CommandList *c)
{
-	unsigned long flags;
-
	/* If controller lockup detected, fake a hardware error. */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
-	} else {
-		spin_unlock_irqrestore(&h->lock, flags);
+	else
		hpsa_scsi_do_simple_cmd_core(h, c);
-	}
}

#define MAX_DRIVER_CMD_RETRIES 25
static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
	struct CommandList *c, int data_direction)
{
	int backoff_time = 10, retry_count = 0;

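
With the helper above, readers of the lockup flag no longer take h->lock at all: lockup_detected() pins the caller to its current CPU with get_cpu()/put_cpu() and reads that CPU's private copy. The same read is often written with get_cpu_ptr()/put_cpu_ptr(), which bundle the preemption handling with the pointer lookup; a small sketch of that equivalent form, using hypothetical names rather than hpsa code:

#include <linux/percpu.h>
#include <linux/types.h>

struct example_dev {
	u32 __percpu *flag;		/* allocated with alloc_percpu(u32) */
};

/* Read this CPU's copy of the flag, analogous to lockup_detected() above. */
static u32 example_read_flag(struct example_dev *d)
{
	u32 *p, val;

	p = get_cpu_ptr(d->flag);	/* disables preemption, returns this CPU's slot */
	val = *p;
	put_cpu_ptr(d->flag);		/* re-enables preemption */
	return val;
}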

--- 1950 unchanged lines hidden ---


static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
-	unsigned long flags;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}
-	spin_unlock_irqrestore(&h->lock, flags);
	c = cmd_alloc(h);
	if (c == NULL) {	/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */


--- 95 unchanged lines hidden ---


	/*
	 * Don't let rescans be initiated on a controller known
	 * to be locked up. If the controller locks up *during*
	 * a rescan, that thread is probably hosed, but at least
	 * we can prevent new rescan threads from piling up on a
	 * locked up controller.
	 */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h))) {
		spin_lock_irqsave(&h->scan_lock, flags);
		h->scan_finished = 1;
		wake_up_all(&h->scan_wait_queue);
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return 1;
	}
-	spin_unlock_irqrestore(&h->lock, flags);
	return 0;
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;


--- 2639 unchanged lines hidden ---

	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}

+static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
+{
+	int i, cpu;
+
+	cpu = cpumask_first(cpu_online_mask);
+	for (i = 0; i < num_online_cpus(); i++) {
+		u32 *lockup_detected;
+		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
+		*lockup_detected = value;
+		cpu = cpumask_next(cpu, cpu_online_mask);
+	}
+	wmb(); /* be sure the per-cpu variables are out to memory */
+}
+
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
+	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
-	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
+	if (!lockup_detected) {
+		/* no heartbeat, but controller gave us a zero. */
+		dev_warn(&h->pdev->dev,
+			"lockup detected but scratchpad register is zero\n");
+		lockup_detected = 0xffffffff;
+	}
+	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
-			h->lockup_detected);
+			lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void detect_controller_lockup(struct ctlr_info *h)
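
set_lockup_detected_for_all_cpus() walks cpu_online_mask by hand with cpumask_first()/cpumask_next() and stores the same value into every online CPU's slot, then issues wmb() so the stores are visible before the controller is torn down. The same loop is commonly expressed with the for_each_online_cpu() iterator; a hedged sketch of that equivalent form (again with hypothetical names, not part of this patch):

#include <asm/barrier.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Stamp 'value' into every online CPU's copy of a per-cpu u32. */
static void example_set_for_all_cpus(u32 __percpu *flag, u32 value)
{
	int cpu;

	for_each_online_cpu(cpu)
		*per_cpu_ptr(flag, cpu) = value;
	wmb();	/* make sure the per-cpu stores reach memory */
}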

--- 118 unchanged lines hidden ---



static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
	detect_controller_lockup(h);
-	if (h->lockup_detected)
+	if (lockup_detected(h))
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		h->drv_req_rescan = 0;
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);

--- 47 unchanged lines hidden ---

	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->passthru_count_lock);
+
+	/* Allocate and clear per-cpu variable lockup_detected */
+	h->lockup_detected = alloc_percpu(u32);
+	if (!h->lockup_detected)
+		goto clean1;
+	set_lockup_detected_for_all_cpus(h, 0);
+
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

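
hpsa_init_one() now allocates the per-cpu area up front and clears every copy before the controller is brought up, and the later hunks add the matching free_percpu() in both the clean1 error path and hpsa_remove_one(). A compact sketch of that allocate/initialize/free lifecycle, with hypothetical probe/remove names standing in for the real driver entry points:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct example_dev {
	u32 __percpu *lockup_flag;
};

/* probe-time setup: one u32 per CPU, every copy cleared */
static int example_probe(struct example_dev *d)
{
	int cpu;

	d->lockup_flag = alloc_percpu(u32);
	if (!d->lockup_flag)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(d->lockup_flag, cpu) = 0;
	return 0;
}

/* remove/error path: release the per-cpu area exactly once */
static void example_remove(struct example_dev *d)
{
	free_percpu(d->lockup_flag);
	d->lockup_flag = NULL;
}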

--- 107 unchanged lines hidden ---

	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
+	if (h->lockup_detected)
+		free_percpu(h->lockup_detected);
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
-	unsigned long flags;

	/* Don't bother trying to flush the cache if locked up */
-	spin_lock_irqsave(&h->lock, flags);
-	if (unlikely(h->lockup_detected)) {
-		spin_unlock_irqrestore(&h->lock, flags);
+	if (unlikely(lockup_detected(h)))
		return;
-	}
-	spin_unlock_irqrestore(&h->lock, flags);
-
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;

--- 67 unchanged lines hidden ---

	hpsa_free_reply_queues(h);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
+	free_percpu(h->lockup_detected);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

--- 470 unchanged lines hidden ---