1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9 
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/irqdomain.h>
17 #include <linux/kthread.h>
18 #include <linux/slab.h>
19 #include <linux/of.h>
20 #include <linux/pm.h>
21 #include <linux/stringify.h>
22 #include <linux/bsg-lib.h>
23 #include <asm/firmware.h>
24 #include <asm/irq.h>
25 #include <asm/rtas.h>
26 #include <asm/vio.h>
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_tcq.h>
32 #include <scsi/scsi_transport_fc.h>
33 #include <scsi/scsi_bsg_fc.h>
34 #include "ibmvfc.h"
35 
36 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
37 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
38 static u64 max_lun = IBMVFC_MAX_LUN;
39 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
40 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
41 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
42 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
43 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
44 static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
45 static unsigned int mq_enabled = IBMVFC_MQ;
46 static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
47 static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
48 static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
49 static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
50 
51 static LIST_HEAD(ibmvfc_head);
52 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
53 static struct scsi_transport_template *ibmvfc_transport_template;
54 
55 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
56 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
57 MODULE_LICENSE("GPL");
58 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
59 
60 module_param_named(mq, mq_enabled, uint, S_IRUGO);
61 MODULE_PARM_DESC(mq, "Enable multiqueue support. "
62 		 "[Default=" __stringify(IBMVFC_MQ) "]");
63 module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
64 MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
65 		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
66 module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
67 MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
68 		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
69 module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
70 MODULE_PARM_DESC(mig_channels_only, "Prevent migration to a non-channelized system. "
71 		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
72 module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
73 MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to a system with fewer channels. "
74 		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
75 
76 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
77 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
78 		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
79 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
80 MODULE_PARM_DESC(default_timeout,
81 		 "Default timeout in seconds for initialization and EH commands. "
82 		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
83 module_param_named(max_requests, max_requests, uint, S_IRUGO);
84 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
85 		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
86 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
87 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
88 		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
89 module_param_named(max_targets, max_targets, uint, S_IRUGO);
90 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
91 		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
92 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
93 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
94 		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
95 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
96 MODULE_PARM_DESC(debug, "Enable driver debug information. "
97 		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
98 module_param_named(log_level, log_level, uint, 0);
99 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of the device driver. "
100 		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
101 module_param_named(cls3_error, cls3_error, uint, 0);
102 MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
103 		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
104 
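/*
 * Example (illustrative, not from the source): all of the parameters above
 * are set at module load time. A hypothetical invocation enabling
 * multiqueue with four submission queues and four channels would be:
 *
 *	modprobe ibmvfc mq=1 scsi_host_queues=4 scsi_hw_channels=4
 *
 * The parameter names come from the module_param_named() calls above.
 */
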
105 static const struct {
106 	u16 status;
107 	u16 error;
108 	u8 result;
109 	u8 retry;
110 	int log;
111 	char *name;
112 } cmd_status [] = {
113 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
114 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
115 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
116 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
117 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
118 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
119 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
120 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
121 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
122 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
123 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
124 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
125 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
126 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
127 
128 	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
129 	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
130 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
131 	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
132 	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
133 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
134 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
135 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
136 	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
137 	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
138 
139 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
140 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
141 	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
142 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
143 	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
144 	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
145 	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
146 	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
147 	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
148 	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
149 	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
150 
151 	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
152 	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
153 };
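
/*
 * Reading the table above: each entry maps a (status class, error) pair
 * reported by the VIOS to a SCSI host byte (result), a flag saying whether
 * the command may be retried (retry), and a flag saying whether the failure
 * is worth logging (log). For example, a fabric-mapped IBMVFC_CMD_TIMEOUT
 * completes with DID_TIME_OUT, is retryable, and is logged.
 */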
154 
155 static void ibmvfc_npiv_login(struct ibmvfc_host *);
156 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
157 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
158 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
159 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
160 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
161 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
162 
163 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
164 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);
165 
166 static const char *unknown_error = "unknown error";
167 
168 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
169 			  unsigned long length, unsigned long *cookie,
170 			  unsigned long *irq)
171 {
172 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
173 	long rc;
174 
175 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
176 	*cookie = retbuf[0];
177 	*irq = retbuf[1];
178 
179 	return rc;
180 }
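
/*
 * Illustrative sketch (assumed usage, not literal driver code): a caller
 * registering a SCSI sub-CRQ would pass the DMA token of a page-aligned
 * queue and get back a hypervisor cookie plus a virtual IRQ:
 *
 *	unsigned long cookie, irq;
 *	long rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token,
 *				PAGE_SIZE, &cookie, &irq);
 *	if (rc != H_SUCCESS)
 *		(fall back to the single CRQ pair)
 *
 * vdev and scrq are assumed names for the vio device and queue struct.
 */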
181 
182 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
183 {
184 	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
185 
186 	return (host_caps & cap_flags) ? 1 : 0;
187 }
188 
189 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
190 						   struct ibmvfc_cmd *vfc_cmd)
191 {
192 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
193 		return &vfc_cmd->v2.iu;
194 	else
195 		return &vfc_cmd->v1.iu;
196 }
197 
198 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
199 						 struct ibmvfc_cmd *vfc_cmd)
200 {
201 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
202 		return &vfc_cmd->v2.rsp;
203 	else
204 		return &vfc_cmd->v1.rsp;
205 }
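
/*
 * The two helpers above hide the versioned command layout: when the NPIV
 * login response advertised IBMVFC_HANDLE_VF_WWPN, the v2 union member of
 * struct ibmvfc_cmd is live, otherwise v1. Callers never pick v1/v2
 * directly; a typical sketch is:
 *
 *	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
 *	int_to_scsilun(sdev->lun, &iu->lun);
 */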
206 
207 #ifdef CONFIG_SCSI_IBMVFC_TRACE
208 /**
209  * ibmvfc_trc_start - Log a start trace entry
210  * @evt:		ibmvfc event struct
211  *
212  **/
213 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
214 {
215 	struct ibmvfc_host *vhost = evt->vhost;
216 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
217 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
218 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
219 	struct ibmvfc_trace_entry *entry;
220 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
221 
222 	entry = &vhost->trace[index];
223 	entry->evt = evt;
224 	entry->time = jiffies;
225 	entry->fmt = evt->crq.format;
226 	entry->type = IBMVFC_TRC_START;
227 
228 	switch (entry->fmt) {
229 	case IBMVFC_CMD_FORMAT:
230 		entry->op_code = iu->cdb[0];
231 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
232 		entry->lun = scsilun_to_int(&iu->lun);
233 		entry->tmf_flags = iu->tmf_flags;
234 		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
235 		break;
236 	case IBMVFC_MAD_FORMAT:
237 		entry->op_code = be32_to_cpu(mad->opcode);
238 		break;
239 	default:
240 		break;
241 	}
242 }
243 
244 /**
245  * ibmvfc_trc_end - Log an end trace entry
246  * @evt:		ibmvfc event struct
247  *
248  **/
249 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
250 {
251 	struct ibmvfc_host *vhost = evt->vhost;
252 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
253 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
254 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
255 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
256 	struct ibmvfc_trace_entry *entry;
257 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
258 
259 	entry = &vhost->trace[index];
260 	entry->evt = evt;
261 	entry->time = jiffies;
262 	entry->fmt = evt->crq.format;
263 	entry->type = IBMVFC_TRC_END;
264 
265 	switch (entry->fmt) {
266 	case IBMVFC_CMD_FORMAT:
267 		entry->op_code = iu->cdb[0];
268 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
269 		entry->lun = scsilun_to_int(&iu->lun);
270 		entry->tmf_flags = iu->tmf_flags;
271 		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
272 		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
273 		entry->u.end.fcp_rsp_flags = rsp->flags;
274 		entry->u.end.rsp_code = rsp->data.info.rsp_code;
275 		entry->u.end.scsi_status = rsp->scsi_status;
276 		break;
277 	case IBMVFC_MAD_FORMAT:
278 		entry->op_code = be32_to_cpu(mad->opcode);
279 		entry->u.end.status = be16_to_cpu(mad->status);
280 		break;
281 	default:
282 		break;
283 
284 	}
285 }
286 
287 #else
288 #define ibmvfc_trc_start(evt) do { } while (0)
289 #define ibmvfc_trc_end(evt) do { } while (0)
290 #endif
291 
292 /**
293  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
294  * @status:		status / error class
295  * @error:		error
296  *
297  * Return value:
298  *	index into cmd_status / -EINVAL on failure
299  **/
300 static int ibmvfc_get_err_index(u16 status, u16 error)
301 {
302 	int i;
303 
304 	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
305 		if ((cmd_status[i].status & status) == cmd_status[i].status &&
306 		    cmd_status[i].error == error)
307 			return i;
308 
309 	return -EINVAL;
310 }
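
/*
 * Note the masked comparison above: an entry matches when every bit of its
 * status class is set in the reported status, and the error matches
 * exactly. Worked example (values from the cmd_status table):
 *
 *	ibmvfc_get_err_index(IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT)
 *		returns the index of "command timeout" (DID_TIME_OUT,
 *		retryable, logged)
 *
 * The IBMVFC_FC_SCSI_ERROR entry with error == 0 is what catches plain
 * SCSI check conditions.
 */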
311 
312 /**
313  * ibmvfc_get_cmd_error - Find the error description for the fcp response
314  * @status:		status / error class
315  * @error:		error
316  *
317  * Return value:
318  *	error description string
319  **/
320 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
321 {
322 	int rc = ibmvfc_get_err_index(status, error);
323 	if (rc >= 0)
324 		return cmd_status[rc].name;
325 	return unknown_error;
326 }
327 
328 /**
329  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
330  * @vhost:      ibmvfc host struct
331  * @vfc_cmd:	ibmvfc command struct
332  *
333  * Return value:
334  *	SCSI result value to return for completed command
335  **/
336 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
337 {
338 	int err;
339 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
340 	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
341 
342 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
343 	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
344 	     rsp->data.info.rsp_code))
345 		return DID_ERROR << 16;
346 
347 	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
348 	if (err >= 0)
349 		return rsp->scsi_status | (cmd_status[err].result << 16);
350 	return rsp->scsi_status | (DID_ERROR << 16);
351 }
352 
353 /**
354  * ibmvfc_retry_cmd - Determine if error status is retryable
355  * @status:		status / error class
356  * @error:		error
357  *
358  * Return value:
359  *	1 if error should be retried / 0 if it should not
360  **/
361 static int ibmvfc_retry_cmd(u16 status, u16 error)
362 {
363 	int rc = ibmvfc_get_err_index(status, error);
364 
365 	if (rc >= 0)
366 		return cmd_status[rc].retry;
367 	return 1;
368 }
369 
370 static const char *unknown_fc_explain = "unknown fc explain";
371 
372 static const struct {
373 	u16 fc_explain;
374 	char *name;
375 } ls_explain [] = {
376 	{ 0x00, "no additional explanation" },
377 	{ 0x01, "service parameter error - options" },
378 	{ 0x03, "service parameter error - initiator control" },
379 	{ 0x05, "service parameter error - recipient control" },
380 	{ 0x07, "service parameter error - received data field size" },
381 	{ 0x09, "service parameter error - concurrent seq" },
382 	{ 0x0B, "service parameter error - credit" },
383 	{ 0x0D, "invalid N_Port/F_Port_Name" },
384 	{ 0x0E, "invalid node/Fabric Name" },
385 	{ 0x0F, "invalid common service parameters" },
386 	{ 0x11, "invalid association header" },
387 	{ 0x13, "association header required" },
388 	{ 0x15, "invalid originator S_ID" },
389 	{ 0x17, "invalid OX_ID-RX_ID combination" },
390 	{ 0x19, "command (request) already in progress" },
391 	{ 0x1E, "N_Port Login requested" },
392 	{ 0x1F, "Invalid N_Port_ID" },
393 };
394 
395 static const struct {
396 	u16 fc_explain;
397 	char *name;
398 } gs_explain [] = {
399 	{ 0x00, "no additional explanation" },
400 	{ 0x01, "port identifier not registered" },
401 	{ 0x02, "port name not registered" },
402 	{ 0x03, "node name not registered" },
403 	{ 0x04, "class of service not registered" },
404 	{ 0x06, "initial process associator not registered" },
405 	{ 0x07, "FC-4 TYPEs not registered" },
406 	{ 0x08, "symbolic port name not registered" },
407 	{ 0x09, "symbolic node name not registered" },
408 	{ 0x0A, "port type not registered" },
409 	{ 0xF0, "authorization exception" },
410 	{ 0xF1, "authentication exception" },
411 	{ 0xF2, "data base full" },
412 	{ 0xF3, "data base empty" },
413 	{ 0xF4, "processing request" },
414 	{ 0xF5, "unable to verify connection" },
415 	{ 0xF6, "devices not in a common zone" },
416 };
417 
418 /**
419  * ibmvfc_get_ls_explain - Return the FC Explain description text
420  * @status:	FC Explain status
421  *
422  * Returns:
423  *	error string
424  **/
425 static const char *ibmvfc_get_ls_explain(u16 status)
426 {
427 	int i;
428 
429 	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
430 		if (ls_explain[i].fc_explain == status)
431 			return ls_explain[i].name;
432 
433 	return unknown_fc_explain;
434 }
435 
436 /**
437  * ibmvfc_get_gs_explain - Return the FC Explain description text
438  * @status:	FC Explain status
439  *
440  * Returns:
441  *	error string
442  **/
443 static const char *ibmvfc_get_gs_explain(u16 status)
444 {
445 	int i;
446 
447 	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
448 		if (gs_explain[i].fc_explain == status)
449 			return gs_explain[i].name;
450 
451 	return unknown_fc_explain;
452 }
453 
454 static const struct {
455 	enum ibmvfc_fc_type fc_type;
456 	char *name;
457 } fc_type [] = {
458 	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
459 	{ IBMVFC_PORT_REJECT, "port reject" },
460 	{ IBMVFC_LS_REJECT, "ELS reject" },
461 	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
462 	{ IBMVFC_PORT_BUSY, "port busy" },
463 	{ IBMVFC_BASIC_REJECT, "basic reject" },
464 };
465 
466 static const char *unknown_fc_type = "unknown fc type";
467 
468 /**
469  * ibmvfc_get_fc_type - Return the FC Type description text
470  * @status:	FC Type error status
471  *
472  * Returns:
473  *	error string
474  **/
475 static const char *ibmvfc_get_fc_type(u16 status)
476 {
477 	int i;
478 
479 	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
480 		if (fc_type[i].fc_type == status)
481 			return fc_type[i].name;
482 
483 	return unknown_fc_type;
484 }
485 
486 /**
487  * ibmvfc_set_tgt_action - Set the next init action for the target
488  * @tgt:		ibmvfc target struct
489  * @action:		action to perform
490  *
491  * Returns:
492  *	0 if action changed / non-zero if not changed
493  **/
494 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
495 				  enum ibmvfc_target_action action)
496 {
497 	int rc = -EINVAL;
498 
499 	switch (tgt->action) {
500 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
501 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
502 		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
503 			tgt->action = action;
504 			rc = 0;
505 		}
506 		break;
507 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
508 		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
509 		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
510 			tgt->action = action;
511 			rc = 0;
512 		}
513 		break;
514 	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
515 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
516 			tgt->action = action;
517 			rc = 0;
518 		}
519 		break;
520 	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
521 		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
522 			tgt->action = action;
523 			rc = 0;
524 		}
525 		break;
526 	case IBMVFC_TGT_ACTION_DEL_RPORT:
527 		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
528 			tgt->action = action;
529 			rc = 0;
530 		}
531 		break;
532 	case IBMVFC_TGT_ACTION_DELETED_RPORT:
533 		break;
534 	default:
535 		tgt->action = action;
536 		rc = 0;
537 		break;
538 	}
539 
540 	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
541 		tgt->add_rport = 0;
542 
543 	return rc;
544 }
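
/*
 * Worked example of the transition table above: once a target is in
 * IBMVFC_TGT_ACTION_LOGOUT_RPORT, only the LOGOUT_RPORT_WAIT and
 * DEL_RPORT transitions are honored; asking for DELETED_RPORT directly
 * returns -EINVAL and leaves tgt->action untouched, so racing delete
 * paths can call ibmvfc_set_tgt_action() blindly and use the return code
 * to decide who owns the cleanup.
 */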
545 
546 /**
547  * ibmvfc_set_host_state - Set the state for the host
548  * @vhost:		ibmvfc host struct
549  * @state:		state to set host to
550  *
551  * Returns:
552  *	0 if state changed / non-zero if not changed
553  **/
554 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
555 				  enum ibmvfc_host_state state)
556 {
557 	int rc = 0;
558 
559 	switch (vhost->state) {
560 	case IBMVFC_HOST_OFFLINE:
561 		rc = -EINVAL;
562 		break;
563 	default:
564 		vhost->state = state;
565 		break;
566 	}
567 
568 	return rc;
569 }
570 
571 /**
572  * ibmvfc_set_host_action - Set the next init action for the host
573  * @vhost:		ibmvfc host struct
574  * @action:		action to perform
575  *
576  **/
577 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
578 				   enum ibmvfc_host_action action)
579 {
580 	switch (action) {
581 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
582 		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
583 			vhost->action = action;
584 		break;
585 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
586 		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
587 			vhost->action = action;
588 		break;
589 	case IBMVFC_HOST_ACTION_INIT_WAIT:
590 		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
591 			vhost->action = action;
592 		break;
593 	case IBMVFC_HOST_ACTION_QUERY:
594 		switch (vhost->action) {
595 		case IBMVFC_HOST_ACTION_INIT_WAIT:
596 		case IBMVFC_HOST_ACTION_NONE:
597 		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
598 			vhost->action = action;
599 			break;
600 		default:
601 			break;
602 		}
603 		break;
604 	case IBMVFC_HOST_ACTION_TGT_INIT:
605 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
606 			vhost->action = action;
607 		break;
608 	case IBMVFC_HOST_ACTION_REENABLE:
609 	case IBMVFC_HOST_ACTION_RESET:
610 		vhost->action = action;
611 		break;
612 	case IBMVFC_HOST_ACTION_INIT:
613 	case IBMVFC_HOST_ACTION_TGT_DEL:
614 	case IBMVFC_HOST_ACTION_LOGO:
615 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
616 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
617 	case IBMVFC_HOST_ACTION_NONE:
618 	default:
619 		switch (vhost->action) {
620 		case IBMVFC_HOST_ACTION_RESET:
621 		case IBMVFC_HOST_ACTION_REENABLE:
622 			break;
623 		default:
624 			vhost->action = action;
625 			break;
626 		}
627 		break;
628 	}
629 }
630 
631 /**
632  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
633  * @vhost:		ibmvfc host struct
634  *
635  * Return value:
636  *	nothing
637  **/
638 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
639 {
640 	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
641 	    vhost->state == IBMVFC_ACTIVE) {
642 		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
643 			scsi_block_requests(vhost->host);
644 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
645 		}
646 	} else
647 		vhost->reinit = 1;
648 
649 	wake_up(&vhost->work_wait_q);
650 }
651 
652 /**
653  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
654  * @tgt:		ibmvfc target struct
655  **/
656 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
657 {
658 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
659 		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
660 	wake_up(&tgt->vhost->work_wait_q);
661 }
662 
663 /**
664  * ibmvfc_link_down - Handle a link down event from the adapter
665  * @vhost:	ibmvfc host struct
666  * @state:	ibmvfc host state to enter
667  *
668  **/
669 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
670 			     enum ibmvfc_host_state state)
671 {
672 	struct ibmvfc_target *tgt;
673 
674 	ENTER;
675 	scsi_block_requests(vhost->host);
676 	list_for_each_entry(tgt, &vhost->targets, queue)
677 		ibmvfc_del_tgt(tgt);
678 	ibmvfc_set_host_state(vhost, state);
679 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
680 	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
681 	wake_up(&vhost->work_wait_q);
682 	LEAVE;
683 }
684 
685 /**
686  * ibmvfc_init_host - Start host initialization
687  * @vhost:		ibmvfc host struct
688  *
689  * Return value:
690  *	nothing
691  **/
692 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
693 {
694 	struct ibmvfc_target *tgt;
695 
696 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
697 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
698 			dev_err(vhost->dev,
699 				"Host initialization retries exceeded. Taking adapter offline\n");
700 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
701 			return;
702 		}
703 	}
704 
705 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
706 		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
707 		vhost->async_crq.cur = 0;
708 
709 		list_for_each_entry(tgt, &vhost->targets, queue)
710 			ibmvfc_del_tgt(tgt);
711 		scsi_block_requests(vhost->host);
712 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
713 		vhost->job_step = ibmvfc_npiv_login;
714 		wake_up(&vhost->work_wait_q);
715 	}
716 }
717 
718 /**
719  * ibmvfc_send_crq - Send a CRQ
720  * @vhost:	ibmvfc host struct
721  * @word1:	the first 64 bits of the data
722  * @word2:	the second 64 bits of the data
723  *
724  * Return value:
725  *	0 on success / other on failure
726  **/
727 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
728 {
729 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
730 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
731 }
732 
733 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
734 			       u64 word2, u64 word3, u64 word4)
735 {
736 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
737 
738 	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
739 				  word1, word2, word3, word4);
740 }
741 
742 /**
743  * ibmvfc_send_crq_init - Send a CRQ init message
744  * @vhost:	ibmvfc host struct
745  *
746  * Return value:
747  *	0 on success / other on failure
748  **/
749 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
750 {
751 	ibmvfc_dbg(vhost, "Sending CRQ init\n");
752 	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
753 }
754 
755 /**
756  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
757  * @vhost:	ibmvfc host struct
758  *
759  * Return value:
760  *	0 on success / other on failure
761  **/
762 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
763 {
764 	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
765 	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
766 }
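
/*
 * Background note on the two magic constants above (per the standard PAPR
 * CRQ convention, stated here as an aid to the reader): the high byte 0xC0
 * marks an initialization CRQ message, and the next byte selects the
 * subtype, 0x01 for "init" and 0x02 for "init complete".
 */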
767 
768 /**
769  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
770  * @vhost:	ibmvfc host that owns the event pool
771  * @queue:      ibmvfc queue struct
772  * @size:       pool size
773  *
774  * Returns zero on success.
775  **/
776 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
777 				  struct ibmvfc_queue *queue,
778 				  unsigned int size)
779 {
780 	int i;
781 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
782 
783 	ENTER;
784 	if (!size)
785 		return 0;
786 
787 	pool->size = size;
788 	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
789 	if (!pool->events)
790 		return -ENOMEM;
791 
792 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
793 					      size * sizeof(*pool->iu_storage),
794 					      &pool->iu_token, 0);
795 
796 	if (!pool->iu_storage) {
797 		kfree(pool->events);
798 		return -ENOMEM;
799 	}
800 
801 	INIT_LIST_HEAD(&queue->sent);
802 	INIT_LIST_HEAD(&queue->free);
803 	spin_lock_init(&queue->l_lock);
804 
805 	for (i = 0; i < size; ++i) {
806 		struct ibmvfc_event *evt = &pool->events[i];
807 
808 		atomic_set(&evt->free, 1);
809 		evt->crq.valid = 0x80;
810 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
811 		evt->xfer_iu = pool->iu_storage + i;
812 		evt->vhost = vhost;
813 		evt->queue = queue;
814 		evt->ext_list = NULL;
815 		list_add_tail(&evt->queue_list, &queue->free);
816 	}
817 
818 	LEAVE;
819 	return 0;
820 }
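
/*
 * Pool layout sketch: events[i] pairs with iu_storage[i], and each event's
 * crq.ioba is precomputed so the CRQ entry points the VIOS at the matching
 * transfer IU in the coherent DMA region:
 *
 *	events[0] <-> iu_storage[0]   (ioba = iu_token + 0 * sizeof(*xfer_iu))
 *	events[1] <-> iu_storage[1]   (ioba = iu_token + 1 * sizeof(*xfer_iu))
 *	...
 */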
821 
822 /**
823  * ibmvfc_free_event_pool - Frees the memory used by a host's event pool
824  * @vhost:	ibmvfc host that owns the event pool
825  * @queue:      ibmvfc queue struct
826  *
827  **/
828 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
829 				   struct ibmvfc_queue *queue)
830 {
831 	int i;
832 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
833 
834 	ENTER;
835 	for (i = 0; i < pool->size; ++i) {
836 		list_del(&pool->events[i].queue_list);
837 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
838 		if (pool->events[i].ext_list)
839 			dma_pool_free(vhost->sg_pool,
840 				      pool->events[i].ext_list,
841 				      pool->events[i].ext_list_token);
842 	}
843 
844 	kfree(pool->events);
845 	dma_free_coherent(vhost->dev,
846 			  pool->size * sizeof(*pool->iu_storage),
847 			  pool->iu_storage, pool->iu_token);
848 	LEAVE;
849 }
850 
851 /**
852  * ibmvfc_free_queue - Deallocate queue
853  * @vhost:	ibmvfc host struct
854  * @queue:	ibmvfc queue struct
855  *
856  * Unmaps dma and deallocates page for messages
857  **/
858 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
859 			      struct ibmvfc_queue *queue)
860 {
861 	struct device *dev = vhost->dev;
862 
863 	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
864 	free_page((unsigned long)queue->msgs.handle);
865 	queue->msgs.handle = NULL;
866 
867 	ibmvfc_free_event_pool(vhost, queue);
868 }
869 
870 /**
871  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
872  * @vhost:	ibmvfc host struct
873  *
874  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
875  * the crq with the hypervisor.
876  **/
877 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
878 {
879 	long rc = 0;
880 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
881 	struct ibmvfc_queue *crq = &vhost->crq;
882 
883 	ibmvfc_dbg(vhost, "Releasing CRQ\n");
884 	free_irq(vdev->irq, vhost);
885 	tasklet_kill(&vhost->tasklet);
886 	do {
887 		if (rc)
888 			msleep(100);
889 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
890 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
891 
892 	vhost->state = IBMVFC_NO_CRQ;
893 	vhost->logged_in = 0;
894 
895 	ibmvfc_free_queue(vhost, crq);
896 }
897 
898 /**
899  * ibmvfc_reenable_crq_queue - reenables the CRQ
900  * @vhost:	ibmvfc host struct
901  *
902  * Return value:
903  *	0 on success / other on failure
904  **/
905 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
906 {
907 	int rc = 0;
908 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
909 	unsigned long flags;
910 
911 	ibmvfc_release_sub_crqs(vhost);
912 
913 	/* Re-enable the CRQ */
914 	do {
915 		if (rc)
916 			msleep(100);
917 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
918 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
919 
920 	if (rc)
921 		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
922 
923 	spin_lock_irqsave(vhost->host->host_lock, flags);
924 	spin_lock(vhost->crq.q_lock);
925 	vhost->do_enquiry = 1;
926 	vhost->using_channels = 0;
927 	spin_unlock(vhost->crq.q_lock);
928 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
929 
930 	ibmvfc_init_sub_crqs(vhost);
931 
932 	return rc;
933 }
934 
935 /**
936  * ibmvfc_reset_crq - resets a crq after a failure
937  * @vhost:	ibmvfc host struct
938  *
939  * Return value:
940  *	0 on success / other on failure
941  **/
942 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
943 {
944 	int rc = 0;
945 	unsigned long flags;
946 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
947 	struct ibmvfc_queue *crq = &vhost->crq;
948 
949 	ibmvfc_release_sub_crqs(vhost);
950 
951 	/* Close the CRQ */
952 	do {
953 		if (rc)
954 			msleep(100);
955 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
956 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
957 
958 	spin_lock_irqsave(vhost->host->host_lock, flags);
959 	spin_lock(vhost->crq.q_lock);
960 	vhost->state = IBMVFC_NO_CRQ;
961 	vhost->logged_in = 0;
962 	vhost->do_enquiry = 1;
963 	vhost->using_channels = 0;
964 
965 	/* Clean out the queue */
966 	memset(crq->msgs.crq, 0, PAGE_SIZE);
967 	crq->cur = 0;
968 
969 	/* And re-open it again */
970 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
971 				crq->msg_token, PAGE_SIZE);
972 
973 	if (rc == H_CLOSED)
974 		/* Adapter is good, but other end is not ready */
975 		dev_warn(vhost->dev, "Partner adapter not ready\n");
976 	else if (rc != 0)
977 		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
978 
979 	spin_unlock(vhost->crq.q_lock);
980 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
981 
982 	ibmvfc_init_sub_crqs(vhost);
983 
984 	return rc;
985 }
986 
987 /**
988  * ibmvfc_valid_event - Determines if event is valid.
989  * @pool:	event_pool that contains the event
990  * @evt:	ibmvfc event to be checked for validity
991  *
992  * Return value:
993  *	1 if event is valid / 0 if event is not valid
994  **/
995 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
996 			      struct ibmvfc_event *evt)
997 {
998 	int index = evt - pool->events;
999 	if (index < 0 || index >= pool->size)	/* outside of bounds */
1000 		return 0;
1001 	if (evt != pool->events + index)	/* unaligned */
1002 		return 0;
1003 	return 1;
1004 }
1005 
1006 /**
1007  * ibmvfc_free_event - Free the specified event
1008  * @evt:	ibmvfc_event to be freed
1009  *
1010  **/
1011 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1012 {
1013 	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1014 	unsigned long flags;
1015 
1016 	BUG_ON(!ibmvfc_valid_event(pool, evt));
1017 	BUG_ON(atomic_inc_return(&evt->free) != 1);
1018 
1019 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1020 	list_add_tail(&evt->queue_list, &evt->queue->free);
1021 	if (evt->eh_comp)
1022 		complete(evt->eh_comp);
1023 	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1024 }
1025 
1026 /**
1027  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1028  * @evt:	ibmvfc event struct
1029  *
1030  * This function does not set up any error status; that must be done
1031  * before this function gets called.
1032  **/
1033 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1034 {
1035 	struct scsi_cmnd *cmnd = evt->cmnd;
1036 
1037 	if (cmnd) {
1038 		scsi_dma_unmap(cmnd);
1039 		cmnd->scsi_done(cmnd);
1040 	}
1041 
1042 	ibmvfc_free_event(evt);
1043 }
1044 
1045 /**
1046  * ibmvfc_complete_purge - Complete failed command list
1047  * @purge_list:		list head of failed commands
1048  *
1049  * This function runs completions for commands failed as a result of a
1050  * host reset or platform migration.
1051  **/
1052 static void ibmvfc_complete_purge(struct list_head *purge_list)
1053 {
1054 	struct ibmvfc_event *evt, *pos;
1055 
1056 	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1057 		list_del(&evt->queue_list);
1058 		ibmvfc_trc_end(evt);
1059 		evt->done(evt);
1060 	}
1061 }
1062 
1063 /**
1064  * ibmvfc_fail_request - Fail request with specified error code
1065  * @evt:		ibmvfc event struct
1066  * @error_code:	error code to fail request with
1067  *
1068  * Return value:
1069  *	none
1070  **/
1071 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1072 {
1073 	if (evt->cmnd) {
1074 		evt->cmnd->result = (error_code << 16);
1075 		evt->done = ibmvfc_scsi_eh_done;
1076 	} else
1077 		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1078 
1079 	del_timer(&evt->timer);
1080 }
1081 
1082 /**
1083  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1084  * @vhost:		ibmvfc host struct
1085  * @error_code:	error code to fail requests with
1086  *
1087  * Return value:
1088  *	none
1089  **/
1090 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1091 {
1092 	struct ibmvfc_event *evt, *pos;
1093 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
1094 	unsigned long flags;
1095 	int hwqs = 0;
1096 	int i;
1097 
1098 	if (vhost->using_channels)
1099 		hwqs = vhost->scsi_scrqs.active_queues;
1100 
1101 	ibmvfc_dbg(vhost, "Purging all requests\n");
1102 	spin_lock_irqsave(&vhost->crq.l_lock, flags);
1103 	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1104 		ibmvfc_fail_request(evt, error_code);
1105 	list_splice_init(&vhost->crq.sent, &vhost->purge);
1106 	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1107 
1108 	for (i = 0; i < hwqs; i++) {
1109 		spin_lock_irqsave(queues[i].q_lock, flags);
1110 		spin_lock(&queues[i].l_lock);
1111 		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1112 			ibmvfc_fail_request(evt, error_code);
1113 		list_splice_init(&queues[i].sent, &vhost->purge);
1114 		spin_unlock(&queues[i].l_lock);
1115 		spin_unlock_irqrestore(queues[i].q_lock, flags);
1116 	}
1117 }
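
/*
 * Usage note (sketch of the assumed calling pattern): this function only
 * marks events failed and collects them on vhost->purge while holding the
 * queue locks; the completions themselves run later, after the locks are
 * dropped, via ibmvfc_complete_purge() on a list spliced out of
 * vhost->purge.
 */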
1118 
1119 /**
1120  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1121  * @vhost:	struct ibmvfc host to reset
1122  **/
1123 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1124 {
1125 	ibmvfc_purge_requests(vhost, DID_ERROR);
1126 	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1127 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1128 }
1129 
1130 /**
1131  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1132  * @vhost:	struct ibmvfc host to reset
1133  **/
1134 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1135 {
1136 	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1137 	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1138 		scsi_block_requests(vhost->host);
1139 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1140 		vhost->job_step = ibmvfc_npiv_logout;
1141 		wake_up(&vhost->work_wait_q);
1142 	} else
1143 		ibmvfc_hard_reset_host(vhost);
1144 }
1145 
1146 /**
1147  * ibmvfc_reset_host - Reset the connection to the server
1148  * @vhost:	ibmvfc host struct
1149  **/
1150 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1151 {
1152 	unsigned long flags;
1153 
1154 	spin_lock_irqsave(vhost->host->host_lock, flags);
1155 	__ibmvfc_reset_host(vhost);
1156 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1157 }
1158 
1159 /**
1160  * ibmvfc_retry_host_init - Retry host initialization if allowed
1161  * @vhost:	ibmvfc host struct
1162  *
1163  * Returns: 1 if init will be retried / 0 if not
1164  *
1165  **/
1166 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1167 {
1168 	int retry = 0;
1169 
1170 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1171 		vhost->delay_init = 1;
1172 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1173 			dev_err(vhost->dev,
1174 				"Host initialization retries exceeded. Taking adapter offline\n");
1175 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1176 		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1177 			__ibmvfc_reset_host(vhost);
1178 		else {
1179 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1180 			retry = 1;
1181 		}
1182 	}
1183 
1184 	wake_up(&vhost->work_wait_q);
1185 	return retry;
1186 }
1187 
1188 /**
1189  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1190  * @starget:	scsi target struct
1191  *
1192  * Return value:
1193  *	ibmvfc_target struct / NULL if not found
1194  **/
1195 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1196 {
1197 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1198 	struct ibmvfc_host *vhost = shost_priv(shost);
1199 	struct ibmvfc_target *tgt;
1200 
1201 	list_for_each_entry(tgt, &vhost->targets, queue)
1202 		if (tgt->target_id == starget->id) {
1203 			kref_get(&tgt->kref);
1204 			return tgt;
1205 		}
1206 	return NULL;
1207 }
1208 
1209 /**
1210  * ibmvfc_get_target - Find the specified scsi_target
1211  * @starget:	scsi target struct
1212  *
1213  * Return value:
1214  *	ibmvfc_target struct / NULL if not found
1215  **/
1216 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1217 {
1218 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1219 	struct ibmvfc_target *tgt;
1220 	unsigned long flags;
1221 
1222 	spin_lock_irqsave(shost->host_lock, flags);
1223 	tgt = __ibmvfc_get_target(starget);
1224 	spin_unlock_irqrestore(shost->host_lock, flags);
1225 	return tgt;
1226 }
1227 
1228 /**
1229  * ibmvfc_get_host_speed - Get host port speed
1230  * @shost:		scsi host struct
1231  *
1232  * Return value:
1233  * 	none
1234  **/
1235 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1236 {
1237 	struct ibmvfc_host *vhost = shost_priv(shost);
1238 	unsigned long flags;
1239 
1240 	spin_lock_irqsave(shost->host_lock, flags);
1241 	if (vhost->state == IBMVFC_ACTIVE) {
1242 		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1243 		case 1:
1244 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1245 			break;
1246 		case 2:
1247 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1248 			break;
1249 		case 4:
1250 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1251 			break;
1252 		case 8:
1253 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1254 			break;
1255 		case 10:
1256 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1257 			break;
1258 		case 16:
1259 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1260 			break;
1261 		default:
1262 			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1263 				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1264 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1265 			break;
1266 		}
1267 	} else
1268 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1269 	spin_unlock_irqrestore(shost->host_lock, flags);
1270 }
1271 
1272 /**
1273  * ibmvfc_get_host_port_state - Get host port state
1274  * @shost:		scsi host struct
1275  *
1276  * Return value:
1277  * 	none
1278  **/
1279 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1280 {
1281 	struct ibmvfc_host *vhost = shost_priv(shost);
1282 	unsigned long flags;
1283 
1284 	spin_lock_irqsave(shost->host_lock, flags);
1285 	switch (vhost->state) {
1286 	case IBMVFC_INITIALIZING:
1287 	case IBMVFC_ACTIVE:
1288 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1289 		break;
1290 	case IBMVFC_LINK_DOWN:
1291 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1292 		break;
1293 	case IBMVFC_LINK_DEAD:
1294 	case IBMVFC_HOST_OFFLINE:
1295 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1296 		break;
1297 	case IBMVFC_HALTED:
1298 		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1299 		break;
1300 	case IBMVFC_NO_CRQ:
1301 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1302 		break;
1303 	default:
1304 		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1305 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1306 		break;
1307 	}
1308 	spin_unlock_irqrestore(shost->host_lock, flags);
1309 }
1310 
1311 /**
1312  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1313  * @rport:		rport struct
1314  * @timeout:	timeout value
1315  *
1316  * Return value:
1317  * 	none
1318  **/
1319 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1320 {
1321 	if (timeout)
1322 		rport->dev_loss_tmo = timeout;
1323 	else
1324 		rport->dev_loss_tmo = 1;
1325 }
1326 
1327 /**
1328  * ibmvfc_release_tgt - Free memory allocated for a target
1329  * @kref:		kref struct
1330  *
1331  **/
1332 static void ibmvfc_release_tgt(struct kref *kref)
1333 {
1334 	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1335 	kfree(tgt);
1336 }
1337 
1338 /**
1339  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1340  * @starget:	scsi target struct
1341  *
1342  * Return value:
1343  * 	none
1344  **/
1345 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1346 {
1347 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1348 	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
1349 	if (tgt)
1350 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1351 }
1352 
1353 /**
1354  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1355  * @starget:	scsi target struct
1356  *
1357  * Return value:
1358  * 	none
1359  **/
1360 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1361 {
1362 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1363 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1364 	if (tgt)
1365 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1366 }
1367 
1368 /**
1369  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1370  * @starget:	scsi target struct
1371  *
1372  * Return value:
1373  * 	none
1374  **/
1375 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1376 {
1377 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1378 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1379 	if (tgt)
1380 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1381 }
1382 
1383 /**
1384  * ibmvfc_wait_while_resetting - Wait while the host resets
1385  * @vhost:		ibmvfc host struct
1386  *
1387  * Return value:
1388  * 	0 on success / other on failure
1389  **/
1390 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1391 {
1392 	long timeout = wait_event_timeout(vhost->init_wait_q,
1393 					  ((vhost->state == IBMVFC_ACTIVE ||
1394 					    vhost->state == IBMVFC_HOST_OFFLINE ||
1395 					    vhost->state == IBMVFC_LINK_DEAD) &&
1396 					   vhost->action == IBMVFC_HOST_ACTION_NONE),
1397 					  (init_timeout * HZ));
1398 
1399 	return timeout ? 0 : -EIO;
1400 }
1401 
1402 /**
1403  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1404  * @shost:		scsi host struct
1405  *
1406  * Return value:
1407  * 	0 on success / other on failure
1408  **/
1409 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1410 {
1411 	struct ibmvfc_host *vhost = shost_priv(shost);
1412 
1413 	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1414 	ibmvfc_reset_host(vhost);
1415 	return ibmvfc_wait_while_resetting(vhost);
1416 }
1417 
1418 /**
1419  * ibmvfc_gather_partition_info - Gather info about the LPAR
1420  * @vhost:      ibmvfc host struct
1421  *
1422  * Return value:
1423  *	none
1424  **/
1425 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1426 {
1427 	struct device_node *rootdn;
1428 	const char *name;
1429 	const unsigned int *num;
1430 
1431 	rootdn = of_find_node_by_path("/");
1432 	if (!rootdn)
1433 		return;
1434 
1435 	name = of_get_property(rootdn, "ibm,partition-name", NULL);
1436 	if (name)
1437 		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1438 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
1439 	if (num)
1440 		vhost->partition_number = *num;
1441 	of_node_put(rootdn);
1442 }
1443 
1444 /**
1445  * ibmvfc_set_login_info - Setup info for NPIV login
1446  * @vhost:	ibmvfc host struct
1447  *
1448  * Return value:
1449  *	none
1450  **/
1451 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1452 {
1453 	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1454 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
1455 	struct device_node *of_node = vhost->dev->of_node;
1456 	const char *location;
1457 
1458 	memset(login_info, 0, sizeof(*login_info));
1459 
1460 	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1461 	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1462 	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1463 	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1464 	login_info->partition_num = cpu_to_be32(vhost->partition_number);
1465 	login_info->vfc_frame_version = cpu_to_be32(1);
1466 	login_info->fcp_version = cpu_to_be16(3);
1467 	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1468 	if (vhost->client_migrated)
1469 		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1470 
1471 	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
1472 	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1473 
1474 	if (vhost->mq_enabled || vhost->using_channels)
1475 		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
1476 
1477 	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1478 	login_info->async.len = cpu_to_be32(async_crq->size *
1479 					    sizeof(*async_crq->msgs.async));
1480 	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1481 	strncpy(login_info->device_name,
1482 		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1483 
1484 	location = of_get_property(of_node, "ibm,loc-code", NULL);
1485 	location = location ? location : dev_name(vhost->dev);
1486 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1487 }
1488 
1489 /**
1490  * ibmvfc_get_event - Gets the next free event in pool
1491  * @queue:      ibmvfc queue struct
1492  *
1493  * Returns a free event from the pool.
1494  **/
1495 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
1496 {
1497 	struct ibmvfc_event *evt;
1498 	unsigned long flags;
1499 
1500 	spin_lock_irqsave(&queue->l_lock, flags);
1501 	BUG_ON(list_empty(&queue->free));
1502 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1503 	atomic_set(&evt->free, 0);
1504 	list_del(&evt->queue_list);
1505 	spin_unlock_irqrestore(&queue->l_lock, flags);
1506 	return evt;
1507 }
1508 
1509 /**
1510  * ibmvfc_locked_done - Calls evt completion with host_lock held
1511  * @evt:	ibmvfc evt to complete
1512  *
1513  * All non-scsi command completion callbacks expect the host_lock to be
1514  * held. This callback is used by ibmvfc_init_event to wrap a MAD evt
1515  * with the host_lock.
1516  **/
1517 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1518 {
1519 	unsigned long flags;
1520 
1521 	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1522 	evt->_done(evt);
1523 	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1524 }
1525 
1526 /**
1527  * ibmvfc_init_event - Initialize fields in an event struct that are always
1528  *				required.
1529  * @evt:	The event
1530  * @done:	Routine to call when the event is responded to
1531  * @format:	CMD or MAD format
1532  **/
1533 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1534 			      void (*done) (struct ibmvfc_event *), u8 format)
1535 {
1536 	evt->cmnd = NULL;
1537 	evt->sync_iu = NULL;
1538 	evt->eh_comp = NULL;
1539 	evt->crq.format = format;
1540 	if (format == IBMVFC_CMD_FORMAT)
1541 		evt->done = done;
1542 	else {
1543 		evt->_done = done;
1544 		evt->done = ibmvfc_locked_done;
1545 	}
1546 	evt->hwq = 0;
1547 }
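
/*
 * Typical submission sketch built from the helpers above (assumed flow;
 * ibmvfc_example_done is a hypothetical completion handler):
 *
 *	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
 *
 *	ibmvfc_init_event(evt, ibmvfc_example_done, IBMVFC_MAD_FORMAT);
 *	(fill in evt->iu with the MAD payload)
 *	if (ibmvfc_send_event(evt, vhost, default_timeout))
 *		(the request failed before it was ever sent)
 */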
1548 
1549 /**
1550  * ibmvfc_map_sg_list - Initialize scatterlist
1551  * @scmd:	scsi command struct
1552  * @nseg:	number of scatterlist segments
1553  * @md:	memory descriptor list to initialize
1554  **/
1555 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1556 			       struct srp_direct_buf *md)
1557 {
1558 	int i;
1559 	struct scatterlist *sg;
1560 
1561 	scsi_for_each_sg(scmd, sg, nseg, i) {
1562 		md[i].va = cpu_to_be64(sg_dma_address(sg));
1563 		md[i].len = cpu_to_be32(sg_dma_len(sg));
1564 		md[i].key = 0;
1565 	}
1566 }
1567 
1568 /**
1569  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1570  * @scmd:		struct scsi_cmnd with the scatterlist
1571  * @evt:		ibmvfc event struct
1572  * @vfc_cmd:	vfc_cmd that contains the memory descriptor
1573  * @dev:		device for which to map dma memory
1574  *
1575  * Returns:
1576  *	0 on success / non-zero on failure
1577  **/
1578 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1579 			      struct ibmvfc_event *evt,
1580 			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1581 {
1582 
1583 	int sg_mapped;
1584 	struct srp_direct_buf *data = &vfc_cmd->ioba;
1585 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1586 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1587 
1588 	if (cls3_error)
1589 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1590 
1591 	sg_mapped = scsi_dma_map(scmd);
1592 	if (!sg_mapped) {
1593 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1594 		return 0;
1595 	} else if (unlikely(sg_mapped < 0)) {
1596 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1597 			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1598 		return sg_mapped;
1599 	}
1600 
1601 	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1602 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1603 		iu->add_cdb_len |= IBMVFC_WRDATA;
1604 	} else {
1605 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1606 		iu->add_cdb_len |= IBMVFC_RDDATA;
1607 	}
1608 
1609 	if (sg_mapped == 1) {
1610 		ibmvfc_map_sg_list(scmd, sg_mapped, data);
1611 		return 0;
1612 	}
1613 
1614 	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1615 
1616 	if (!evt->ext_list) {
1617 		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1618 					       &evt->ext_list_token);
1619 
1620 		if (!evt->ext_list) {
1621 			scsi_dma_unmap(scmd);
1622 			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1623 				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1624 			return -ENOMEM;
1625 		}
1626 	}
1627 
1628 	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1629 
1630 	data->va = cpu_to_be64(evt->ext_list_token);
1631 	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1632 	data->key = 0;
1633 	return 0;
1634 }
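
/*
 * Descriptor selection above, summarized: zero mapped segments sends no
 * memory descriptor (IBMVFC_NO_MEM_DESC); exactly one segment is described
 * inline in vfc_cmd->ioba; more than one segment spills into an external
 * srp_direct_buf array allocated from sg_pool, with vfc_cmd->ioba rewritten
 * to point at that indirect list (IBMVFC_SCATTERLIST).
 */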
1635 
1636 /**
1637  * ibmvfc_timeout - Internal command timeout handler
1638  * @t:	timer embedded in the struct ibmvfc_event that timed out
1639  *
1640  * Called when an internally generated command times out
1641  **/
1642 static void ibmvfc_timeout(struct timer_list *t)
1643 {
1644 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
1645 	struct ibmvfc_host *vhost = evt->vhost;
1646 	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1647 	ibmvfc_reset_host(vhost);
1648 }
1649 
1650 /**
1651  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1652  * @evt:		event to be sent
1653  * @vhost:		ibmvfc host struct
1654  * @timeout:	timeout in seconds - 0 means do not time command
1655  *
1656  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1657  **/
1658 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1659 			     struct ibmvfc_host *vhost, unsigned long timeout)
1660 {
1661 	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
1662 	unsigned long flags;
1663 	int rc;
1664 
1665 	/* Copy the IU into the transfer area */
1666 	*evt->xfer_iu = evt->iu;
1667 	if (evt->crq.format == IBMVFC_CMD_FORMAT)
1668 		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1669 	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1670 		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1671 	else
1672 		BUG();
1673 
1674 	timer_setup(&evt->timer, ibmvfc_timeout, 0);
1675 
1676 	if (timeout) {
1677 		evt->timer.expires = jiffies + (timeout * HZ);
1678 		add_timer(&evt->timer);
1679 	}
1680 
1681 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1682 	list_add_tail(&evt->queue_list, &evt->queue->sent);
1683 
1684 	mb();
1685 
1686 	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1687 		rc = ibmvfc_send_sub_crq(vhost,
1688 					 evt->queue->vios_cookie,
1689 					 be64_to_cpu(crq_as_u64[0]),
1690 					 be64_to_cpu(crq_as_u64[1]),
1691 					 0, 0);
1692 	else
1693 		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1694 				     be64_to_cpu(crq_as_u64[1]));
1695 
1696 	if (rc) {
1697 		list_del(&evt->queue_list);
1698 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1699 		del_timer(&evt->timer);
1700 
1701 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1702 		 * Firmware will send a CRQ with a transport event (0xFF) to
1703 		 * tell this client what has happened to the transport. This
1704 		 * will be handled in ibmvfc_handle_crq()
1705 		 */
1706 		if (rc == H_CLOSED) {
1707 			if (printk_ratelimit())
1708 				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1709 			if (evt->cmnd)
1710 				scsi_dma_unmap(evt->cmnd);
1711 			ibmvfc_free_event(evt);
1712 			return SCSI_MLQUEUE_HOST_BUSY;
1713 		}
1714 
1715 		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1716 		if (evt->cmnd) {
1717 			evt->cmnd->result = DID_ERROR << 16;
1718 			evt->done = ibmvfc_scsi_eh_done;
1719 		} else
1720 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1721 
1722 		evt->done(evt);
1723 	} else {
1724 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1725 		ibmvfc_trc_start(evt);
1726 	}
1727 
1728 	return 0;
1729 }
1730 
1731 /**
1732  * ibmvfc_log_error - Log an error for the failed command if appropriate
1733  * @evt:	ibmvfc event to log
1734  *
1735  **/
1736 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1737 {
1738 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1739 	struct ibmvfc_host *vhost = evt->vhost;
1740 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1741 	struct scsi_cmnd *cmnd = evt->cmnd;
1742 	const char *err = unknown_error;
1743 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1744 	int logerr = 0;
1745 	int rsp_code = 0;
1746 
1747 	if (index >= 0) {
1748 		logerr = cmd_status[index].log;
1749 		err = cmd_status[index].name;
1750 	}
1751 
1752 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1753 		return;
1754 
1755 	if (rsp->flags & FCP_RSP_LEN_VALID)
1756 		rsp_code = rsp->data.info.rsp_code;
1757 
1758 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1759 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1760 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1761 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1762 }
1763 
1764 /**
1765  * ibmvfc_relogin - Log back into the specified device
1766  * @sdev:	scsi device struct
1767  *
1768  **/
1769 static void ibmvfc_relogin(struct scsi_device *sdev)
1770 {
1771 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1772 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1773 	struct ibmvfc_target *tgt;
1774 	unsigned long flags;
1775 
1776 	spin_lock_irqsave(vhost->host->host_lock, flags);
1777 	list_for_each_entry(tgt, &vhost->targets, queue) {
1778 		if (rport == tgt->rport) {
1779 			ibmvfc_del_tgt(tgt);
1780 			break;
1781 		}
1782 	}
1783 
1784 	ibmvfc_reinit_host(vhost);
1785 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1786 }
1787 
1788 /**
1789  * ibmvfc_scsi_done - Handle responses from commands
1790  * @evt:	ibmvfc event to be handled
1791  *
1792  * Used as a callback when sending scsi cmds.
1793  **/
1794 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1795 {
1796 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1797 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1798 	struct scsi_cmnd *cmnd = evt->cmnd;
1799 	u32 rsp_len = 0;
1800 	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1801 
1802 	if (cmnd) {
1803 		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1804 			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1805 		else if (rsp->flags & FCP_RESID_UNDER)
1806 			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1807 		else
1808 			scsi_set_resid(cmnd, 0);
1809 
1810 		if (vfc_cmd->status) {
1811 			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1812 
1813 			if (rsp->flags & FCP_RSP_LEN_VALID)
1814 				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1815 			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1816 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
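			/* Sense data in the FCP response follows the variable-length
			 * response-info field, hence the rsp_len offset below.
			 */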
1817 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1818 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1819 			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1820 			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1821 				ibmvfc_relogin(cmnd->device);
1822 
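			/* A non-zero adapter status that mapped to a successful result
			 * is only trusted for underruns; anything else is forced to
			 * DID_ERROR below.
			 */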
1823 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1824 				cmnd->result = (DID_ERROR << 16);
1825 
1826 			ibmvfc_log_error(evt);
1827 		}
1828 
1829 		if (!cmnd->result &&
1830 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1831 			cmnd->result = (DID_ERROR << 16);
1832 
1833 		scsi_dma_unmap(cmnd);
1834 		cmnd->scsi_done(cmnd);
1835 	}
1836 
1837 	ibmvfc_free_event(evt);
1838 }
1839 
1840 /**
1841  * ibmvfc_host_chkready - Check if the host can accept commands
1842  * @vhost:	 struct ibmvfc host
1843  *
1844  * Returns:
1845  *	0 if the host can accept commands / a SCSI result code (DID_*) if not
1846  **/
1847 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1848 {
1849 	int result = 0;
1850 
1851 	switch (vhost->state) {
1852 	case IBMVFC_LINK_DEAD:
1853 	case IBMVFC_HOST_OFFLINE:
1854 		result = DID_NO_CONNECT << 16;
1855 		break;
1856 	case IBMVFC_NO_CRQ:
1857 	case IBMVFC_INITIALIZING:
1858 	case IBMVFC_HALTED:
1859 	case IBMVFC_LINK_DOWN:
1860 		result = DID_REQUEUE << 16;
1861 		break;
1862 	case IBMVFC_ACTIVE:
1863 		result = 0;
1864 		break;
1865 	}
1866 
1867 	return result;
1868 }
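
/*
 * Illustrative caller pattern for ibmvfc_host_chkready() (a sketch mirroring
 * ibmvfc_queuecommand() below): a non-zero return is a ready-made SCSI
 * result, so the command can be completed immediately without being issued.
 *
 *	if (unlikely((rc = ibmvfc_host_chkready(vhost)))) {
 *		cmnd->result = rc;
 *		cmnd->scsi_done(cmnd);
 *		return 0;
 *	}
 */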
1869 
1870 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1871 {
1872 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1873 	struct ibmvfc_host *vhost = evt->vhost;
1874 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1875 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1876 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1877 	size_t offset;
1878 
1879 	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
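	/* The response buffer sits at a different offset depending on whether
	 * the firmware supports the v2 command layout (VF WWPN handling).
	 */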
1880 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1881 		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1882 		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1883 	} else
1884 		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1885 	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1886 	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1887 	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1888 	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1889 	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1890 	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1891 	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1892 	int_to_scsilun(sdev->lun, &iu->lun);
1893 
1894 	return vfc_cmd;
1895 }
1896 
1897 /**
1898  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1899  * @shost:	scsi host struct
1900  * @cmnd:	struct scsi_cmnd to be executed
1901  *
1902  * Returns:
1903  *	0 on success / other on failure
1904  **/
1905 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1906 {
1907 	struct ibmvfc_host *vhost = shost_priv(shost);
1908 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1909 	struct ibmvfc_cmd *vfc_cmd;
1910 	struct ibmvfc_fcp_cmd_iu *iu;
1911 	struct ibmvfc_event *evt;
1912 	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
1913 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1914 	u16 scsi_channel;
1915 	int rc;
1916 
1917 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1918 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1919 		cmnd->result = rc;
1920 		cmnd->scsi_done(cmnd);
1921 		return 0;
1922 	}
1923 
1924 	cmnd->result = (DID_OK << 16);
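	/* With channels in use, block-mq hardware queues are spread across
	 * the sub-CRQs the VIOS actually granted, hence the modulo.
	 */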
1925 	if (vhost->using_channels) {
1926 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1927 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1928 		evt->hwq = scsi_channel;
1929 	} else
1930 		evt = ibmvfc_get_event(&vhost->crq);
1931 
1932 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1933 	evt->cmnd = cmnd;
1934 
1935 	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1936 	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1937 
1938 	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1939 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1940 
1941 	if (cmnd->flags & SCMD_TAGGED) {
1942 		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1943 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1944 	}
1945 
1946 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
1947 
1948 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1949 		return ibmvfc_send_event(evt, vhost, 0);
1950 
1951 	ibmvfc_free_event(evt);
1952 	if (rc == -ENOMEM)
1953 		return SCSI_MLQUEUE_HOST_BUSY;
1954 
1955 	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1956 		scmd_printk(KERN_ERR, cmnd,
1957 			    "Failed to map DMA buffer for command. rc=%d\n", rc);
1958 
1959 	cmnd->result = DID_ERROR << 16;
1960 	cmnd->scsi_done(cmnd);
1961 	return 0;
1962 }
1963 
1964 /**
1965  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1966  * @evt:	ibmvfc event struct
1967  *
1968  **/
1969 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1970 {
1971 	/* copy the response back */
1972 	if (evt->sync_iu)
1973 		*evt->sync_iu = *evt->xfer_iu;
1974 
1975 	complete(&evt->comp);
1976 }
1977 
1978 /**
1979  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1980  * @evt:	struct ibmvfc_event
1981  *
1982  **/
1983 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1984 {
1985 	struct ibmvfc_host *vhost = evt->vhost;
1986 
1987 	ibmvfc_free_event(evt);
1988 	vhost->aborting_passthru = 0;
1989 	dev_info(vhost->dev, "Passthru command cancelled\n");
1990 }
1991 
1992 /**
1993  * ibmvfc_bsg_timeout - Handle a BSG timeout
1994  * @job:	struct bsg_job that timed out
1995  *
1996  * Returns:
1997  *	0 on success / other on failure
1998  **/
1999 static int ibmvfc_bsg_timeout(struct bsg_job *job)
2000 {
2001 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2002 	unsigned long port_id = (unsigned long)job->dd_data;
2003 	struct ibmvfc_event *evt;
2004 	struct ibmvfc_tmf *tmf;
2005 	unsigned long flags;
2006 	int rc;
2007 
2008 	ENTER;
2009 	spin_lock_irqsave(vhost->host->host_lock, flags);
2010 	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
2011 		__ibmvfc_reset_host(vhost);
2012 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2013 		return 0;
2014 	}
2015 
2016 	vhost->aborting_passthru = 1;
2017 	evt = ibmvfc_get_event(&vhost->crq);
2018 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2019 
2020 	tmf = &evt->iu.tmf;
2021 	memset(tmf, 0, sizeof(*tmf));
2022 	tmf->common.version = cpu_to_be32(1);
2023 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2024 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2025 	tmf->scsi_id = cpu_to_be64(port_id);
2026 	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2027 	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2028 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2029 
2030 	if (rc != 0) {
2031 		vhost->aborting_passthru = 0;
2032 		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2033 		rc = -EIO;
2034 	} else
2035 		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2036 			 port_id);
2037 
2038 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2039 
2040 	LEAVE;
2041 	return rc;
2042 }
2043 
2044 /**
2045  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2046  * @vhost:		struct ibmvfc_host to send the command from
2047  * @port_id:	port ID of the target to log into
2048  *
2049  * Returns:
2050  *	0 on success / other on failure
2051  **/
2052 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2053 {
2054 	struct ibmvfc_port_login *plogi;
2055 	struct ibmvfc_target *tgt;
2056 	struct ibmvfc_event *evt;
2057 	union ibmvfc_iu rsp_iu;
2058 	unsigned long flags;
2059 	int rc = 0, issue_login = 1;
2060 
2061 	ENTER;
2062 	spin_lock_irqsave(vhost->host->host_lock, flags);
2063 	list_for_each_entry(tgt, &vhost->targets, queue) {
2064 		if (tgt->scsi_id == port_id) {
2065 			issue_login = 0;
2066 			break;
2067 		}
2068 	}
2069 
2070 	if (!issue_login)
2071 		goto unlock_out;
2072 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2073 		goto unlock_out;
2074 
2075 	evt = ibmvfc_get_event(&vhost->crq);
2076 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2077 	plogi = &evt->iu.plogi;
2078 	memset(plogi, 0, sizeof(*plogi));
2079 	plogi->common.version = cpu_to_be32(1);
2080 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2081 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
2082 	plogi->scsi_id = cpu_to_be64(port_id);
2083 	evt->sync_iu = &rsp_iu;
2084 	init_completion(&evt->comp);
2085 
2086 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2087 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2088 
2089 	if (rc)
2090 		return -EIO;
2091 
2092 	wait_for_completion(&evt->comp);
2093 
2094 	if (rsp_iu.plogi.common.status)
2095 		rc = -EIO;
2096 
2097 	spin_lock_irqsave(vhost->host->host_lock, flags);
2098 	ibmvfc_free_event(evt);
2099 unlock_out:
2100 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2101 	LEAVE;
2102 	return rc;
2103 }
2104 
2105 /**
2106  * ibmvfc_bsg_request - Handle a BSG request
2107  * @job:	struct bsg_job to be executed
2108  *
2109  * Returns:
2110  *	0 on success / other on failure
2111  **/
2112 static int ibmvfc_bsg_request(struct bsg_job *job)
2113 {
2114 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2115 	struct fc_rport *rport = fc_bsg_to_rport(job);
2116 	struct ibmvfc_passthru_mad *mad;
2117 	struct ibmvfc_event *evt;
2118 	union ibmvfc_iu rsp_iu;
2119 	unsigned long flags, port_id = -1;
2120 	struct fc_bsg_request *bsg_request = job->request;
2121 	struct fc_bsg_reply *bsg_reply = job->reply;
2122 	unsigned int code = bsg_request->msgcode;
2123 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
2124 	u32 fc_flags, rsp_len;
2125 
2126 	ENTER;
2127 	bsg_reply->reply_payload_rcv_len = 0;
2128 	if (rport)
2129 		port_id = rport->port_id;
2130 
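	/* For host-initiated ELS/CT the 24-bit FC destination address arrives
	 * as three separate bytes in the bsg request; rport-directed requests
	 * take the address from the rport above instead.
	 */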
2131 	switch (code) {
2132 	case FC_BSG_HST_ELS_NOLOGIN:
2133 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2134 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
2135 			bsg_request->rqst_data.h_els.port_id[2];
2136 		fallthrough;
2137 	case FC_BSG_RPT_ELS:
2138 		fc_flags = IBMVFC_FC_ELS;
2139 		break;
2140 	case FC_BSG_HST_CT:
2141 		issue_login = 1;
2142 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2143 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2144 			bsg_request->rqst_data.h_ct.port_id[2];
2145 		fallthrough;
2146 	case FC_BSG_RPT_CT:
2147 		fc_flags = IBMVFC_FC_CT_IU;
2148 		break;
2149 	default:
2150 		return -ENOTSUPP;
2151 	}
2152 
2153 	if (port_id == -1)
2154 		return -EINVAL;
2155 	if (!mutex_trylock(&vhost->passthru_mutex))
2156 		return -EBUSY;
2157 
2158 	job->dd_data = (void *)port_id;
2159 	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2160 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2161 
2162 	if (!req_seg) {
2163 		mutex_unlock(&vhost->passthru_mutex);
2164 		return -ENOMEM;
2165 	}
2166 
2167 	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2168 			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2169 
2170 	if (!rsp_seg) {
2171 		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2172 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2173 		mutex_unlock(&vhost->passthru_mutex);
2174 		return -ENOMEM;
2175 	}
2176 
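	/* The passthru MAD describes a single direct-mapped buffer in each
	 * direction (see mad->iu.cmd/rsp below), so multi-segment mappings
	 * are rejected.
	 */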
2177 	if (req_seg > 1 || rsp_seg > 1) {
2178 		rc = -EINVAL;
2179 		goto out;
2180 	}
2181 
2182 	if (issue_login)
2183 		rc = ibmvfc_bsg_plogi(vhost, port_id);
2184 
2185 	spin_lock_irqsave(vhost->host->host_lock, flags);
2186 
2187 	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2188 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2189 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2190 		goto out;
2191 	}
2192 
2193 	evt = ibmvfc_get_event(&vhost->crq);
2194 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2195 	mad = &evt->iu.passthru;
2196 
2197 	memset(mad, 0, sizeof(*mad));
2198 	mad->common.version = cpu_to_be32(1);
2199 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2200 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2201 
2202 	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2203 		offsetof(struct ibmvfc_passthru_mad, iu));
2204 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2205 
2206 	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2207 	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2208 	mad->iu.flags = cpu_to_be32(fc_flags);
2209 	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2210 
2211 	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2212 	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2213 	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2214 	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2215 	mad->iu.scsi_id = cpu_to_be64(port_id);
2216 	mad->iu.tag = cpu_to_be64((u64)evt);
2217 	rsp_len = be32_to_cpu(mad->iu.rsp.len);
2218 
2219 	evt->sync_iu = &rsp_iu;
2220 	init_completion(&evt->comp);
2221 	rc = ibmvfc_send_event(evt, vhost, 0);
2222 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2223 
2224 	if (rc) {
2225 		rc = -EIO;
2226 		goto out;
2227 	}
2228 
2229 	wait_for_completion(&evt->comp);
2230 
2231 	if (rsp_iu.passthru.common.status)
2232 		rc = -EIO;
2233 	else
2234 		bsg_reply->reply_payload_rcv_len = rsp_len;
2235 
2236 	spin_lock_irqsave(vhost->host->host_lock, flags);
2237 	ibmvfc_free_event(evt);
2238 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2239 	bsg_reply->result = rc;
2240 	bsg_job_done(job, bsg_reply->result,
2241 		       bsg_reply->reply_payload_rcv_len);
2242 	rc = 0;
2243 out:
2244 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2245 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2246 	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2247 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2248 	mutex_unlock(&vhost->passthru_mutex);
2249 	LEAVE;
2250 	return rc;
2251 }
2252 
2253 /**
2254  * ibmvfc_reset_device - Reset the device with the specified reset type
2255  * @sdev:	scsi device to reset
2256  * @type:	reset type
2257  * @desc:	reset type description for log messages
2258  *
2259  * Returns:
2260  *	0 on success / other on failure
2261  **/
2262 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2263 {
2264 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2265 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2266 	struct ibmvfc_cmd *tmf;
2267 	struct ibmvfc_event *evt = NULL;
2268 	union ibmvfc_iu rsp_iu;
2269 	struct ibmvfc_fcp_cmd_iu *iu;
2270 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2271 	int rsp_rc = -EBUSY;
2272 	unsigned long flags;
2273 	int rsp_code = 0;
2274 
2275 	spin_lock_irqsave(vhost->host->host_lock, flags);
2276 	if (vhost->state == IBMVFC_ACTIVE) {
2277 		if (vhost->using_channels)
2278 			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2279 		else
2280 			evt = ibmvfc_get_event(&vhost->crq);
2281 
2282 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2283 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2284 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2285 
2286 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2287 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2288 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2289 		iu->tmf_flags = type;
2290 		evt->sync_iu = &rsp_iu;
2291 
2292 		init_completion(&evt->comp);
2293 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2294 	}
2295 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2296 
2297 	if (rsp_rc != 0) {
2298 		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2299 			    desc, rsp_rc);
2300 		return -EIO;
2301 	}
2302 
2303 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2304 	wait_for_completion(&evt->comp);
2305 
2306 	if (rsp_iu.cmd.status)
2307 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2308 
2309 	if (rsp_code) {
2310 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2311 			rsp_code = fc_rsp->data.info.rsp_code;
2312 
2313 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2314 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2315 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2316 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2317 			    fc_rsp->scsi_status);
2318 		rsp_rc = -EIO;
2319 	} else
2320 		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2321 
2322 	spin_lock_irqsave(vhost->host->host_lock, flags);
2323 	ibmvfc_free_event(evt);
2324 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2325 	return rsp_rc;
2326 }
2327 
2328 /**
2329  * ibmvfc_match_rport - Match function for specified remote port
2330  * @evt:	ibmvfc event struct
2331  * @rport:	device to match
2332  *
2333  * Returns:
2334  *	1 if event matches rport / 0 if event does not match rport
2335  **/
2336 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2337 {
2338 	struct fc_rport *cmd_rport;
2339 
2340 	if (evt->cmnd) {
2341 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2342 		if (cmd_rport == rport)
2343 			return 1;
2344 	}
2345 	return 0;
2346 }
2347 
2348 /**
2349  * ibmvfc_match_target - Match function for specified target
2350  * @evt:	ibmvfc event struct
2351  * @device:	device to match (starget)
2352  *
2353  * Returns:
2354  *	1 if event matches starget / 0 if event does not match starget
2355  **/
2356 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2357 {
2358 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2359 		return 1;
2360 	return 0;
2361 }
2362 
2363 /**
2364  * ibmvfc_match_lun - Match function for specified LUN
2365  * @evt:	ibmvfc event struct
2366  * @device:	device to match (sdev)
2367  *
2368  * Returns:
2369  *	1 if event matches sdev / 0 if event does not match sdev
2370  **/
2371 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2372 {
2373 	if (evt->cmnd && evt->cmnd->device == device)
2374 		return 1;
2375 	return 0;
2376 }
2377 
2378 /**
2379  * ibmvfc_event_is_free - Check if event is free or not
2380  * @evt:	ibmvfc event struct
2381  *
2382  * Returns:
2383  *	true / false
2384  **/
2385 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2386 {
2387 	struct ibmvfc_event *loop_evt;
2388 
2389 	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2390 		if (loop_evt == evt)
2391 			return true;
2392 
2393 	return false;
2394 }
2395 
2396 /**
2397  * ibmvfc_wait_for_ops - Wait for ops to complete
2398  * @vhost:	ibmvfc host struct
2399  * @device:	device to match (starget or sdev)
2400  * @match:	match function
2401  *
2402  * Returns:
2403  *	SUCCESS / FAILED
2404  **/
2405 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2406 			       int (*match) (struct ibmvfc_event *, void *))
2407 {
2408 	struct ibmvfc_event *evt;
2409 	DECLARE_COMPLETION_ONSTACK(comp);
2410 	int wait, i, q_index, q_size;
2411 	unsigned long flags;
2412 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2413 	struct ibmvfc_queue *queues;
2414 
2415 	ENTER;
2416 	if (vhost->mq_enabled && vhost->using_channels) {
2417 		queues = vhost->scsi_scrqs.scrqs;
2418 		q_size = vhost->scsi_scrqs.active_queues;
2419 	} else {
2420 		queues = &vhost->crq;
2421 		q_size = 1;
2422 	}
2423 
2424 	do {
2425 		wait = 0;
2426 		spin_lock_irqsave(vhost->host->host_lock, flags);
2427 		for (q_index = 0; q_index < q_size; q_index++) {
2428 			spin_lock(&queues[q_index].l_lock);
2429 			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2430 				evt = &queues[q_index].evt_pool.events[i];
2431 				if (!ibmvfc_event_is_free(evt)) {
2432 					if (match(evt, device)) {
2433 						evt->eh_comp = &comp;
2434 						wait++;
2435 					}
2436 				}
2437 			}
2438 			spin_unlock(&queues[q_index].l_lock);
2439 		}
2440 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2441 
2442 		if (wait) {
2443 			timeout = wait_for_completion_timeout(&comp, timeout);
2444 
2445 			if (!timeout) {
2446 				wait = 0;
2447 				spin_lock_irqsave(vhost->host->host_lock, flags);
2448 				for (q_index = 0; q_index < q_size; q_index++) {
2449 					spin_lock(&queues[q_index].l_lock);
2450 					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2451 						evt = &queues[q_index].evt_pool.events[i];
2452 						if (!ibmvfc_event_is_free(evt)) {
2453 							if (match(evt, device)) {
2454 								evt->eh_comp = NULL;
2455 								wait++;
2456 							}
2457 						}
2458 					}
2459 					spin_unlock(&queues[q_index].l_lock);
2460 				}
2461 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2462 				if (wait)
2463 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2464 				LEAVE;
2465 				return wait ? FAILED : SUCCESS;
2466 			}
2467 		}
2468 	} while (wait);
2469 
2470 	LEAVE;
2471 	return SUCCESS;
2472 }
2473 
2474 static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
2475 					    struct scsi_device *sdev,
2476 					    int type)
2477 {
2478 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2479 	struct scsi_target *starget = scsi_target(sdev);
2480 	struct fc_rport *rport = starget_to_rport(starget);
2481 	struct ibmvfc_event *evt;
2482 	struct ibmvfc_tmf *tmf;
2483 
2484 	evt = ibmvfc_get_event(queue);
2485 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2486 
2487 	tmf = &evt->iu.tmf;
2488 	memset(tmf, 0, sizeof(*tmf));
2489 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2490 		tmf->common.version = cpu_to_be32(2);
2491 		tmf->target_wwpn = cpu_to_be64(rport->port_name);
2492 	} else {
2493 		tmf->common.version = cpu_to_be32(1);
2494 	}
2495 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2496 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2497 	tmf->scsi_id = cpu_to_be64(rport->port_id);
2498 	int_to_scsilun(sdev->lun, &tmf->lun);
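	/* Drop the ABTS-suppression flag if the VIOS never advertised
	 * support for it.
	 */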
2499 	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2500 		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2501 	if (vhost->state == IBMVFC_ACTIVE)
2502 		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2503 	else
2504 		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2505 	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2506 	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2507 
2508 	init_completion(&evt->comp);
2509 
2510 	return evt;
2511 }
2512 
2513 static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
2514 {
2515 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2516 	struct ibmvfc_event *evt, *found_evt, *temp;
2517 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
2518 	unsigned long flags;
2519 	int num_hwq, i;
2520 	int fail = 0;
2521 	LIST_HEAD(cancelq);
2522 	u16 status;
2523 
2524 	ENTER;
2525 	spin_lock_irqsave(vhost->host->host_lock, flags);
2526 	num_hwq = vhost->scsi_scrqs.active_queues;
2527 	for (i = 0; i < num_hwq; i++) {
2528 		spin_lock(queues[i].q_lock);
2529 		spin_lock(&queues[i].l_lock);
2530 		found_evt = NULL;
2531 		list_for_each_entry(evt, &queues[i].sent, queue_list) {
2532 			if (evt->cmnd && evt->cmnd->device == sdev) {
2533 				found_evt = evt;
2534 				break;
2535 			}
2536 		}
2537 		spin_unlock(&queues[i].l_lock);
2538 
2539 		if (found_evt && vhost->logged_in) {
2540 			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
2541 			evt->sync_iu = &queues[i].cancel_rsp;
2542 			ibmvfc_send_event(evt, vhost, default_timeout);
2543 			list_add_tail(&evt->cancel, &cancelq);
2544 		}
2545 
2546 		spin_unlock(queues[i].q_lock);
2547 	}
2548 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2549 
2550 	if (list_empty(&cancelq)) {
2551 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2552 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2553 		return 0;
2554 	}
2555 
2556 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2557 
2558 	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
2559 		wait_for_completion(&evt->comp);
2560 		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
2561 		list_del(&evt->cancel);
2562 		ibmvfc_free_event(evt);
2563 
2564 		if (status != IBMVFC_MAD_SUCCESS) {
2565 			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2566 			switch (status) {
2567 			case IBMVFC_MAD_DRIVER_FAILED:
2568 			case IBMVFC_MAD_CRQ_ERROR:
2569 				/* Host adapter is most likely going through reset; return success
2570 				 * so the caller will wait for the cancelled command to be returned.
2571 				 */
2572 				break;
2573 			default:
2574 				fail = 1;
2575 				break;
2576 			}
2577 		}
2578 	}
2579 
2580 	if (fail)
2581 		return -EIO;
2582 
2583 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2584 	LEAVE;
2585 	return 0;
2586 }
2587 
2588 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2589 {
2590 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2591 	struct ibmvfc_event *evt, *found_evt;
2592 	union ibmvfc_iu rsp;
2593 	int rsp_rc = -EBUSY;
2594 	unsigned long flags;
2595 	u16 status;
2596 
2597 	ENTER;
2598 	found_evt = NULL;
2599 	spin_lock_irqsave(vhost->host->host_lock, flags);
2600 	spin_lock(&vhost->crq.l_lock);
2601 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2602 		if (evt->cmnd && evt->cmnd->device == sdev) {
2603 			found_evt = evt;
2604 			break;
2605 		}
2606 	}
2607 	spin_unlock(&vhost->crq.l_lock);
2608 
2609 	if (!found_evt) {
2610 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2611 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2612 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2613 		return 0;
2614 	}
2615 
2616 	if (vhost->logged_in) {
2617 		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2618 		evt->sync_iu = &rsp;
2619 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2620 	}
2621 
2622 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2623 
2624 	if (rsp_rc != 0) {
2625 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2626 		/* If the send fails, the host adapter is most likely going through
2627 		 * reset; return success so the caller will wait for the cancelled
2628 		 * command to be returned. */
2629 		return 0;
2630 	}
2631 
2632 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2633 
2634 	wait_for_completion(&evt->comp);
2635 	status = be16_to_cpu(rsp.mad_common.status);
2636 	spin_lock_irqsave(vhost->host->host_lock, flags);
2637 	ibmvfc_free_event(evt);
2638 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2639 
2640 	if (status != IBMVFC_MAD_SUCCESS) {
2641 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2642 		switch (status) {
2643 		case IBMVFC_MAD_DRIVER_FAILED:
2644 		case IBMVFC_MAD_CRQ_ERROR:
2645 			/* Host adapter is most likely going through reset; return success
2646 			 * so the caller will wait for the cancelled command to be returned. */
2647 			return 0;
2648 		default:
2649 			return -EIO;
2650 		}
2651 	}
2652 
2653 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2654 	return 0;
2655 }
2656 
2657 /**
2658  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2659  * @sdev:	scsi device to cancel commands
2660  * @type:	type of error recovery being performed
2661  *
2662  * This sends a cancel to the VIOS for the specified device. This does
2663  * NOT send any abort to the actual device. That must be done separately.
2664  *
2665  * Returns:
2666  *	0 on success / other on failure
2667  **/
2668 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2669 {
2670 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2671 
2672 	if (vhost->mq_enabled && vhost->using_channels)
2673 		return ibmvfc_cancel_all_mq(sdev, type);
2674 	else
2675 		return ibmvfc_cancel_all_sq(sdev, type);
2676 }
2677 
2678 /**
2679  * ibmvfc_match_key - Match function for specified cancel key
2680  * @evt:	ibmvfc event struct
2681  * @key:	cancel key to match
2682  *
2683  * Returns:
2684  *	1 if event matches key / 0 if event does not match key
2685  **/
2686 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2687 {
2688 	unsigned long cancel_key = (unsigned long)key;
2689 
2690 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2691 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2692 		return 1;
2693 	return 0;
2694 }
2695 
2696 /**
2697  * ibmvfc_match_evt - Match function for specified event
2698  * @evt:	ibmvfc event struct
2699  * @match:	event to match
2700  *
2701  * Returns:
2702  *	1 if event matches key / 0 if event does not match key
2703  **/
2704 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2705 {
2706 	if (evt == match)
2707 		return 1;
2708 	return 0;
2709 }
2710 
2711 /**
2712  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2713  * @sdev:	scsi device to abort commands
2714  *
2715  * This sends an Abort Task Set to the VIOS for the specified device. This does
2716  * NOT send any cancel to the VIOS. That must be done separately.
2717  *
2718  * Returns:
2719  *	0 on success / other on failure
2720  **/
2721 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2722 {
2723 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2724 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2725 	struct ibmvfc_cmd *tmf;
2726 	struct ibmvfc_event *evt, *found_evt;
2727 	union ibmvfc_iu rsp_iu;
2728 	struct ibmvfc_fcp_cmd_iu *iu;
2729 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2730 	int rc, rsp_rc = -EBUSY;
2731 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2732 	int rsp_code = 0;
2733 
2734 	found_evt = NULL;
2735 	spin_lock_irqsave(vhost->host->host_lock, flags);
2736 	spin_lock(&vhost->crq.l_lock);
2737 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2738 		if (evt->cmnd && evt->cmnd->device == sdev) {
2739 			found_evt = evt;
2740 			break;
2741 		}
2742 	}
2743 	spin_unlock(&vhost->crq.l_lock);
2744 
2745 	if (!found_evt) {
2746 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2747 			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2748 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2749 		return 0;
2750 	}
2751 
2752 	if (vhost->state == IBMVFC_ACTIVE) {
2753 		evt = ibmvfc_get_event(&vhost->crq);
2754 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2755 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2756 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2757 
2758 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2759 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2760 		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2761 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2762 		evt->sync_iu = &rsp_iu;
2763 
2764 		tmf->correlation = cpu_to_be64((u64)evt);
2765 
2766 		init_completion(&evt->comp);
2767 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2768 	}
2769 
2770 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2771 
2772 	if (rsp_rc != 0) {
2773 		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2774 		return -EIO;
2775 	}
2776 
2777 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2778 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
2779 
2780 	if (!timeout) {
2781 		rc = ibmvfc_cancel_all(sdev, 0);
2782 		if (!rc) {
2783 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2784 			if (rc == SUCCESS)
2785 				rc = 0;
2786 		}
2787 
2788 		if (rc) {
2789 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2790 			ibmvfc_reset_host(vhost);
2791 			rsp_rc = -EIO;
2792 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2793 
2794 			if (rc == SUCCESS)
2795 				rsp_rc = 0;
2796 
2797 			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2798 			if (rc != SUCCESS) {
2799 				spin_lock_irqsave(vhost->host->host_lock, flags);
2800 				ibmvfc_hard_reset_host(vhost);
2801 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2802 				rsp_rc = 0;
2803 			}
2804 
2805 			goto out;
2806 		}
2807 	}
2808 
2809 	if (rsp_iu.cmd.status)
2810 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2811 
2812 	if (rsp_code) {
2813 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2814 			rsp_code = fc_rsp->data.info.rsp_code;
2815 
2816 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2817 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2818 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2819 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2820 			    fc_rsp->scsi_status);
2821 		rsp_rc = -EIO;
2822 	} else
2823 		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2824 
2825 out:
2826 	spin_lock_irqsave(vhost->host->host_lock, flags);
2827 	ibmvfc_free_event(evt);
2828 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2829 	return rsp_rc;
2830 }
2831 
2832 /**
2833  * ibmvfc_eh_abort_handler - Abort a command
2834  * @cmd:	scsi command to abort
2835  *
2836  * Returns:
2837  *	SUCCESS / FAST_IO_FAIL / FAILED
2838  **/
2839 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2840 {
2841 	struct scsi_device *sdev = cmd->device;
2842 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2843 	int cancel_rc, block_rc;
2844 	int rc = FAILED;
2845 
2846 	ENTER;
2847 	block_rc = fc_block_scsi_eh(cmd);
2848 	ibmvfc_wait_while_resetting(vhost);
2849 	if (block_rc != FAST_IO_FAIL) {
2850 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2851 		ibmvfc_abort_task_set(sdev);
2852 	} else
2853 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2854 
2855 	if (!cancel_rc)
2856 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2857 
2858 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2859 		rc = FAST_IO_FAIL;
2860 
2861 	LEAVE;
2862 	return rc;
2863 }
2864 
2865 /**
2866  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2867  * @cmd:	scsi command struct
2868  *
2869  * Returns:
2870  *	SUCCESS / FAST_IO_FAIL / FAILED
2871  **/
2872 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2873 {
2874 	struct scsi_device *sdev = cmd->device;
2875 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2876 	int cancel_rc, block_rc, reset_rc = 0;
2877 	int rc = FAILED;
2878 
2879 	ENTER;
2880 	block_rc = fc_block_scsi_eh(cmd);
2881 	ibmvfc_wait_while_resetting(vhost);
2882 	if (block_rc != FAST_IO_FAIL) {
2883 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2884 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2885 	} else
2886 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2887 
2888 	if (!cancel_rc && !reset_rc)
2889 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2890 
2891 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2892 		rc = FAST_IO_FAIL;
2893 
2894 	LEAVE;
2895 	return rc;
2896 }
2897 
2898 /**
2899  * ibmvfc_dev_cancel_all_noreset - Per-device cancel-all helper (suppress ABTS)
2900  * @sdev:	scsi device struct
2901  * @data:	return code
2902  *
2903  **/
2904 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2905 {
2906 	unsigned long *rc = data;
2907 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2908 }
2909 
2910 /**
2911  * ibmvfc_dev_cancel_all_reset - Per-device cancel-all helper (target reset)
2912  * @sdev:	scsi device struct
2913  * @data:	return code
2914  *
2915  **/
2916 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2917 {
2918 	unsigned long *rc = data;
2919 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2920 }
2921 
2922 /**
2923  * ibmvfc_eh_target_reset_handler - Reset the target
2924  * @cmd:	scsi command struct
2925  *
2926  * Returns:
2927  *	SUCCESS / FAST_IO_FAIL / FAILED
2928  **/
2929 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2930 {
2931 	struct scsi_device *sdev = cmd->device;
2932 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2933 	struct scsi_target *starget = scsi_target(sdev);
2934 	int block_rc;
2935 	int reset_rc = 0;
2936 	int rc = FAILED;
2937 	unsigned long cancel_rc = 0;
2938 
2939 	ENTER;
2940 	block_rc = fc_block_scsi_eh(cmd);
2941 	ibmvfc_wait_while_resetting(vhost);
2942 	if (block_rc != FAST_IO_FAIL) {
2943 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2944 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2945 	} else
2946 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2947 
2948 	if (!cancel_rc && !reset_rc)
2949 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2950 
2951 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2952 		rc = FAST_IO_FAIL;
2953 
2954 	LEAVE;
2955 	return rc;
2956 }
2957 
2958 /**
2959  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2960  * @cmd:	struct scsi_cmnd having problems
2961  *
2962  **/
2963 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2964 {
2965 	int rc;
2966 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2967 
2968 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2969 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2970 
2971 	return rc ? FAILED : SUCCESS;
2972 }
2973 
2974 /**
2975  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2976  * @rport:		rport struct
2977  *
2978  * Return value:
2979  * 	none
2980  **/
2981 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2982 {
2983 	struct Scsi_Host *shost = rport_to_shost(rport);
2984 	struct ibmvfc_host *vhost = shost_priv(shost);
2985 	struct fc_rport *dev_rport;
2986 	struct scsi_device *sdev;
2987 	struct ibmvfc_target *tgt;
2988 	unsigned long rc, flags;
2989 	unsigned int found;
2990 
2991 	ENTER;
2992 	shost_for_each_device(sdev, shost) {
2993 		dev_rport = starget_to_rport(scsi_target(sdev));
2994 		if (dev_rport != rport)
2995 			continue;
2996 		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2997 	}
2998 
2999 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
3000 
3001 	if (rc == FAILED)
3002 		ibmvfc_issue_fc_host_lip(shost);
3003 
3004 	spin_lock_irqsave(shost->host_lock, flags);
3005 	found = 0;
3006 	list_for_each_entry(tgt, &vhost->targets, queue) {
3007 		if (tgt->scsi_id == rport->port_id) {
3008 			found++;
3009 			break;
3010 		}
3011 	}
3012 
3013 	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
3014 		/*
3015 		 * If we get here, that means we previously attempted to send
3016 		 * an implicit logout to the target but it failed, most likely
3017 		 * due to I/O being pending, so we need to send it again
3018 		 */
3019 		ibmvfc_del_tgt(tgt);
3020 		ibmvfc_reinit_host(vhost);
3021 	}
3022 
3023 	spin_unlock_irqrestore(shost->host_lock, flags);
3024 	LEAVE;
3025 }
3026 
3027 static const struct ibmvfc_async_desc ae_desc[] = {
3028 	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3029 	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3030 	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3031 	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3032 	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3033 	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
3034 	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
3035 	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
3036 	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
3037 	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
3038 	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
3039 	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
3040 	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
3041 };
3042 
3043 static const struct ibmvfc_async_desc unknown_ae = {
3044 	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
3045 };
3046 
3047 /**
3048  * ibmvfc_get_ae_desc - Get text description for async event
3049  * @ae:	async event
3050  *
3051  **/
3052 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3053 {
3054 	int i;
3055 
3056 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3057 		if (ae_desc[i].ae == ae)
3058 			return &ae_desc[i];
3059 
3060 	return &unknown_ae;
3061 }
3062 
3063 static const struct {
3064 	enum ibmvfc_ae_link_state state;
3065 	const char *desc;
3066 } link_desc[] = {
3067 	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
3068 	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
3069 	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
3070 	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
3071 };
3072 
3073 /**
3074  * ibmvfc_get_link_state - Get text description for link state
3075  * @state:	link state
3076  *
3077  **/
3078 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3079 {
3080 	int i;
3081 
3082 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3083 		if (link_desc[i].state == state)
3084 			return link_desc[i].desc;
3085 
3086 	return "";
3087 }
3088 
3089 /**
3090  * ibmvfc_handle_async - Handle an async event from the adapter
3091  * @crq:	crq to process
3092  * @vhost:	ibmvfc host struct
3093  *
3094  **/
3095 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3096 				struct ibmvfc_host *vhost)
3097 {
3098 	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3099 	struct ibmvfc_target *tgt;
3100 
3101 	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3102 		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3103 		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3104 		   ibmvfc_get_link_state(crq->link_state));
3105 
3106 	switch (be64_to_cpu(crq->event)) {
3107 	case IBMVFC_AE_RESUME:
3108 		switch (crq->link_state) {
3109 		case IBMVFC_AE_LS_LINK_DOWN:
3110 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3111 			break;
3112 		case IBMVFC_AE_LS_LINK_DEAD:
3113 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3114 			break;
3115 		case IBMVFC_AE_LS_LINK_UP:
3116 		case IBMVFC_AE_LS_LINK_BOUNCED:
3117 		default:
3118 			vhost->events_to_log |= IBMVFC_AE_LINKUP;
3119 			vhost->delay_init = 1;
3120 			__ibmvfc_reset_host(vhost);
3121 			break;
3122 		}
3123 
3124 		break;
3125 	case IBMVFC_AE_LINK_UP:
3126 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
3127 		vhost->delay_init = 1;
3128 		__ibmvfc_reset_host(vhost);
3129 		break;
3130 	case IBMVFC_AE_SCN_FABRIC:
3131 	case IBMVFC_AE_SCN_DOMAIN:
3132 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3133 		if (vhost->state < IBMVFC_HALTED) {
3134 			vhost->delay_init = 1;
3135 			__ibmvfc_reset_host(vhost);
3136 		}
3137 		break;
3138 	case IBMVFC_AE_SCN_NPORT:
3139 	case IBMVFC_AE_SCN_GROUP:
3140 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3141 		ibmvfc_reinit_host(vhost);
3142 		break;
3143 	case IBMVFC_AE_ELS_LOGO:
3144 	case IBMVFC_AE_ELS_PRLO:
3145 	case IBMVFC_AE_ELS_PLOGI:
3146 		list_for_each_entry(tgt, &vhost->targets, queue) {
3147 			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3148 				break;
3149 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3150 				continue;
3151 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3152 				continue;
3153 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3154 				continue;
3155 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3156 				tgt->logo_rcvd = 1;
3157 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3158 				ibmvfc_del_tgt(tgt);
3159 				ibmvfc_reinit_host(vhost);
3160 			}
3161 		}
3162 		break;
3163 	case IBMVFC_AE_LINK_DOWN:
3164 	case IBMVFC_AE_ADAPTER_FAILED:
3165 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3166 		break;
3167 	case IBMVFC_AE_LINK_DEAD:
3168 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3169 		break;
3170 	case IBMVFC_AE_HALT:
3171 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
3172 		break;
3173 	default:
3174 		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
3175 		break;
3176 	}
3177 }
3178 
3179 /**
3180  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3181  * @crq:	Command/Response queue
3182  * @vhost:	ibmvfc host struct
3183  * @evt_doneq:	Event done queue
3184  *
3185  **/
3186 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3187 			      struct list_head *evt_doneq)
3188 {
3189 	long rc;
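	/* For command responses the ioba field carries back the correlation
	 * token (the event pointer) set in ibmvfc_send_event(); it is
	 * validated against the event pool before being trusted.
	 */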
3190 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3191 
3192 	switch (crq->valid) {
3193 	case IBMVFC_CRQ_INIT_RSP:
3194 		switch (crq->format) {
3195 		case IBMVFC_CRQ_INIT:
3196 			dev_info(vhost->dev, "Partner initialized\n");
3197 			/* Send back a response */
3198 			rc = ibmvfc_send_crq_init_complete(vhost);
3199 			if (rc == 0)
3200 				ibmvfc_init_host(vhost);
3201 			else
3202 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3203 			break;
3204 		case IBMVFC_CRQ_INIT_COMPLETE:
3205 			dev_info(vhost->dev, "Partner initialization complete\n");
3206 			ibmvfc_init_host(vhost);
3207 			break;
3208 		default:
3209 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3210 		}
3211 		return;
3212 	case IBMVFC_CRQ_XPORT_EVENT:
3213 		vhost->state = IBMVFC_NO_CRQ;
3214 		vhost->logged_in = 0;
3215 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3216 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3217 			/* We need to re-setup the interpartition connection */
3218 			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3219 			vhost->client_migrated = 1;
3220 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
3221 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3222 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3223 		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3224 			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3225 			ibmvfc_purge_requests(vhost, DID_ERROR);
3226 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3227 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3228 		} else {
3229 			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3230 		}
3231 		return;
3232 	case IBMVFC_CRQ_CMD_RSP:
3233 		break;
3234 	default:
3235 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3236 		return;
3237 	}
3238 
3239 	if (crq->format == IBMVFC_ASYNC_EVENT)
3240 		return;
3241 
3242 	/* The only kind of payload CRQs we should get are responses to
3243 	 * things we send. Make sure this response is to something we
3244 	 * actually sent
3245 	 */
3246 	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3247 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3248 			crq->ioba);
3249 		return;
3250 	}
3251 
3252 	if (unlikely(atomic_read(&evt->free))) {
3253 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3254 			crq->ioba);
3255 		return;
3256 	}
3257 
3258 	spin_lock(&evt->queue->l_lock);
3259 	list_move_tail(&evt->queue_list, evt_doneq);
3260 	spin_unlock(&evt->queue->l_lock);
3261 }
3262 
3263 /**
3264  * ibmvfc_scan_finished - Check if the device scan is done.
3265  * @shost:	scsi host struct
3266  * @time:	current elapsed time
3267  *
3268  * Returns:
3269  *	0 if scan is not done / 1 if scan is done
3270  **/
3271 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3272 {
3273 	unsigned long flags;
3274 	struct ibmvfc_host *vhost = shost_priv(shost);
3275 	int done = 0;
3276 
3277 	spin_lock_irqsave(shost->host_lock, flags);
3278 	if (time >= (init_timeout * HZ)) {
3279 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3280 			 "continuing initialization\n", init_timeout);
3281 		done = 1;
3282 	}
3283 
3284 	if (vhost->scan_complete)
3285 		done = 1;
3286 	spin_unlock_irqrestore(shost->host_lock, flags);
3287 	return done;
3288 }
3289 
3290 /**
3291  * ibmvfc_slave_alloc - Setup the device's task set value
3292  * @sdev:	struct scsi_device device to configure
3293  *
3294  * Set the device's task set value so that error handling works as
3295  * expected.
3296  *
3297  * Returns:
3298  *	0 on success / -ENXIO if device does not exist
3299  **/
3300 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3301 {
3302 	struct Scsi_Host *shost = sdev->host;
3303 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3304 	struct ibmvfc_host *vhost = shost_priv(shost);
3305 	unsigned long flags = 0;
3306 
3307 	if (!rport || fc_remote_port_chkready(rport))
3308 		return -ENXIO;
3309 
3310 	spin_lock_irqsave(shost->host_lock, flags);
3311 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3312 	spin_unlock_irqrestore(shost->host_lock, flags);
3313 	return 0;
3314 }
3315 
3316 /**
3317  * ibmvfc_target_alloc - Setup the target's task set value
3318  * @starget:	struct scsi_target
3319  *
3320  * Set the target's task set value so that error handling works as
3321  * expected.
3322  *
3323  * Returns:
3324  *	0 on success / -ENXIO if device does not exist
3325  **/
3326 static int ibmvfc_target_alloc(struct scsi_target *starget)
3327 {
3328 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3329 	struct ibmvfc_host *vhost = shost_priv(shost);
3330 	unsigned long flags = 0;
3331 
3332 	spin_lock_irqsave(shost->host_lock, flags);
3333 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3334 	spin_unlock_irqrestore(shost->host_lock, flags);
3335 	return 0;
3336 }
3337 
3338 /**
3339  * ibmvfc_slave_configure - Configure the device
3340  * @sdev:	struct scsi_device device to configure
3341  *
3342  * Enable allow_restart for a device if it is a disk. Adjust the
3343  * queue_depth here also.
3344  *
3345  * Returns:
3346  *	0
3347  **/
3348 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3349 {
3350 	struct Scsi_Host *shost = sdev->host;
3351 	unsigned long flags = 0;
3352 
3353 	spin_lock_irqsave(shost->host_lock, flags);
3354 	if (sdev->type == TYPE_DISK) {
3355 		sdev->allow_restart = 1;
3356 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3357 	}
3358 	spin_unlock_irqrestore(shost->host_lock, flags);
3359 	return 0;
3360 }
3361 
3362 /**
3363  * ibmvfc_change_queue_depth - Change the device's queue depth
3364  * @sdev:	scsi device struct
3365  * @qdepth:	depth to set
3366  *
3367  * Return value:
3368  * 	actual depth set
3369  **/
3370 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3371 {
3372 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3373 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3374 
3375 	return scsi_change_queue_depth(sdev, qdepth);
3376 }
3377 
3378 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3379 						 struct device_attribute *attr, char *buf)
3380 {
3381 	struct Scsi_Host *shost = class_to_shost(dev);
3382 	struct ibmvfc_host *vhost = shost_priv(shost);
3383 
3384 	return snprintf(buf, PAGE_SIZE, "%s\n",
3385 			vhost->login_buf->resp.partition_name);
3386 }
3387 
3388 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3389 					    struct device_attribute *attr, char *buf)
3390 {
3391 	struct Scsi_Host *shost = class_to_shost(dev);
3392 	struct ibmvfc_host *vhost = shost_priv(shost);
3393 
3394 	return snprintf(buf, PAGE_SIZE, "%s\n",
3395 			vhost->login_buf->resp.device_name);
3396 }
3397 
3398 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3399 					 struct device_attribute *attr, char *buf)
3400 {
3401 	struct Scsi_Host *shost = class_to_shost(dev);
3402 	struct ibmvfc_host *vhost = shost_priv(shost);
3403 
3404 	return snprintf(buf, PAGE_SIZE, "%s\n",
3405 			vhost->login_buf->resp.port_loc_code);
3406 }
3407 
3408 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3409 					 struct device_attribute *attr, char *buf)
3410 {
3411 	struct Scsi_Host *shost = class_to_shost(dev);
3412 	struct ibmvfc_host *vhost = shost_priv(shost);
3413 
3414 	return snprintf(buf, PAGE_SIZE, "%s\n",
3415 			vhost->login_buf->resp.drc_name);
3416 }
3417 
3418 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3419 					     struct device_attribute *attr, char *buf)
3420 {
3421 	struct Scsi_Host *shost = class_to_shost(dev);
3422 	struct ibmvfc_host *vhost = shost_priv(shost);
3423 	return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3424 }
3425 
3426 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3427 					     struct device_attribute *attr, char *buf)
3428 {
3429 	struct Scsi_Host *shost = class_to_shost(dev);
3430 	struct ibmvfc_host *vhost = shost_priv(shost);
3431 	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3432 }
3433 
3434 /**
3435  * ibmvfc_show_log_level - Show the adapter's error logging level
3436  * @dev:	class device struct
3437  * @attr:	unused
3438  * @buf:	buffer
3439  *
3440  * Return value:
3441  * 	number of bytes printed to buffer
3442  **/
3443 static ssize_t ibmvfc_show_log_level(struct device *dev,
3444 				     struct device_attribute *attr, char *buf)
3445 {
3446 	struct Scsi_Host *shost = class_to_shost(dev);
3447 	struct ibmvfc_host *vhost = shost_priv(shost);
3448 	unsigned long flags = 0;
3449 	int len;
3450 
3451 	spin_lock_irqsave(shost->host_lock, flags);
3452 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3453 	spin_unlock_irqrestore(shost->host_lock, flags);
3454 	return len;
3455 }
3456 
3457 /**
3458  * ibmvfc_store_log_level - Change the adapter's error logging level
3459  * @dev:	class device struct
3460  * @attr:	unused
3461  * @buf:	buffer
3462  * @count:      buffer size
3463  *
3464  * Return value:
3465  * 	number of bytes consumed from the buffer
3466  **/
3467 static ssize_t ibmvfc_store_log_level(struct device *dev,
3468 				      struct device_attribute *attr,
3469 				      const char *buf, size_t count)
3470 {
3471 	struct Scsi_Host *shost = class_to_shost(dev);
3472 	struct ibmvfc_host *vhost = shost_priv(shost);
3473 	unsigned long flags = 0;
3474 
3475 	spin_lock_irqsave(shost->host_lock, flags);
3476 	vhost->log_level = simple_strtoul(buf, NULL, 10);
3477 	spin_unlock_irqrestore(shost->host_lock, flags);
3478 	return strlen(buf);
3479 }
3480 
3481 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3482 					 struct device_attribute *attr, char *buf)
3483 {
3484 	struct Scsi_Host *shost = class_to_shost(dev);
3485 	struct ibmvfc_host *vhost = shost_priv(shost);
3486 	unsigned long flags = 0;
3487 	int len;
3488 
3489 	spin_lock_irqsave(shost->host_lock, flags);
3490 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
3491 	spin_unlock_irqrestore(shost->host_lock, flags);
3492 	return len;
3493 }
3494 
3495 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3496 					 struct device_attribute *attr,
3497 					 const char *buf, size_t count)
3498 {
3499 	struct Scsi_Host *shost = class_to_shost(dev);
3500 	struct ibmvfc_host *vhost = shost_priv(shost);
3501 	unsigned long flags = 0;
3502 	unsigned int channels;
3503 
3504 	spin_lock_irqsave(shost->host_lock, flags);
3505 	channels = simple_strtoul(buf, NULL, 10);
3506 	vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
3507 	ibmvfc_hard_reset_host(vhost);
3508 	spin_unlock_irqrestore(shost->host_lock, flags);
3509 	return strlen(buf);
3510 }
3511 
3512 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3513 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3514 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3515 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3516 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3517 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3518 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3519 		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3520 static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3521 		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
3522 
3523 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3524 /**
3525  * ibmvfc_read_trace - Dump the adapter trace
3526  * @filp:		open sysfs file
3527  * @kobj:		kobject struct
3528  * @bin_attr:	bin_attribute struct
3529  * @buf:		buffer
3530  * @off:		offset
3531  * @count:		buffer size
3532  *
3533  * Return value:
3534  *	number of bytes printed to buffer
3535  **/
3536 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3537 				 struct bin_attribute *bin_attr,
3538 				 char *buf, loff_t off, size_t count)
3539 {
3540 	struct device *dev = kobj_to_dev(kobj);
3541 	struct Scsi_Host *shost = class_to_shost(dev);
3542 	struct ibmvfc_host *vhost = shost_priv(shost);
3543 	unsigned long flags = 0;
3544 	int size = IBMVFC_TRACE_SIZE;
3545 	char *src = (char *)vhost->trace;
3546 
3547 	if (off > size)
3548 		return 0;
3549 	if (off + count > size) {
3550 		size -= off;
3551 		count = size;
3552 	}
3553 
3554 	spin_lock_irqsave(shost->host_lock, flags);
3555 	memcpy(buf, &src[off], count);
3556 	spin_unlock_irqrestore(shost->host_lock, flags);
3557 	return count;
3558 }
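
/*
 * Sketch of the window arithmetic above (hypothetical helper, not driver
 * code): a read of `count` bytes at offset `off` is clipped to the end of
 * the fixed-size trace buffer, so userspace may read the binary attribute
 * in arbitrary chunks.  The `>=` form below is equivalent to the
 * `off > size` / `size -= off` sequence used in ibmvfc_read_trace().
 */
#if 0
static size_t example_clip_read(loff_t off, size_t count, size_t size)
{
	if (off >= size)
		return 0;		/* read past the end: EOF */
	if (off + count > size)
		count = size - off;	/* clip to the buffer tail */
	return count;
}
#endif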
3559 
3560 static struct bin_attribute ibmvfc_trace_attr = {
3561 	.attr =	{
3562 		.name = "trace",
3563 		.mode = S_IRUGO,
3564 	},
3565 	.size = 0,
3566 	.read = ibmvfc_read_trace,
3567 };
3568 #endif
3569 
3570 static struct device_attribute *ibmvfc_attrs[] = {
3571 	&dev_attr_partition_name,
3572 	&dev_attr_device_name,
3573 	&dev_attr_port_loc_code,
3574 	&dev_attr_drc_name,
3575 	&dev_attr_npiv_version,
3576 	&dev_attr_capabilities,
3577 	&dev_attr_log_level,
3578 	&dev_attr_nr_scsi_channels,
3579 	NULL
3580 };
3581 
3582 static struct scsi_host_template driver_template = {
3583 	.module = THIS_MODULE,
3584 	.name = "IBM POWER Virtual FC Adapter",
3585 	.proc_name = IBMVFC_NAME,
3586 	.queuecommand = ibmvfc_queuecommand,
3587 	.eh_timed_out = fc_eh_timed_out,
3588 	.eh_abort_handler = ibmvfc_eh_abort_handler,
3589 	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3590 	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3591 	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3592 	.slave_alloc = ibmvfc_slave_alloc,
3593 	.slave_configure = ibmvfc_slave_configure,
3594 	.target_alloc = ibmvfc_target_alloc,
3595 	.scan_finished = ibmvfc_scan_finished,
3596 	.change_queue_depth = ibmvfc_change_queue_depth,
3597 	.cmd_per_lun = 16,
3598 	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3599 	.this_id = -1,
3600 	.sg_tablesize = SG_ALL,
3601 	.max_sectors = IBMVFC_MAX_SECTORS,
3602 	.shost_attrs = ibmvfc_attrs,
3603 	.track_queue_depth = 1,
3604 	.host_tagset = 1,
3605 };
3606 
3607 /**
3608  * ibmvfc_next_async_crq - Returns the next entry in async queue
3609  * @vhost:	ibmvfc host struct
3610  *
3611  * Returns:
3612  *	Pointer to next entry in queue / NULL if empty
3613  **/
3614 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3615 {
3616 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
3617 	struct ibmvfc_async_crq *crq;
3618 
3619 	crq = &async_crq->msgs.async[async_crq->cur];
3620 	if (crq->valid & 0x80) {
3621 		if (++async_crq->cur == async_crq->size)
3622 			async_crq->cur = 0;
3623 		rmb();
3624 	} else
3625 		crq = NULL;
3626 
3627 	return crq;
3628 }
3629 
3630 /**
3631  * ibmvfc_next_crq - Returns the next entry in message queue
3632  * @vhost:	ibmvfc host struct
3633  *
3634  * Returns:
3635  *	Pointer to next entry in queue / NULL if empty
3636  **/
3637 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3638 {
3639 	struct ibmvfc_queue *queue = &vhost->crq;
3640 	struct ibmvfc_crq *crq;
3641 
3642 	crq = &queue->msgs.crq[queue->cur];
3643 	if (crq->valid & 0x80) {
3644 		if (++queue->cur == queue->size)
3645 			queue->cur = 0;
3646 		rmb();
3647 	} else
3648 		crq = NULL;
3649 
3650 	return crq;
3651 }
3652 
3653 /**
3654  * ibmvfc_interrupt - Interrupt handler
3655  * @irq:		number of irq to handle, not used
3656  * @dev_instance: ibmvfc_host that received interrupt
3657  *
3658  * Returns:
3659  *	IRQ_HANDLED
3660  **/
3661 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3662 {
3663 	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3664 	unsigned long flags;
3665 
3666 	spin_lock_irqsave(vhost->host->host_lock, flags);
3667 	vio_disable_interrupts(to_vio_dev(vhost->dev));
3668 	tasklet_schedule(&vhost->tasklet);
3669 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3670 	return IRQ_HANDLED;
3671 }
3672 
3673 /**
3674  * ibmvfc_tasklet - Interrupt handler tasklet
3675  * @data:		ibmvfc host struct
3676  *
3677  * Returns:
3678  *	Nothing
3679  **/
3680 static void ibmvfc_tasklet(void *data)
3681 {
3682 	struct ibmvfc_host *vhost = data;
3683 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
3684 	struct ibmvfc_crq *crq;
3685 	struct ibmvfc_async_crq *async;
3686 	struct ibmvfc_event *evt, *temp;
3687 	unsigned long flags;
3688 	int done = 0;
3689 	LIST_HEAD(evt_doneq);
3690 
3691 	spin_lock_irqsave(vhost->host->host_lock, flags);
3692 	spin_lock(vhost->crq.q_lock);
3693 	while (!done) {
3694 		/* Pull all the valid messages off the async CRQ */
3695 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3696 			ibmvfc_handle_async(async, vhost);
3697 			async->valid = 0;
3698 			wmb();
3699 		}
3700 
3701 		/* Pull all the valid messages off the CRQ */
3702 		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3703 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3704 			crq->valid = 0;
3705 			wmb();
3706 		}
3707 
3708 		vio_enable_interrupts(vdev);
3709 		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3710 			vio_disable_interrupts(vdev);
3711 			ibmvfc_handle_async(async, vhost);
3712 			async->valid = 0;
3713 			wmb();
3714 		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3715 			vio_disable_interrupts(vdev);
3716 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3717 			crq->valid = 0;
3718 			wmb();
3719 		} else
3720 			done = 1;
3721 	}
3722 
3723 	spin_unlock(vhost->crq.q_lock);
3724 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3725 
3726 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3727 		del_timer(&evt->timer);
3728 		list_del(&evt->queue_list);
3729 		ibmvfc_trc_end(evt);
3730 		evt->done(evt);
3731 	}
3732 }
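
/*
 * Editor's sketch (hypothetical helpers, not driver code): the
 * drain/enable/re-check/disable sequence above is the standard way to
 * close the race where an entry arrives between the last empty poll and
 * re-enabling the interrupt.  Generic shape of the pattern:
 */
#if 0
struct example_queue;
static void *example_poll(struct example_queue *q);	/* next entry or NULL */
static void example_handle(void *entry);
static void example_enable_irq(struct example_queue *q);
static void example_disable_irq(struct example_queue *q);

static void example_drain(struct example_queue *q)
{
	void *entry;

	for (;;) {
		while ((entry = example_poll(q)) != NULL)
			example_handle(entry);

		example_enable_irq(q);
		/* An entry may have landed just before the enable. */
		entry = example_poll(q);
		if (!entry)
			break;	/* truly empty; the IRQ will fire next */
		example_disable_irq(q);
		example_handle(entry);
	}
}
#endif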
3733 
3734 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3735 {
3736 	struct device *dev = scrq->vhost->dev;
3737 	struct vio_dev *vdev = to_vio_dev(dev);
3738 	unsigned long rc;
3739 	int irq_action = H_ENABLE_VIO_INTERRUPT;
3740 
3741 	if (!enable)
3742 		irq_action = H_DISABLE_VIO_INTERRUPT;
3743 
3744 	rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3745 				scrq->hw_irq, 0, 0);
3746 
3747 	if (rc)
3748 		dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3749 			enable ? "enable" : "disable", scrq->hwq_id, rc);
3750 
3751 	return rc;
3752 }
3753 
3754 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3755 			       struct list_head *evt_doneq)
3756 {
3757 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3758 
3759 	switch (crq->valid) {
3760 	case IBMVFC_CRQ_CMD_RSP:
3761 		break;
3762 	case IBMVFC_CRQ_XPORT_EVENT:
3763 		return;
3764 	default:
3765 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3766 		return;
3767 	}
3768 
3769 	/* The only kind of payload CRQs we should get are responses to
3770 	 * things we send. Make sure this response is to something we
3771 	 * actually sent.
3772 	 */
3773 	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3774 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3775 			be64_to_cpu(crq->ioba));
3776 		return;
3777 	}
3778 
3779 	if (unlikely(atomic_read(&evt->free))) {
3780 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3781 			be64_to_cpu(crq->ioba));
3782 		return;
3783 	}
3784 
3785 	spin_lock(&evt->queue->l_lock);
3786 	list_move_tail(&evt->queue_list, evt_doneq);
3787 	spin_unlock(&evt->queue->l_lock);
3788 }
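
/*
 * Sketch of the correlation-token scheme checked above (hypothetical
 * helper, not the real ibmvfc_valid_event()): the driver hands the kernel
 * address of the event to the VIOS in crq->ioba at submit time and gets
 * it echoed back on completion, so the pointer must be range- and
 * alignment-checked against the event pool before being trusted.
 */
#if 0
static bool example_token_valid(void *pool_base, size_t pool_bytes,
				size_t entry_size, void *token)
{
	uintptr_t base = (uintptr_t)pool_base;
	uintptr_t p = (uintptr_t)token;

	return p >= base && p < base + pool_bytes &&
	       (p - base) % entry_size == 0;
}
#endif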
3789 
3790 static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3791 {
3792 	struct ibmvfc_crq *crq;
3793 
3794 	crq = &scrq->msgs.scrq[scrq->cur].crq;
3795 	if (crq->valid & 0x80) {
3796 		if (++scrq->cur == scrq->size)
3797 			scrq->cur = 0;
3798 		rmb();
3799 	} else
3800 		crq = NULL;
3801 
3802 	return crq;
3803 }
3804 
3805 static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3806 {
3807 	struct ibmvfc_crq *crq;
3808 	struct ibmvfc_event *evt, *temp;
3809 	unsigned long flags;
3810 	int done = 0;
3811 	LIST_HEAD(evt_doneq);
3812 
3813 	spin_lock_irqsave(scrq->q_lock, flags);
3814 	while (!done) {
3815 		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3816 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3817 			crq->valid = 0;
3818 			wmb();
3819 		}
3820 
3821 		ibmvfc_toggle_scrq_irq(scrq, 1);
3822 		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3823 			ibmvfc_toggle_scrq_irq(scrq, 0);
3824 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3825 			crq->valid = 0;
3826 			wmb();
3827 		} else
3828 			done = 1;
3829 	}
3830 	spin_unlock_irqrestore(scrq->q_lock, flags);
3831 
3832 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3833 		del_timer(&evt->timer);
3834 		list_del(&evt->queue_list);
3835 		ibmvfc_trc_end(evt);
3836 		evt->done(evt);
3837 	}
3838 }
3839 
3840 static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
3841 {
3842 	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3843 
3844 	ibmvfc_toggle_scrq_irq(scrq, 0);
3845 	ibmvfc_drain_sub_crq(scrq);
3846 
3847 	return IRQ_HANDLED;
3848 }
3849 
3850 /**
3851  * ibmvfc_init_tgt - Set the next init job step for the target
3852  * @tgt:		ibmvfc target struct
3853  * @job_step:	job step to perform
3854  *
3855  **/
3856 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3857 			    void (*job_step) (struct ibmvfc_target *))
3858 {
3859 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3860 		tgt->job_step = job_step;
3861 	wake_up(&tgt->vhost->work_wait_q);
3862 }
3863 
3864 /**
3865  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3866  * @tgt:		ibmvfc target struct
3867  * @job_step:	initialization job step
3868  *
3869  * Returns: 1 if step will be retried / 0 if not
3870  *
3871  **/
3872 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3873 				  void (*job_step) (struct ibmvfc_target *))
3874 {
3875 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3876 		ibmvfc_del_tgt(tgt);
3877 		wake_up(&tgt->vhost->work_wait_q);
3878 		return 0;
3879 	} else
3880 		ibmvfc_init_tgt(tgt, job_step);
3881 	return 1;
3882 }
3883 
3884 /* Defined in FC-LS */
3885 static const struct {
3886 	int code;
3887 	int retry;
3888 	int logged_in;
3889 } prli_rsp[] = {
3890 	{ 0, 1, 0 },
3891 	{ 1, 0, 1 },
3892 	{ 2, 1, 0 },
3893 	{ 3, 1, 0 },
3894 	{ 4, 0, 0 },
3895 	{ 5, 0, 0 },
3896 	{ 6, 0, 1 },
3897 	{ 7, 0, 0 },
3898 	{ 8, 1, 0 },
3899 };
3900 
3901 /**
3902  * ibmvfc_get_prli_rsp - Find PRLI response index
3903  * @flags:	PRLI response flags
3904  *
3905  **/
3906 static int ibmvfc_get_prli_rsp(u16 flags)
3907 {
3908 	int i;
3909 	int code = (flags & 0x0f00) >> 8;
3910 
3911 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3912 		if (prli_rsp[i].code == code)
3913 			return i;
3914 
3915 	return 0;
3916 }
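
/*
 * Worked example of the lookup above (sketch, not driver code): a PRLI
 * accept whose service-parameter response code field (bits 11:8 of the
 * flags word) is 0001b, e.g. flags == 0x2100, selects prli_rsp[1] =
 * { .code = 1, .retry = 0, .logged_in = 1 }: request executed, no retry,
 * image pair established.
 */
#if 0
static void example_prli_decode(void)
{
	u16 flags = 0x2100;			/* assumed sample response */
	int code = (flags & 0x0f00) >> 8;	/* == 1 */
	int index = ibmvfc_get_prli_rsp(flags);	/* == 1 */

	(void)code;
	(void)index;
}
#endif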
3917 
3918 /**
3919  * ibmvfc_tgt_prli_done - Completion handler for Process Login
3920  * @evt:	ibmvfc event struct
3921  *
3922  **/
3923 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3924 {
3925 	struct ibmvfc_target *tgt = evt->tgt;
3926 	struct ibmvfc_host *vhost = evt->vhost;
3927 	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3928 	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3929 	u32 status = be16_to_cpu(rsp->common.status);
3930 	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3931 
3932 	vhost->discovery_threads--;
3933 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3934 	switch (status) {
3935 	case IBMVFC_MAD_SUCCESS:
3936 		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3937 			parms->type, be16_to_cpu(parms->flags), be32_to_cpu(parms->service_parms));
3938 
3939 		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3940 			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
3941 			if (prli_rsp[index].logged_in) {
3942 				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
3943 					tgt->need_login = 0;
3944 					tgt->ids.roles = 0;
3945 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
3946 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3947 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
3948 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3949 					tgt->add_rport = 1;
3950 				} else
3951 					ibmvfc_del_tgt(tgt);
3952 			} else if (prli_rsp[index].retry)
3953 				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3954 			else
3955 				ibmvfc_del_tgt(tgt);
3956 		} else
3957 			ibmvfc_del_tgt(tgt);
3958 		break;
3959 	case IBMVFC_MAD_DRIVER_FAILED:
3960 		break;
3961 	case IBMVFC_MAD_CRQ_ERROR:
3962 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3963 		break;
3964 	case IBMVFC_MAD_FAILED:
3965 	default:
3966 		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
3967 		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
3968 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3969 		else if (tgt->logo_rcvd)
3970 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3971 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3972 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3973 		else
3974 			ibmvfc_del_tgt(tgt);
3975 
3976 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3977 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3978 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3979 		break;
3980 	}
3981 
3982 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3983 	ibmvfc_free_event(evt);
3984 	wake_up(&vhost->work_wait_q);
3985 }
3986 
3987 /**
3988  * ibmvfc_tgt_send_prli - Send a process login
3989  * @tgt:	ibmvfc target struct
3990  *
3991  **/
3992 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3993 {
3994 	struct ibmvfc_process_login *prli;
3995 	struct ibmvfc_host *vhost = tgt->vhost;
3996 	struct ibmvfc_event *evt;
3997 
3998 	if (vhost->discovery_threads >= disc_threads)
3999 		return;
4000 
4001 	kref_get(&tgt->kref);
4002 	evt = ibmvfc_get_event(&vhost->crq);
4003 	vhost->discovery_threads++;
4004 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4005 	evt->tgt = tgt;
4006 	prli = &evt->iu.prli;
4007 	memset(prli, 0, sizeof(*prli));
4008 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4009 		prli->common.version = cpu_to_be32(2);
4010 		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4011 	} else {
4012 		prli->common.version = cpu_to_be32(1);
4013 	}
4014 	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4015 	prli->common.length = cpu_to_be16(sizeof(*prli));
4016 	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4017 
4018 	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4019 	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4020 	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4021 	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4022 
4023 	if (cls3_error)
4024 		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4025 
4026 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4027 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4028 		vhost->discovery_threads--;
4029 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4030 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4031 	} else
4032 		tgt_dbg(tgt, "Sent process login\n");
4033 }
4034 
4035 /**
4036  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4037  * @evt:	ibmvfc event struct
4038  *
4039  **/
4040 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4041 {
4042 	struct ibmvfc_target *tgt = evt->tgt;
4043 	struct ibmvfc_host *vhost = evt->vhost;
4044 	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4045 	u32 status = be16_to_cpu(rsp->common.status);
4046 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4047 
4048 	vhost->discovery_threads--;
4049 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4050 	switch (status) {
4051 	case IBMVFC_MAD_SUCCESS:
4052 		tgt_dbg(tgt, "Port Login succeeded\n");
4053 		if (tgt->ids.port_name &&
4054 		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4055 			vhost->reinit = 1;
4056 			tgt_dbg(tgt, "Port re-init required\n");
4057 			break;
4058 		}
4059 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4060 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4061 		tgt->ids.port_id = tgt->scsi_id;
4062 		memcpy(&tgt->service_parms, &rsp->service_parms,
4063 		       sizeof(tgt->service_parms));
4064 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4065 		       sizeof(tgt->service_parms_change));
4066 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4067 		break;
4068 	case IBMVFC_MAD_DRIVER_FAILED:
4069 		break;
4070 	case IBMVFC_MAD_CRQ_ERROR:
4071 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4072 		break;
4073 	case IBMVFC_MAD_FAILED:
4074 	default:
4075 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4076 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4077 		else
4078 			ibmvfc_del_tgt(tgt);
4079 
4080 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4081 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4082 					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4083 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4084 			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4085 		break;
4086 	}
4087 
4088 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4089 	ibmvfc_free_event(evt);
4090 	wake_up(&vhost->work_wait_q);
4091 }
4092 
4093 /**
4094  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4095  * @tgt:	ibmvfc target struct
4096  *
4097  **/
4098 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4099 {
4100 	struct ibmvfc_port_login *plogi;
4101 	struct ibmvfc_host *vhost = tgt->vhost;
4102 	struct ibmvfc_event *evt;
4103 
4104 	if (vhost->discovery_threads >= disc_threads)
4105 		return;
4106 
4107 	kref_get(&tgt->kref);
4108 	tgt->logo_rcvd = 0;
4109 	evt = ibmvfc_get_event(&vhost->crq);
4110 	vhost->discovery_threads++;
4111 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4112 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4113 	evt->tgt = tgt;
4114 	plogi = &evt->iu.plogi;
4115 	memset(plogi, 0, sizeof(*plogi));
4116 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4117 		plogi->common.version = cpu_to_be32(2);
4118 		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4119 	} else {
4120 		plogi->common.version = cpu_to_be32(1);
4121 	}
4122 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4123 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
4124 	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4125 
4126 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4127 		vhost->discovery_threads--;
4128 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4129 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4130 	} else
4131 		tgt_dbg(tgt, "Sent port login\n");
4132 }
4133 
4134 /**
4135  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4136  * @evt:	ibmvfc event struct
4137  *
4138  **/
4139 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4140 {
4141 	struct ibmvfc_target *tgt = evt->tgt;
4142 	struct ibmvfc_host *vhost = evt->vhost;
4143 	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4144 	u32 status = be16_to_cpu(rsp->common.status);
4145 
4146 	vhost->discovery_threads--;
4147 	ibmvfc_free_event(evt);
4148 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4149 
4150 	switch (status) {
4151 	case IBMVFC_MAD_SUCCESS:
4152 		tgt_dbg(tgt, "Implicit Logout succeeded\n");
4153 		break;
4154 	case IBMVFC_MAD_DRIVER_FAILED:
4155 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4156 		wake_up(&vhost->work_wait_q);
4157 		return;
4158 	case IBMVFC_MAD_FAILED:
4159 	default:
4160 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4161 		break;
4162 	}
4163 
4164 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4165 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4166 	wake_up(&vhost->work_wait_q);
4167 }
4168 
4169 /**
4170  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4171  * @tgt:		ibmvfc target struct
4172  * @done:		Routine to call when the event is responded to
4173  *
4174  * Returns:
4175  *	Allocated and initialized ibmvfc_event struct
4176  **/
4177 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4178 								 void (*done) (struct ibmvfc_event *))
4179 {
4180 	struct ibmvfc_implicit_logout *mad;
4181 	struct ibmvfc_host *vhost = tgt->vhost;
4182 	struct ibmvfc_event *evt;
4183 
4184 	kref_get(&tgt->kref);
4185 	evt = ibmvfc_get_event(&vhost->crq);
4186 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4187 	evt->tgt = tgt;
4188 	mad = &evt->iu.implicit_logout;
4189 	memset(mad, 0, sizeof(*mad));
4190 	mad->common.version = cpu_to_be32(1);
4191 	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4192 	mad->common.length = cpu_to_be16(sizeof(*mad));
4193 	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4194 	return evt;
4195 }
4196 
4197 /**
4198  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for the specified target
4199  * @tgt:		ibmvfc target struct
4200  *
4201  **/
4202 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4203 {
4204 	struct ibmvfc_host *vhost = tgt->vhost;
4205 	struct ibmvfc_event *evt;
4206 
4207 	if (vhost->discovery_threads >= disc_threads)
4208 		return;
4209 
4210 	vhost->discovery_threads++;
4211 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4212 						   ibmvfc_tgt_implicit_logout_done);
4213 
4214 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4215 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4216 		vhost->discovery_threads--;
4217 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4218 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4219 	} else
4220 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4221 }
4222 
4223 /**
4224  * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4225  * @evt:	ibmvfc event struct
4226  *
4227  **/
4228 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4229 {
4230 	struct ibmvfc_target *tgt = evt->tgt;
4231 	struct ibmvfc_host *vhost = evt->vhost;
4232 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4233 	u32 status = be16_to_cpu(mad->common.status);
4234 
4235 	vhost->discovery_threads--;
4236 	ibmvfc_free_event(evt);
4237 
4238 	/*
4239 	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4240 	 * driver in which case we need to free up all the targets. If we are
4241 	 * not unloading, we will still go through a hard reset to get out of
4242 	 * offline state, so there is no need to track the old targets in that
4243 	 * case.
4244 	 */
4245 	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4246 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4247 	else
4248 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4249 
4250 	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4251 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4252 	wake_up(&vhost->work_wait_q);
4253 }
4254 
4255 /**
4256  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for the specified target
4257  * @tgt:		ibmvfc target struct
4258  *
4259  **/
4260 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4261 {
4262 	struct ibmvfc_host *vhost = tgt->vhost;
4263 	struct ibmvfc_event *evt;
4264 
4265 	if (!vhost->logged_in) {
4266 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4267 		return;
4268 	}
4269 
4270 	if (vhost->discovery_threads >= disc_threads)
4271 		return;
4272 
4273 	vhost->discovery_threads++;
4274 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4275 						   ibmvfc_tgt_implicit_logout_and_del_done);
4276 
4277 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4278 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4279 		vhost->discovery_threads--;
4280 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4281 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4282 	} else
4283 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4284 }
4285 
4286 /**
4287  * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4288  * @evt:	ibmvfc event struct
4289  *
4290  **/
4291 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4292 {
4293 	struct ibmvfc_target *tgt = evt->tgt;
4294 	struct ibmvfc_host *vhost = evt->vhost;
4295 	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4296 	u32 status = be16_to_cpu(rsp->common.status);
4297 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4298 
4299 	vhost->discovery_threads--;
4300 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4301 	switch (status) {
4302 	case IBMVFC_MAD_SUCCESS:
4303 		tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
4304 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4305 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4306 		tgt->ids.port_id = tgt->scsi_id;
4307 		memcpy(&tgt->service_parms, &rsp->service_parms,
4308 		       sizeof(tgt->service_parms));
4309 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4310 		       sizeof(tgt->service_parms_change));
4311 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4312 		break;
4313 	case IBMVFC_MAD_DRIVER_FAILED:
4314 		break;
4315 	case IBMVFC_MAD_CRQ_ERROR:
4316 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4317 		break;
4318 	case IBMVFC_MAD_FAILED:
4319 	default:
4320 		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4321 
4322 		tgt_log(tgt, level,
4323 			"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4324 			tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4325 			status);
4326 		break;
4327 	}
4328 
4329 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4330 	ibmvfc_free_event(evt);
4331 	wake_up(&vhost->work_wait_q);
4332 }
4333 
4334 
4335 /**
4336  * ibmvfc_tgt_move_login - Initiate a move login for the specified target
4337  * @tgt:		ibmvfc target struct
4338  *
4339  **/
4340 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4341 {
4342 	struct ibmvfc_host *vhost = tgt->vhost;
4343 	struct ibmvfc_move_login *move;
4344 	struct ibmvfc_event *evt;
4345 
4346 	if (vhost->discovery_threads >= disc_threads)
4347 		return;
4348 
4349 	kref_get(&tgt->kref);
4350 	evt = ibmvfc_get_event(&vhost->crq);
4351 	vhost->discovery_threads++;
4352 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4353 	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4354 	evt->tgt = tgt;
4355 	move = &evt->iu.move_login;
4356 	memset(move, 0, sizeof(*move));
4357 	move->common.version = cpu_to_be32(1);
4358 	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4359 	move->common.length = cpu_to_be16(sizeof(*move));
4360 
4361 	move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
4362 	move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
4363 	move->wwpn = cpu_to_be64(tgt->wwpn);
4364 	move->node_name = cpu_to_be64(tgt->ids.node_name);
4365 
4366 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4367 		vhost->discovery_threads--;
4368 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4369 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4370 	} else
4371 		tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
4372 }
4373 
4374 /**
4375  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4376  * @mad:	ibmvfc passthru mad struct
4377  * @tgt:	ibmvfc target struct
4378  *
4379  * Returns:
4380  *	1 if PLOGI needed / 0 if PLOGI not needed
4381  **/
4382 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4383 				    struct ibmvfc_target *tgt)
4384 {
4385 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4386 		return 1;
4387 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4388 		return 1;
4389 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4390 		return 1;
4391 	return 0;
4392 }
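
/*
 * Sketch of the ADISC accept layout assumed by the checks above (indices
 * into the __be32 fc_iu.response[] array; hypothetical helper, not driver
 * code): words 2-3 carry the WWPN, words 4-5 the WWNN, and the low 24
 * bits of word 6 the N_Port ID.
 */
#if 0
static void example_adisc_decode(struct ibmvfc_passthru_mad *mad)
{
	u64 wwpn = wwn_to_u64((u8 *)&mad->fc_iu.response[2]);
	u64 wwnn = wwn_to_u64((u8 *)&mad->fc_iu.response[4]);
	u32 nport_id = be32_to_cpu(mad->fc_iu.response[6]) & 0x00ffffff;

	/* Any mismatch with the cached tgt->ids means the remote port
	 * changed identity and a fresh PLOGI is required. */
	(void)wwpn; (void)wwnn; (void)nport_id;
}
#endif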
4393 
4394 /**
4395  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4396  * @evt:	ibmvfc event struct
4397  *
4398  **/
4399 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4400 {
4401 	struct ibmvfc_target *tgt = evt->tgt;
4402 	struct ibmvfc_host *vhost = evt->vhost;
4403 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4404 	u32 status = be16_to_cpu(mad->common.status);
4405 	u8 fc_reason, fc_explain;
4406 
4407 	vhost->discovery_threads--;
4408 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4409 	del_timer(&tgt->timer);
4410 
4411 	switch (status) {
4412 	case IBMVFC_MAD_SUCCESS:
4413 		tgt_dbg(tgt, "ADISC succeeded\n");
4414 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
4415 			ibmvfc_del_tgt(tgt);
4416 		break;
4417 	case IBMVFC_MAD_DRIVER_FAILED:
4418 		break;
4419 	case IBMVFC_MAD_FAILED:
4420 	default:
4421 		ibmvfc_del_tgt(tgt);
4422 		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4423 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4424 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4425 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4426 			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4427 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
4428 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4429 		break;
4430 	}
4431 
4432 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4433 	ibmvfc_free_event(evt);
4434 	wake_up(&vhost->work_wait_q);
4435 }
4436 
4437 /**
4438  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4439  * @evt:		ibmvfc event struct
4440  *
4441  **/
4442 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4443 {
4444 	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4445 
4446 	memset(mad, 0, sizeof(*mad));
4447 	mad->common.version = cpu_to_be32(1);
4448 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4449 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
4450 	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4451 		offsetof(struct ibmvfc_passthru_mad, iu));
4452 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4453 	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4454 	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4455 	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4456 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4457 		offsetof(struct ibmvfc_passthru_fc_iu, payload));
4458 	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4459 	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4460 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4461 		offsetof(struct ibmvfc_passthru_fc_iu, response));
4462 	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4463 }
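
/*
 * Miniature of the descriptor setup above (hypothetical types, not driver
 * code): the passthru MAD lives in one contiguous DMA mapping, so each
 * descriptor's va is simply the mapping's bus address plus the offsetof()
 * of the member it should point at.
 */
#if 0
struct example_desc { __be64 va; __be32 len; };
struct example_buf  { u32 hdr; u32 payload[4]; };

static void example_point_at_member(struct example_desc *d, u64 buf_ioba)
{
	d->va = cpu_to_be64(buf_ioba + offsetof(struct example_buf, payload));
	d->len = cpu_to_be32(sizeof_field(struct example_buf, payload));
}
#endif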
4464 
4465 /**
4466  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4467  * @evt:		ibmvfc event struct
4468  *
4469  * Just cleanup this event struct. Everything else is handled by
4470  * the ADISC completion handler. If the ADISC never actually comes
4471  * back, we still have the timer running on the ADISC event struct
4472  * which will fire and cause the CRQ to get reset.
4473  *
4474  **/
4475 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4476 {
4477 	struct ibmvfc_host *vhost = evt->vhost;
4478 	struct ibmvfc_target *tgt = evt->tgt;
4479 
4480 	tgt_dbg(tgt, "ADISC cancel complete\n");
4481 	vhost->abort_threads--;
4482 	ibmvfc_free_event(evt);
4483 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4484 	wake_up(&vhost->work_wait_q);
4485 }
4486 
4487 /**
4488  * ibmvfc_adisc_timeout - Handle an ADISC timeout
4489  * @t:		timer embedded in the ibmvfc target struct
4490  *
4491  * If an ADISC times out, send a cancel. If the cancel times
4492  * out, reset the CRQ. When the ADISC comes back as cancelled,
4493  * log back into the target.
4494  **/
4495 static void ibmvfc_adisc_timeout(struct timer_list *t)
4496 {
4497 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4498 	struct ibmvfc_host *vhost = tgt->vhost;
4499 	struct ibmvfc_event *evt;
4500 	struct ibmvfc_tmf *tmf;
4501 	unsigned long flags;
4502 	int rc;
4503 
4504 	tgt_dbg(tgt, "ADISC timeout\n");
4505 	spin_lock_irqsave(vhost->host->host_lock, flags);
4506 	if (vhost->abort_threads >= disc_threads ||
4507 	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4508 	    vhost->state != IBMVFC_INITIALIZING ||
4509 	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4510 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4511 		return;
4512 	}
4513 
4514 	vhost->abort_threads++;
4515 	kref_get(&tgt->kref);
4516 	evt = ibmvfc_get_event(&vhost->crq);
4517 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4518 
4519 	evt->tgt = tgt;
4520 	tmf = &evt->iu.tmf;
4521 	memset(tmf, 0, sizeof(*tmf));
4522 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4523 		tmf->common.version = cpu_to_be32(2);
4524 		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4525 	} else {
4526 		tmf->common.version = cpu_to_be32(1);
4527 	}
4528 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4529 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
4530 	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4531 	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4532 
4533 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
4534 
4535 	if (rc) {
4536 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4537 		vhost->abort_threads--;
4538 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4539 		__ibmvfc_reset_host(vhost);
4540 	} else
4541 		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4542 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4543 }
4544 
4545 /**
4546  * ibmvfc_tgt_adisc - Initiate an ADISC for the specified target
4547  * @tgt:		ibmvfc target struct
4548  *
4549  * When sending an ADISC we end up with two timers running. The
4550  * first timer is the timer in the ibmvfc target struct. If this
4551  * fires, we send a cancel to the target. The second timer is the
4552  * timer on the ibmvfc event for the ADISC, which is longer. If that
4553  * fires, it means the ADISC timed out and our attempt to cancel it
4554  * also failed, so we need to reset the CRQ.
4555  **/
4556 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4557 {
4558 	struct ibmvfc_passthru_mad *mad;
4559 	struct ibmvfc_host *vhost = tgt->vhost;
4560 	struct ibmvfc_event *evt;
4561 
4562 	if (vhost->discovery_threads >= disc_threads)
4563 		return;
4564 
4565 	kref_get(&tgt->kref);
4566 	evt = ibmvfc_get_event(&vhost->crq);
4567 	vhost->discovery_threads++;
4568 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4569 	evt->tgt = tgt;
4570 
4571 	ibmvfc_init_passthru(evt);
4572 	mad = &evt->iu.passthru;
4573 	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4574 	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4575 	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4576 
4577 	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4578 	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4579 	       sizeof(vhost->login_buf->resp.port_name));
4580 	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4581 	       sizeof(vhost->login_buf->resp.node_name));
4582 	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4583 
4584 	if (timer_pending(&tgt->timer))
4585 		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4586 	else {
4587 		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4588 		add_timer(&tgt->timer);
4589 	}
4590 
4591 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4592 	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4593 		vhost->discovery_threads--;
4594 		del_timer(&tgt->timer);
4595 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4596 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4597 	} else
4598 		tgt_dbg(tgt, "Sent ADISC\n");
4599 }
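
/*
 * Sketch of the two-timer relationship described above.  For the scheme
 * to work, the event timeout must strictly exceed the per-target ADISC
 * timer, otherwise the CRQ would be reset before the cancel had a chance
 * to complete (macro names from ibmvfc.h; the assertion is illustrative):
 *
 *   t0             t0 + IBMVFC_ADISC_TIMEOUT     t0 + ADISC_PLUS_CANCEL
 *   |- ADISC sent -|-- cancel sent --------------|-- CRQ gets reset --|
 */
#if 0
static_assert(IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT > IBMVFC_ADISC_TIMEOUT);
#endif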
4600 
4601 /**
4602  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4603  * @evt:	ibmvfc event struct
4604  *
4605  **/
4606 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4607 {
4608 	struct ibmvfc_target *tgt = evt->tgt;
4609 	struct ibmvfc_host *vhost = evt->vhost;
4610 	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4611 	u32 status = be16_to_cpu(rsp->common.status);
4612 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4613 
4614 	vhost->discovery_threads--;
4615 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4616 	switch (status) {
4617 	case IBMVFC_MAD_SUCCESS:
4618 		tgt_dbg(tgt, "Query Target succeeded\n");
4619 		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4620 			ibmvfc_del_tgt(tgt);
4621 		else
4622 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4623 		break;
4624 	case IBMVFC_MAD_DRIVER_FAILED:
4625 		break;
4626 	case IBMVFC_MAD_CRQ_ERROR:
4627 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4628 		break;
4629 	case IBMVFC_MAD_FAILED:
4630 	default:
4631 		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4632 		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4633 		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4634 			ibmvfc_del_tgt(tgt);
4635 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4636 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4637 		else
4638 			ibmvfc_del_tgt(tgt);
4639 
4640 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4641 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4642 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4643 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4644 			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4645 			status);
4646 		break;
4647 	}
4648 
4649 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4650 	ibmvfc_free_event(evt);
4651 	wake_up(&vhost->work_wait_q);
4652 }
4653 
4654 /**
4655  * ibmvfc_tgt_query_target - Initiate a Query Target for the specified target
4656  * @tgt:	ibmvfc target struct
4657  *
4658  **/
4659 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4660 {
4661 	struct ibmvfc_query_tgt *query_tgt;
4662 	struct ibmvfc_host *vhost = tgt->vhost;
4663 	struct ibmvfc_event *evt;
4664 
4665 	if (vhost->discovery_threads >= disc_threads)
4666 		return;
4667 
4668 	kref_get(&tgt->kref);
4669 	evt = ibmvfc_get_event(&vhost->crq);
4670 	vhost->discovery_threads++;
4671 	evt->tgt = tgt;
4672 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4673 	query_tgt = &evt->iu.query_tgt;
4674 	memset(query_tgt, 0, sizeof(*query_tgt));
4675 	query_tgt->common.version = cpu_to_be32(1);
4676 	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4677 	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4678 	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4679 
4680 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4681 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4682 		vhost->discovery_threads--;
4683 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4684 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4685 	} else
4686 		tgt_dbg(tgt, "Sent Query Target\n");
4687 }
4688 
4689 /**
4690  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4691  * @vhost:		ibmvfc host struct
4692  * @target:		Holds the SCSI ID and WWPN of the target to allocate
4693  *
4694  * Returns:
4695  *	0 on success / other on failure
4696  **/
4697 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4698 			       struct ibmvfc_discover_targets_entry *target)
4699 {
4700 	struct ibmvfc_target *stgt = NULL;
4701 	struct ibmvfc_target *wtgt = NULL;
4702 	struct ibmvfc_target *tgt;
4703 	unsigned long flags;
4704 	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4705 	u64 wwpn = be64_to_cpu(target->wwpn);
4706 
4707 	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4708 	spin_lock_irqsave(vhost->host->host_lock, flags);
4709 	list_for_each_entry(tgt, &vhost->targets, queue) {
4710 		if (tgt->wwpn == wwpn) {
4711 			wtgt = tgt;
4712 			break;
4713 		}
4714 	}
4715 
4716 	list_for_each_entry(tgt, &vhost->targets, queue) {
4717 		if (tgt->scsi_id == scsi_id) {
4718 			stgt = tgt;
4719 			break;
4720 		}
4721 	}
4722 
4723 	if (wtgt && !stgt) {
4724 		/*
4725 		 * A WWPN target has moved and we still are tracking the old
4726 		 * SCSI ID.  The only way we should be able to get here is if
4727 		 * we attempted to send an implicit logout for the old SCSI ID
4728 		 * and it failed for some reason, such as there being I/O
4729 		 * pending to the target. In this case, we will have already
4730 		 * deleted the rport from the FC transport so we do a move
4731 		 * login, which works even with I/O pending, as it will cancel
4732 		 * any active commands.
4733 		 */
4734 		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4735 			/*
4736 			 * Do a move login here. The old target is no longer
4737 			 * known to the transport layer. We don't use the
4738 			 * normal ibmvfc_set_tgt_action to set this, as we
4739 			 * don't normally want to allow this state change.
4740 			 */
4741 			wtgt->old_scsi_id = wtgt->scsi_id;
4742 			wtgt->scsi_id = scsi_id;
4743 			wtgt->action = IBMVFC_TGT_ACTION_INIT;
4744 			ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4745 			goto unlock_out;
4746 		} else {
4747 			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4748 				wtgt->action, wtgt->rport);
4749 		}
4750 	} else if (stgt) {
4751 		if (stgt->need_login)
4752 			ibmvfc_init_tgt(stgt, ibmvfc_tgt_implicit_logout);
4753 		goto unlock_out;
4754 	}
4755 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4756 
4757 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4758 	memset(tgt, 0, sizeof(*tgt));
4759 	tgt->scsi_id = scsi_id;
4760 	tgt->wwpn = wwpn;
4761 	tgt->vhost = vhost;
4762 	tgt->need_login = 1;
4763 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4764 	kref_init(&tgt->kref);
4765 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4766 	spin_lock_irqsave(vhost->host->host_lock, flags);
4767 	tgt->cancel_key = vhost->task_set++;
4768 	list_add_tail(&tgt->queue, &vhost->targets);
4769 
4770 unlock_out:
4771 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4772 	return 0;
4773 }
4774 
4775 /**
4776  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4777  * @vhost:		ibmvfc host struct
4778  *
4779  * Returns:
4780  *	0 on success / other on failure
4781  **/
4782 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4783 {
4784 	int i, rc;
4785 
4786 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4787 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4788 
4789 	return rc;
4790 }
4791 
4792 /**
4793  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4794  * @evt:	ibmvfc event struct
4795  *
4796  **/
4797 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4798 {
4799 	struct ibmvfc_host *vhost = evt->vhost;
4800 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4801 	u32 mad_status = be16_to_cpu(rsp->common.status);
4802 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4803 
4804 	switch (mad_status) {
4805 	case IBMVFC_MAD_SUCCESS:
4806 		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4807 		vhost->num_targets = be32_to_cpu(rsp->num_written);
4808 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4809 		break;
4810 	case IBMVFC_MAD_FAILED:
4811 		level += ibmvfc_retry_host_init(vhost);
4812 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4813 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4814 			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4815 		break;
4816 	case IBMVFC_MAD_DRIVER_FAILED:
4817 		break;
4818 	default:
4819 		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4820 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4821 		break;
4822 	}
4823 
4824 	ibmvfc_free_event(evt);
4825 	wake_up(&vhost->work_wait_q);
4826 }
4827 
4828 /**
4829  * ibmvfc_discover_targets - Send Discover Targets MAD
4830  * @vhost:	ibmvfc host struct
4831  *
4832  **/
4833 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4834 {
4835 	struct ibmvfc_discover_targets *mad;
4836 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4837 
4838 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4839 	mad = &evt->iu.discover_targets;
4840 	memset(mad, 0, sizeof(*mad));
4841 	mad->common.version = cpu_to_be32(1);
4842 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4843 	mad->common.length = cpu_to_be16(sizeof(*mad));
4844 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4845 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4846 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4847 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4848 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4849 
4850 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4851 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4852 	else
4853 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4854 }
4855 
4856 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
4857 {
4858 	struct ibmvfc_host *vhost = evt->vhost;
4859 	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
4860 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4861 	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
4862 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4863 	int flags, active_queues, i;
4864 
4865 	ibmvfc_free_event(evt);
4866 
4867 	switch (mad_status) {
4868 	case IBMVFC_MAD_SUCCESS:
4869 		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
4870 		flags = be32_to_cpu(setup->flags);
4871 		vhost->do_enquiry = 0;
4872 		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
4873 		scrqs->active_queues = active_queues;
4874 
4875 		if (flags & IBMVFC_CHANNELS_CANCELED) {
4876 			ibmvfc_dbg(vhost, "Channels Canceled\n");
4877 			vhost->using_channels = 0;
4878 		} else {
4879 			if (active_queues)
4880 				vhost->using_channels = 1;
4881 			for (i = 0; i < active_queues; i++)
4882 				scrqs->scrqs[i].vios_cookie =
4883 					be64_to_cpu(setup->channel_handles[i]);
4884 
4885 			ibmvfc_dbg(vhost, "Using %u channels\n",
4886 				   vhost->scsi_scrqs.active_queues);
4887 		}
4888 		break;
4889 	case IBMVFC_MAD_FAILED:
4890 		level += ibmvfc_retry_host_init(vhost);
4891 		ibmvfc_log(vhost, level, "Channel Setup failed\n");
4892 		fallthrough;
4893 	case IBMVFC_MAD_DRIVER_FAILED:
4894 		return;
4895 	default:
4896 		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
4897 			mad_status);
4898 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4899 		return;
4900 	}
4901 
4902 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4903 	wake_up(&vhost->work_wait_q);
4904 }
4905 
4906 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
4907 {
4908 	struct ibmvfc_channel_setup_mad *mad;
4909 	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
4910 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4911 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4912 	unsigned int num_channels =
4913 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
4914 	int i;
4915 
4916 	memset(setup_buf, 0, sizeof(*setup_buf));
4917 	if (num_channels == 0) {
4918 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
4919 	} else {
4920 		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
4921 		for (i = 0; i < num_channels; i++)
4922 			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
4923 	}
4924 
4925 	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
4926 	mad = &evt->iu.channel_setup;
4927 	memset(mad, 0, sizeof(*mad));
4928 	mad->common.version = cpu_to_be32(1);
4929 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
4930 	mad->common.length = cpu_to_be16(sizeof(*mad));
4931 	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
4932 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
4933 
4934 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4935 
4936 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4937 		ibmvfc_dbg(vhost, "Sent channel setup\n");
4938 	else
4939 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
4940 }
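
/*
 * Sketch of the negotiation above (hypothetical helper, not driver code):
 * the number of sub-CRQ channels requested is the smaller of what the
 * client was configured for and what the VIOS advertised in the Channel
 * Enquiry response; a result of zero degenerates into a cancel request.
 */
#if 0
static unsigned int example_nr_channels(unsigned int client, unsigned int vios)
{
	return min(client, vios);
}
/* example_nr_channels(8, 4) == 4; 0 => IBMVFC_CANCEL_CHANNELS */
#endif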
4941 
4942 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
4943 {
4944 	struct ibmvfc_host *vhost = evt->vhost;
4945 	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
4946 	u32 mad_status = be16_to_cpu(rsp->common.status);
4947 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4948 
4949 	switch (mad_status) {
4950 	case IBMVFC_MAD_SUCCESS:
4951 		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
4952 		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
4953 		ibmvfc_free_event(evt);
4954 		break;
4955 	case IBMVFC_MAD_FAILED:
4956 		level += ibmvfc_retry_host_init(vhost);
4957 		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
4958 		fallthrough;
4959 	case IBMVFC_MAD_DRIVER_FAILED:
4960 		ibmvfc_free_event(evt);
4961 		return;
4962 	default:
4963 		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
4964 			mad_status);
4965 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4966 		ibmvfc_free_event(evt);
4967 		return;
4968 	}
4969 
4970 	ibmvfc_channel_setup(vhost);
4971 }
4972 
4973 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
4974 {
4975 	struct ibmvfc_channel_enquiry *mad;
4976 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4977 
4978 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
4979 	mad = &evt->iu.channel_enquiry;
4980 	memset(mad, 0, sizeof(*mad));
4981 	mad->common.version = cpu_to_be32(1);
4982 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
4983 	mad->common.length = cpu_to_be16(sizeof(*mad));
4984 
4985 	if (mig_channels_only)
4986 		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
4987 	if (mig_no_less_channels)
4988 		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
4989 
4990 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4991 
4992 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4993 		ibmvfc_dbg(vhost, "Sent channel enquiry\n");
4994 	else
4995 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4996 }
4997 
4998 /**
4999  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5000  * @evt:	ibmvfc event struct
5001  *
5002  **/
5003 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5004 {
5005 	struct ibmvfc_host *vhost = evt->vhost;
5006 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5007 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5008 	unsigned int npiv_max_sectors;
5009 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
5010 
5011 	switch (mad_status) {
5012 	case IBMVFC_MAD_SUCCESS:
5013 		ibmvfc_free_event(evt);
5014 		break;
5015 	case IBMVFC_MAD_FAILED:
5016 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5017 			level += ibmvfc_retry_host_init(vhost);
5018 		else
5019 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5020 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5021 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5022 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5023 		ibmvfc_free_event(evt);
5024 		return;
5025 	case IBMVFC_MAD_CRQ_ERROR:
5026 		ibmvfc_retry_host_init(vhost);
5027 		fallthrough;
5028 	case IBMVFC_MAD_DRIVER_FAILED:
5029 		ibmvfc_free_event(evt);
5030 		return;
5031 	default:
5032 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5033 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5034 		ibmvfc_free_event(evt);
5035 		return;
5036 	}
5037 
5038 	vhost->client_migrated = 0;
5039 
5040 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5041 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5042 			be32_to_cpu(rsp->flags));
5043 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5044 		wake_up(&vhost->work_wait_q);
5045 		return;
5046 	}
5047 
5048 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5049 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5050 			be32_to_cpu(rsp->max_cmds));
5051 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5052 		wake_up(&vhost->work_wait_q);
5053 		return;
5054 	}
5055 
5056 	vhost->logged_in = 1;
5057 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5058 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5059 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5060 		 rsp->drc_name, npiv_max_sectors);
5061 
5062 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5063 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5064 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5065 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5066 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5067 	fc_host_supported_classes(vhost->host) = 0;
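	/*
	 * The high-order bit of the first word of each class's service
	 * parameters is the class validity bit; advertise only the classes
	 * the login response reported as valid.
	 */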
5068 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5069 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5070 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5071 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5072 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5073 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
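	/* The receive data field size is carried in the low 12 bits of bb_rcv_sz. */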
5074 	fc_host_maxframe_size(vhost->host) =
5075 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5076 
5077 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5078 	vhost->host->max_sectors = npiv_max_sectors;
5079 
5080 	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5081 		ibmvfc_channel_enquiry(vhost);
5082 	} else {
5083 		vhost->do_enquiry = 0;
5084 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5085 		wake_up(&vhost->work_wait_q);
5086 	}
5087 }
5088 
5089 /**
5090  * ibmvfc_npiv_login - Sends NPIV login
5091  * @vhost:	ibmvfc host struct
5092  *
5093  **/
5094 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5095 {
5096 	struct ibmvfc_npiv_login_mad *mad;
5097 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5098 
5099 	ibmvfc_gather_partition_info(vhost);
5100 	ibmvfc_set_login_info(vhost);
5101 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5102 
5103 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5104 	mad = &evt->iu.npiv_login;
5105 	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5106 	mad->common.version = cpu_to_be32(1);
5107 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5108 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5109 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5110 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5111 
5112 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5113 
5114 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5115 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
5116 	else
5117 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5118 }
5119 
5120 /**
5121  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5122  * @evt:		ibmvfc event struct
5123  *
5124  **/
5125 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5126 {
5127 	struct ibmvfc_host *vhost = evt->vhost;
5128 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5129 
5130 	ibmvfc_free_event(evt);
5131 
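	/*
	 * On a clean logout with no commands still outstanding, go straight
	 * back to host initialization; any other outcome falls through to
	 * a hard reset of the host below.
	 */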
5132 	switch (mad_status) {
5133 	case IBMVFC_MAD_SUCCESS:
5134 		if (list_empty(&vhost->crq.sent) &&
5135 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5136 			ibmvfc_init_host(vhost);
5137 			return;
5138 		}
5139 		break;
5140 	case IBMVFC_MAD_FAILED:
5141 	case IBMVFC_MAD_NOT_SUPPORTED:
5142 	case IBMVFC_MAD_CRQ_ERROR:
5143 	case IBMVFC_MAD_DRIVER_FAILED:
5144 	default:
5145 		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5146 		break;
5147 	}
5148 
5149 	ibmvfc_hard_reset_host(vhost);
5150 }
5151 
5152 /**
5153  * ibmvfc_npiv_logout - Issue an NPIV Logout
5154  * @vhost:		ibmvfc host struct
5155  *
5156  **/
5157 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5158 {
5159 	struct ibmvfc_npiv_logout_mad *mad;
5160 	struct ibmvfc_event *evt;
5161 
5162 	evt = ibmvfc_get_event(&vhost->crq);
5163 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5164 
5165 	mad = &evt->iu.npiv_logout;
5166 	memset(mad, 0, sizeof(*mad));
5167 	mad->common.version = cpu_to_be32(1);
5168 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5169 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5170 
5171 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5172 
5173 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5174 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5175 	else
5176 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5177 }
5178 
5179 /**
5180  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5181  * @vhost:		ibmvfc host struct
5182  *
5183  * Returns:
5184  *	1 if work to do / 0 if not
5185  **/
5186 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5187 {
5188 	struct ibmvfc_target *tgt;
5189 
5190 	list_for_each_entry(tgt, &vhost->targets, queue) {
5191 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5192 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5193 			return 1;
5194 	}
5195 
5196 	return 0;
5197 }
5198 
5199 /**
5200  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5201  * @vhost:		ibmvfc host struct
5202  *
5203  * Returns:
5204  *	1 if work to do / 0 if not
5205  **/
5206 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5207 {
5208 	struct ibmvfc_target *tgt;
5209 
5210 	list_for_each_entry(tgt, &vhost->targets, queue) {
5211 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5212 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5213 			return 1;
5214 	}
5215 	return 0;
5216 }
5217 
5218 /**
5219  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5220  * @vhost:		ibmvfc host struct
5221  *
5222  * Returns:
5223  *	1 if work to do / 0 if not
5224  **/
5225 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5226 {
5227 	struct ibmvfc_target *tgt;
5228 
5229 	if (kthread_should_stop())
5230 		return 1;
5231 	switch (vhost->action) {
5232 	case IBMVFC_HOST_ACTION_NONE:
5233 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5234 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5235 		return 0;
5236 	case IBMVFC_HOST_ACTION_TGT_INIT:
5237 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5238 		if (vhost->discovery_threads == disc_threads)
5239 			return 0;
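		/*
		 * Wake the work thread only if some target still needs its
		 * init step started; if every remaining target is already
		 * waiting on a response, there is nothing to do yet.
		 */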
5240 		list_for_each_entry(tgt, &vhost->targets, queue)
5241 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5242 				return 1;
5243 		list_for_each_entry(tgt, &vhost->targets, queue)
5244 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5245 				return 0;
5246 		return 1;
5247 	case IBMVFC_HOST_ACTION_TGT_DEL:
5248 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5249 		if (vhost->discovery_threads == disc_threads)
5250 			return 0;
5251 		list_for_each_entry(tgt, &vhost->targets, queue)
5252 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5253 				return 1;
5254 		list_for_each_entry(tgt, &vhost->targets, queue)
5255 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5256 				return 0;
5257 		return 1;
5258 	case IBMVFC_HOST_ACTION_LOGO:
5259 	case IBMVFC_HOST_ACTION_INIT:
5260 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5261 	case IBMVFC_HOST_ACTION_QUERY:
5262 	case IBMVFC_HOST_ACTION_RESET:
5263 	case IBMVFC_HOST_ACTION_REENABLE:
5264 	default:
5265 		break;
5266 	}
5267 
5268 	return 1;
5269 }
5270 
5271 /**
5272  * ibmvfc_work_to_do - Is there task level work to do?
5273  * @vhost:		ibmvfc host struct
5274  *
5275  * Returns:
5276  *	1 if work to do / 0 if not
5277  **/
5278 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5279 {
5280 	unsigned long flags;
5281 	int rc;
5282 
5283 	spin_lock_irqsave(vhost->host->host_lock, flags);
5284 	rc = __ibmvfc_work_to_do(vhost);
5285 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5286 	return rc;
5287 }
5288 
5289 /**
5290  * ibmvfc_log_ae - Log async events if necessary
5291  * @vhost:		ibmvfc host struct
5292  * @events:		events to log
5293  *
5294  **/
5295 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5296 {
5297 	if (events & IBMVFC_AE_RSCN)
5298 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5299 	if ((events & IBMVFC_AE_LINKDOWN) &&
5300 	    vhost->state >= IBMVFC_HALTED)
5301 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5302 	if ((events & IBMVFC_AE_LINKUP) &&
5303 	    vhost->state == IBMVFC_INITIALIZING)
5304 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5305 }
5306 
5307 /**
5308  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5309  * @tgt:		ibmvfc target struct
5310  *
5311  **/
5312 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5313 {
5314 	struct ibmvfc_host *vhost = tgt->vhost;
5315 	struct fc_rport *rport;
5316 	unsigned long flags;
5317 
5318 	tgt_dbg(tgt, "Adding rport\n");
5319 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5320 	spin_lock_irqsave(vhost->host->host_lock, flags);
5321 
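	/*
	 * The target's state may have changed while the host lock was
	 * dropped for fc_remote_port_add(), so re-check for a deletion
	 * request that raced in before publishing the new rport.
	 */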
5322 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5323 		tgt_dbg(tgt, "Deleting rport\n");
5324 		list_del(&tgt->queue);
5325 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5326 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5327 		fc_remote_port_delete(rport);
5328 		del_timer_sync(&tgt->timer);
5329 		kref_put(&tgt->kref, ibmvfc_release_tgt);
5330 		return;
5331 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5332 		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5333 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5334 		tgt->rport = NULL;
5335 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5336 		fc_remote_port_delete(rport);
5337 		return;
5338 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5339 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5340 		return;
5341 	}
5342 
5343 	if (rport) {
5344 		tgt_dbg(tgt, "rport add succeeded\n");
5345 		tgt->rport = rport;
5346 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5347 		rport->supported_classes = 0;
5348 		tgt->target_id = rport->scsi_target_id;
5349 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5350 			rport->supported_classes |= FC_COS_CLASS1;
5351 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5352 			rport->supported_classes |= FC_COS_CLASS2;
5353 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5354 			rport->supported_classes |= FC_COS_CLASS3;
5355 		if (rport->rqst_q)
5356 			blk_queue_max_segments(rport->rqst_q, 1);
5357 	} else
5358 		tgt_dbg(tgt, "rport add failed\n");
5359 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5360 }
5361 
5362 /**
5363  * ibmvfc_do_work - Do task level work
5364  * @vhost:		ibmvfc host struct
5365  *
5366  **/
5367 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5368 {
5369 	struct ibmvfc_target *tgt;
5370 	unsigned long flags;
5371 	struct fc_rport *rport;
5372 	LIST_HEAD(purge);
5373 	int rc;
5374 
5375 	ibmvfc_log_ae(vhost, vhost->events_to_log);
5376 	spin_lock_irqsave(vhost->host->host_lock, flags);
5377 	vhost->events_to_log = 0;
5378 	switch (vhost->action) {
5379 	case IBMVFC_HOST_ACTION_NONE:
5380 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5381 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5382 		break;
5383 	case IBMVFC_HOST_ACTION_RESET:
5384 		list_splice_init(&vhost->purge, &purge);
5385 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5386 		ibmvfc_complete_purge(&purge);
5387 		rc = ibmvfc_reset_crq(vhost);
5388 
5389 		spin_lock_irqsave(vhost->host->host_lock, flags);
5390 		if (!rc || rc == H_CLOSED)
5391 			vio_enable_interrupts(to_vio_dev(vhost->dev));
5392 		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5393 			/*
5394 			 * The only action we could have changed to would have
5395 			 * been reenable, in which case, we skip the rest of
5396 			 * this path and wait until we've done the re-enable
5397 			 * before sending the crq init.
5398 			 */
5399 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5400 
5401 			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5402 			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5403 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5404 				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5405 			}
5406 		}
5407 		break;
5408 	case IBMVFC_HOST_ACTION_REENABLE:
5409 		list_splice_init(&vhost->purge, &purge);
5410 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5411 		ibmvfc_complete_purge(&purge);
5412 		rc = ibmvfc_reenable_crq_queue(vhost);
5413 
5414 		spin_lock_irqsave(vhost->host->host_lock, flags);
5415 		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5416 			/*
5417 			 * The only action we could have changed to would have
5418 			 * been reset, in which case, we skip the rest of this
5419 			 * path and wait until we've done the reset before
5420 			 * sending the crq init.
5421 			 */
5422 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5423 			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5424 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5425 				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5426 			}
5427 		}
5428 		break;
5429 	case IBMVFC_HOST_ACTION_LOGO:
5430 		vhost->job_step(vhost);
5431 		break;
5432 	case IBMVFC_HOST_ACTION_INIT:
5433 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5434 		if (vhost->delay_init) {
5435 			vhost->delay_init = 0;
5436 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5437 			ssleep(15);
5438 			return;
5439 		} else
5440 			vhost->job_step(vhost);
5441 		break;
5442 	case IBMVFC_HOST_ACTION_QUERY:
5443 		list_for_each_entry(tgt, &vhost->targets, queue)
5444 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5445 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5446 		break;
5447 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5448 		list_for_each_entry(tgt, &vhost->targets, queue) {
5449 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5450 				tgt->job_step(tgt);
5451 				break;
5452 			}
5453 		}
5454 
5455 		if (!ibmvfc_dev_init_to_do(vhost))
5456 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5457 		break;
5458 	case IBMVFC_HOST_ACTION_TGT_DEL:
5459 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5460 		list_for_each_entry(tgt, &vhost->targets, queue) {
5461 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5462 				tgt->job_step(tgt);
5463 				break;
5464 			}
5465 		}
5466 
5467 		if (ibmvfc_dev_logo_to_do(vhost)) {
5468 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5469 			return;
5470 		}
5471 
5472 		list_for_each_entry(tgt, &vhost->targets, queue) {
5473 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5474 				tgt_dbg(tgt, "Deleting rport\n");
5475 				rport = tgt->rport;
5476 				tgt->rport = NULL;
5477 				list_del(&tgt->queue);
5478 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5479 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5480 				if (rport)
5481 					fc_remote_port_delete(rport);
5482 				del_timer_sync(&tgt->timer);
5483 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5484 				return;
5485 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5486 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5487 				rport = tgt->rport;
5488 				tgt->rport = NULL;
5489 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5490 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5491 				if (rport)
5492 					fc_remote_port_delete(rport);
5493 				return;
5494 			}
5495 		}
5496 
5497 		if (vhost->state == IBMVFC_INITIALIZING) {
5498 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5499 				if (vhost->reinit) {
5500 					vhost->reinit = 0;
5501 					scsi_block_requests(vhost->host);
5502 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5503 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5504 				} else {
5505 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5506 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5507 					wake_up(&vhost->init_wait_q);
5508 					schedule_work(&vhost->rport_add_work_q);
5509 					vhost->init_retries = 0;
5510 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5511 					scsi_unblock_requests(vhost->host);
5512 				}
5513 
5514 				return;
5515 			} else {
5516 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5517 				vhost->job_step = ibmvfc_discover_targets;
5518 			}
5519 		} else {
5520 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5521 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5522 			scsi_unblock_requests(vhost->host);
5523 			wake_up(&vhost->init_wait_q);
5524 			return;
5525 		}
5526 		break;
5527 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5528 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5529 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5530 		ibmvfc_alloc_targets(vhost);
5531 		spin_lock_irqsave(vhost->host->host_lock, flags);
5532 		break;
5533 	case IBMVFC_HOST_ACTION_TGT_INIT:
5534 		list_for_each_entry(tgt, &vhost->targets, queue) {
5535 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5536 				tgt->job_step(tgt);
5537 				break;
5538 			}
5539 		}
5540 
5541 		if (!ibmvfc_dev_init_to_do(vhost))
5542 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5543 		break;
5544 	default:
5545 		break;
5546 	}
5547 
5548 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5549 }
5550 
5551 /**
5552  * ibmvfc_work - Do task level work
5553  * @data:		ibmvfc host struct
5554  *
5555  * Returns:
5556  *	zero
5557  **/
5558 static int ibmvfc_work(void *data)
5559 {
5560 	struct ibmvfc_host *vhost = data;
5561 	int rc;
5562 
5563 	set_user_nice(current, MIN_NICE);
5564 
5565 	while (1) {
5566 		rc = wait_event_interruptible(vhost->work_wait_q,
5567 					      ibmvfc_work_to_do(vhost));
5568 
5569 		BUG_ON(rc);
5570 
5571 		if (kthread_should_stop())
5572 			break;
5573 
5574 		ibmvfc_do_work(vhost);
5575 	}
5576 
5577 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5578 	return 0;
5579 }
5580 
5581 /**
5582  * ibmvfc_alloc_queue - Allocate queue
5583  * @vhost:	ibmvfc host struct
5584  * @queue:	ibmvfc queue to allocate
5585  * @fmt:	queue format to allocate
5586  *
5587  * Returns:
5588  *	0 on success / non-zero on failure
5589  **/
5590 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5591 			      struct ibmvfc_queue *queue,
5592 			      enum ibmvfc_msg_fmt fmt)
5593 {
5594 	struct device *dev = vhost->dev;
5595 	size_t fmt_size;
5596 	unsigned int pool_size = 0;
5597 
5598 	ENTER;
5599 	spin_lock_init(&queue->_lock);
5600 	queue->q_lock = &queue->_lock;
5601 
5602 	switch (fmt) {
5603 	case IBMVFC_CRQ_FMT:
5604 		fmt_size = sizeof(*queue->msgs.crq);
5605 		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
5606 		break;
5607 	case IBMVFC_ASYNC_FMT:
5608 		fmt_size = sizeof(*queue->msgs.async);
5609 		break;
5610 	case IBMVFC_SUB_CRQ_FMT:
5611 		fmt_size = sizeof(*queue->msgs.scrq);
5612 		/* We need one extra event for Cancel Commands */
5613 		pool_size = max_requests + 1;
5614 		break;
5615 	default:
5616 		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5617 		return -EINVAL;
5618 	}
5619 
5620 	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
5621 		dev_err(dev, "Couldn't initialize event pool.\n");
5622 		return -ENOMEM;
5623 	}
5624 
5625 	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5626 	if (!queue->msgs.handle)
5627 		return -ENOMEM;
5628 
5629 	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5630 					  DMA_BIDIRECTIONAL);
5631 
5632 	if (dma_mapping_error(dev, queue->msg_token)) {
5633 		free_page((unsigned long)queue->msgs.handle);
5634 		queue->msgs.handle = NULL;
5635 		return -ENOMEM;
5636 	}
5637 
5638 	queue->cur = 0;
5639 	queue->fmt = fmt;
5640 	queue->size = PAGE_SIZE / fmt_size;
5641 	return 0;
5642 }
5643 
5644 /**
5645  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5646  * @vhost:	ibmvfc host struct
5647  *
5648  * Allocates a page for messages, maps it for dma, and registers
5649  * the crq with the hypervisor.
5650  *
5651  * Return value:
5652  *	zero on success / other on failure
5653  **/
5654 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5655 {
5656 	int rc, retrc = -ENOMEM;
5657 	struct device *dev = vhost->dev;
5658 	struct vio_dev *vdev = to_vio_dev(dev);
5659 	struct ibmvfc_queue *crq = &vhost->crq;
5660 
5661 	ENTER;
5662 	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5663 		return -ENOMEM;
5664 
5665 	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5666 					crq->msg_token, PAGE_SIZE);
5667 
5668 	if (rc == H_RESOURCE)
5669 		/* Maybe kexecing and the resource is busy; try a reset */
5670 		retrc = rc = ibmvfc_reset_crq(vhost);
5671 
5672 	if (rc == H_CLOSED)
5673 		dev_warn(dev, "Partner adapter not ready\n");
5674 	else if (rc) {
5675 		dev_warn(dev, "Error %d opening adapter\n", rc);
5676 		goto reg_crq_failed;
5677 	}
5678 
5679 	retrc = 0;
5680 
5681 	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5682 
5683 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5684 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5685 		goto req_irq_failed;
5686 	}
5687 
5688 	if ((rc = vio_enable_interrupts(vdev))) {
5689 		dev_err(dev, "Error %d enabling interrupts\n", rc);
5690 		goto req_irq_failed;
5691 	}
5692 
5693 	LEAVE;
5694 	return retrc;
5695 
5696 req_irq_failed:
5697 	tasklet_kill(&vhost->tasklet);
5698 	do {
5699 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5700 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5701 reg_crq_failed:
5702 	ibmvfc_free_queue(vhost, crq);
5703 	return retrc;
5704 }
5705 
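/**
 * ibmvfc_register_scsi_channel - Register a SCSI sub-CRQ channel with the hypervisor
 * @vhost:	ibmvfc host struct
 * @index:	index of the sub-CRQ channel to register
 *
 * Return value:
 *	0 on success / other on failure
 **/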
5706 static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
5707 				  int index)
5708 {
5709 	struct device *dev = vhost->dev;
5710 	struct vio_dev *vdev = to_vio_dev(dev);
5711 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5712 	int rc = -ENOMEM;
5713 
5714 	ENTER;
5715 
5716 	if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
5717 		return -ENOMEM;
5718 
5719 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5720 			   &scrq->cookie, &scrq->hw_irq);
5721 
5722 	/* H_CLOSED indicates a successful registration, but no CRQ partner */
5723 	if (rc && rc != H_CLOSED) {
5724 		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5725 		if (rc == H_PARAMETER)
5726 			dev_warn_once(dev, "Firmware may not support MQ\n");
5727 		goto reg_failed;
5728 	}
5729 
5730 	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5731 
5732 	if (!scrq->irq) {
5733 		rc = -EINVAL;
5734 		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5735 		goto irq_failed;
5736 	}
5737 
5738 	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5739 		 vdev->unit_address, index);
5740 	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
5741 
5742 	if (rc) {
5743 		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5744 		irq_dispose_mapping(scrq->irq);
5745 		goto irq_failed;
5746 	}
5747 
5748 	scrq->hwq_id = index;
5749 	scrq->vhost = vhost;
5750 
5751 	LEAVE;
5752 	return 0;
5753 
5754 irq_failed:
5755 	do {
5756 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5757 	} while (rtas_busy_delay(rc));
5758 reg_failed:
5759 	ibmvfc_free_queue(vhost, scrq);
5760 	LEAVE;
5761 	return rc;
5762 }
5763 
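/**
 * ibmvfc_deregister_scsi_channel - Release a SCSI sub-CRQ channel
 * @vhost:	ibmvfc host struct
 * @index:	index of the sub-CRQ channel to release
 *
 **/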
5764 static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
5765 {
5766 	struct device *dev = vhost->dev;
5767 	struct vio_dev *vdev = to_vio_dev(dev);
5768 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5769 	long rc;
5770 
5771 	ENTER;
5772 
5773 	free_irq(scrq->irq, scrq);
5774 	irq_dispose_mapping(scrq->irq);
5775 	scrq->irq = 0;
5776 
5777 	do {
5778 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
5779 					scrq->cookie);
5780 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5781 
5782 	if (rc)
5783 		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
5784 
5785 	ibmvfc_free_queue(vhost, scrq);
5786 	LEAVE;
5787 }
5788 
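/**
 * ibmvfc_init_sub_crqs - Allocate and register the SCSI sub-CRQ channels
 * @vhost:	ibmvfc host struct
 *
 **/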
5789 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
5790 {
5791 	int i, j;
5792 
5793 	ENTER;
5794 	if (!vhost->mq_enabled)
5795 		return;
5796 
5797 	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
5798 					  sizeof(*vhost->scsi_scrqs.scrqs),
5799 					  GFP_KERNEL);
5800 	if (!vhost->scsi_scrqs.scrqs) {
5801 		vhost->do_enquiry = 0;
5802 		return;
5803 	}
5804 
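	/*
	 * Register each channel in turn; if any registration fails, unwind
	 * the channels already registered and fall back to single-queue
	 * (CRQ-only) operation by disabling the channel enquiry.
	 */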
5805 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5806 		if (ibmvfc_register_scsi_channel(vhost, i)) {
5807 			for (j = i; j > 0; j--)
5808 				ibmvfc_deregister_scsi_channel(vhost, j - 1);
5809 			kfree(vhost->scsi_scrqs.scrqs);
5810 			vhost->scsi_scrqs.scrqs = NULL;
5811 			vhost->scsi_scrqs.active_queues = 0;
5812 			vhost->do_enquiry = 0;
5813 			break;
5814 		}
5815 	}
5816 
5817 	LEAVE;
5818 }
5819 
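/**
 * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-CRQ channels
 * @vhost:	ibmvfc host struct
 *
 **/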
5820 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
5821 {
5822 	int i;
5823 
5824 	ENTER;
5825 	if (!vhost->scsi_scrqs.scrqs)
5826 		return;
5827 
5828 	for (i = 0; i < nr_scsi_hw_queues; i++)
5829 		ibmvfc_deregister_scsi_channel(vhost, i);
5830 
5831 	kfree(vhost->scsi_scrqs.scrqs);
5832 	vhost->scsi_scrqs.scrqs = NULL;
5833 	vhost->scsi_scrqs.active_queues = 0;
5834 	LEAVE;
5835 }
5836 
5837 /**
5838  * ibmvfc_free_mem - Free memory for vhost
5839  * @vhost:	ibmvfc host struct
5840  *
5841  * Return value:
5842  * 	none
5843  **/
5844 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
5845 {
5846 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5847 
5848 	ENTER;
5849 	mempool_destroy(vhost->tgt_pool);
5850 	kfree(vhost->trace);
5851 	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
5852 			  vhost->disc_buf_dma);
5853 	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
5854 			  vhost->login_buf, vhost->login_buf_dma);
5855 	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
5856 			  vhost->channel_setup_buf, vhost->channel_setup_dma);
5857 	dma_pool_destroy(vhost->sg_pool);
5858 	ibmvfc_free_queue(vhost, async_q);
5859 	LEAVE;
5860 }
5861 
5862 /**
5863  * ibmvfc_alloc_mem - Allocate memory for vhost
5864  * @vhost:	ibmvfc host struct
5865  *
5866  * Return value:
5867  * 	0 on success / non-zero on failure
5868  **/
5869 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
5870 {
5871 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5872 	struct device *dev = vhost->dev;
5873 
5874 	ENTER;
5875 	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
5876 		dev_err(dev, "Couldn't allocate/map async queue.\n");
5877 		goto nomem;
5878 	}
5879 
5880 	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
5881 					 SG_ALL * sizeof(struct srp_direct_buf),
5882 					 sizeof(struct srp_direct_buf), 0);
5883 
5884 	if (!vhost->sg_pool) {
5885 		dev_err(dev, "Failed to allocate sg pool\n");
5886 		goto unmap_async_crq;
5887 	}
5888 
5889 	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
5890 					      &vhost->login_buf_dma, GFP_KERNEL);
5891 
5892 	if (!vhost->login_buf) {
5893 		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
5894 		goto free_sg_pool;
5895 	}
5896 
5897 	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
5898 	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
5899 					     &vhost->disc_buf_dma, GFP_KERNEL);
5900 
5901 	if (!vhost->disc_buf) {
5902 		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
5903 		goto free_login_buffer;
5904 	}
5905 
5906 	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
5907 			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
5908 	atomic_set(&vhost->trace_index, -1);
5909 
5910 	if (!vhost->trace)
5911 		goto free_disc_buffer;
5912 
5913 	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
5914 						      sizeof(struct ibmvfc_target));
5915 
5916 	if (!vhost->tgt_pool) {
5917 		dev_err(dev, "Couldn't allocate target memory pool\n");
5918 		goto free_trace;
5919 	}
5920 
5921 	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
5922 						      &vhost->channel_setup_dma,
5923 						      GFP_KERNEL);
5924 
5925 	if (!vhost->channel_setup_buf) {
5926 		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
5927 		goto free_tgt_pool;
5928 	}
5929 
5930 	LEAVE;
5931 	return 0;
5932 
5933 free_tgt_pool:
5934 	mempool_destroy(vhost->tgt_pool);
5935 free_trace:
5936 	kfree(vhost->trace);
5937 free_disc_buffer:
5938 	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
5939 			  vhost->disc_buf_dma);
5940 free_login_buffer:
5941 	dma_free_coherent(dev, sizeof(*vhost->login_buf),
5942 			  vhost->login_buf, vhost->login_buf_dma);
5943 free_sg_pool:
5944 	dma_pool_destroy(vhost->sg_pool);
5945 unmap_async_crq:
5946 	ibmvfc_free_queue(vhost, async_q);
5947 nomem:
5948 	LEAVE;
5949 	return -ENOMEM;
5950 }
5951 
5952 /**
5953  * ibmvfc_rport_add_thread - Worker thread for rport adds
5954  * @work:	work struct
5955  *
5956  **/
5957 static void ibmvfc_rport_add_thread(struct work_struct *work)
5958 {
5959 	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
5960 						 rport_add_work_q);
5961 	struct ibmvfc_target *tgt;
5962 	struct fc_rport *rport;
5963 	unsigned long flags;
5964 	int did_work;
5965 
5966 	ENTER;
5967 	spin_lock_irqsave(vhost->host->host_lock, flags);
5968 	do {
5969 		did_work = 0;
5970 		if (vhost->state != IBMVFC_ACTIVE)
5971 			break;
5972 
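		/*
		 * Each rport operation requires dropping the host lock, and
		 * the target list can change while it is unlocked, so
		 * restart the scan after handling each target rather than
		 * continuing the iteration.
		 */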
5973 		list_for_each_entry(tgt, &vhost->targets, queue) {
5974 			if (tgt->add_rport) {
5975 				did_work = 1;
5976 				tgt->add_rport = 0;
5977 				kref_get(&tgt->kref);
5978 				rport = tgt->rport;
5979 				if (!rport) {
5980 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5981 					ibmvfc_tgt_add_rport(tgt);
5982 				} else if (get_device(&rport->dev)) {
5983 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5984 					tgt_dbg(tgt, "Setting rport roles\n");
5985 					fc_remote_port_rolechg(rport, tgt->ids.roles);
5986 					put_device(&rport->dev);
5987 				} else {
5988 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5989 				}
5990 
5991 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5992 				spin_lock_irqsave(vhost->host->host_lock, flags);
5993 				break;
5994 			}
5995 		}
5996 	} while (did_work);
5997 
5998 	if (vhost->state == IBMVFC_ACTIVE)
5999 		vhost->scan_complete = 1;
6000 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6001 	LEAVE;
6002 }
6003 
6004 /**
6005  * ibmvfc_probe - Adapter hot plug add entry point
6006  * @vdev:	vio device struct
6007  * @id:	vio device id struct
6008  *
6009  * Return value:
6010  * 	0 on success / non-zero on failure
6011  **/
6012 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6013 {
6014 	struct ibmvfc_host *vhost;
6015 	struct Scsi_Host *shost;
6016 	struct device *dev = &vdev->dev;
6017 	int rc = -ENOMEM;
6018 	unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
6019 
6020 	ENTER;
6021 	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
6022 	if (!shost) {
6023 		dev_err(dev, "Couldn't allocate host data\n");
6024 		goto out;
6025 	}
6026 
6027 	shost->transportt = ibmvfc_transport_template;
6028 	shost->can_queue = max_requests;
6029 	shost->max_lun = max_lun;
6030 	shost->max_id = max_targets;
6031 	shost->max_sectors = IBMVFC_MAX_SECTORS;
6032 	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
6033 	shost->unique_id = shost->host_no;
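	/*
	 * With MQ enabled, expose the requested number of hardware queues,
	 * capped at the driver maximum; otherwise fall back to a single
	 * submission queue.
	 */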
6034 	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
6035 
6036 	vhost = shost_priv(shost);
6037 	INIT_LIST_HEAD(&vhost->targets);
6038 	INIT_LIST_HEAD(&vhost->purge);
6039 	sprintf(vhost->name, IBMVFC_NAME);
6040 	vhost->host = shost;
6041 	vhost->dev = dev;
6042 	vhost->partition_number = -1;
6043 	vhost->log_level = log_level;
6044 	vhost->task_set = 1;
6045 
6046 	vhost->mq_enabled = mq_enabled;
6047 	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
6048 	vhost->using_channels = 0;
6049 	vhost->do_enquiry = 1;
6050 
6051 	strcpy(vhost->partition_name, "UNKNOWN");
6052 	init_waitqueue_head(&vhost->work_wait_q);
6053 	init_waitqueue_head(&vhost->init_wait_q);
6054 	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
6055 	mutex_init(&vhost->passthru_mutex);
6056 
6057 	if ((rc = ibmvfc_alloc_mem(vhost)))
6058 		goto free_scsi_host;
6059 
6060 	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
6061 					 shost->host_no);
6062 
6063 	if (IS_ERR(vhost->work_thread)) {
6064 		dev_err(dev, "Couldn't create kernel thread: %ld\n",
6065 			PTR_ERR(vhost->work_thread));
6066 		rc = PTR_ERR(vhost->work_thread);
6067 		goto free_host_mem;
6068 	}
6069 
6070 	if ((rc = ibmvfc_init_crq(vhost))) {
6071 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
6072 		goto kill_kthread;
6073 	}
6074 
6075 	if ((rc = scsi_add_host(shost, dev)))
6076 		goto release_crq;
6077 
6078 	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
6079 
6080 	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
6081 					   &ibmvfc_trace_attr))) {
6082 		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
6083 		goto remove_shost;
6084 	}
6085 
6086 	ibmvfc_init_sub_crqs(vhost);
6087 
6088 	if (shost_to_fc_host(shost)->rqst_q)
6089 		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
6090 	dev_set_drvdata(dev, vhost);
6091 	spin_lock(&ibmvfc_driver_lock);
6092 	list_add_tail(&vhost->queue, &ibmvfc_head);
6093 	spin_unlock(&ibmvfc_driver_lock);
6094 
6095 	ibmvfc_send_crq_init(vhost);
6096 	scsi_scan_host(shost);
6097 	return 0;
6098 
6099 remove_shost:
6100 	scsi_remove_host(shost);
6101 release_crq:
6102 	ibmvfc_release_crq_queue(vhost);
6103 kill_kthread:
6104 	kthread_stop(vhost->work_thread);
6105 free_host_mem:
6106 	ibmvfc_free_mem(vhost);
6107 free_scsi_host:
6108 	scsi_host_put(shost);
6109 out:
6110 	LEAVE;
6111 	return rc;
6112 }
6113 
6114 /**
6115  * ibmvfc_remove - Adapter hot plug remove entry point
6116  * @vdev:	vio device struct
6117  *
6118  * Return value:
6119  * 	none
6120  **/
6121 static void ibmvfc_remove(struct vio_dev *vdev)
6122 {
6123 	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6124 	LIST_HEAD(purge);
6125 	unsigned long flags;
6126 
6127 	ENTER;
6128 	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6129 
6130 	spin_lock_irqsave(vhost->host->host_lock, flags);
6131 	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6132 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6133 
6134 	ibmvfc_wait_while_resetting(vhost);
6135 	kthread_stop(vhost->work_thread);
6136 	fc_remove_host(vhost->host);
6137 	scsi_remove_host(vhost->host);
6138 
6139 	spin_lock_irqsave(vhost->host->host_lock, flags);
6140 	ibmvfc_purge_requests(vhost, DID_ERROR);
6141 	list_splice_init(&vhost->purge, &purge);
6142 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6143 	ibmvfc_complete_purge(&purge);
6144 	ibmvfc_release_sub_crqs(vhost);
6145 	ibmvfc_release_crq_queue(vhost);
6146 
6147 	ibmvfc_free_mem(vhost);
6148 	spin_lock(&ibmvfc_driver_lock);
6149 	list_del(&vhost->queue);
6150 	spin_unlock(&ibmvfc_driver_lock);
6151 	scsi_host_put(vhost->host);
6152 	LEAVE;
6153 }
6154 
6155 /**
6156  * ibmvfc_resume - Resume from suspend
6157  * @dev:	device struct
6158  *
6159  * We may have lost an interrupt across suspend/resume, so kick the
6160  * interrupt handler
6161  * interrupt handler.
6162  */
6163 static int ibmvfc_resume(struct device *dev)
6164 {
6165 	unsigned long flags;
6166 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6167 	struct vio_dev *vdev = to_vio_dev(dev);
6168 
6169 	spin_lock_irqsave(vhost->host->host_lock, flags);
6170 	vio_disable_interrupts(vdev);
6171 	tasklet_schedule(&vhost->tasklet);
6172 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6173 	return 0;
6174 }
6175 
6176 /**
6177  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6178  * @vdev:	vio device struct
6179  *
6180  * Return value:
6181  *	Number of bytes the driver will need to DMA map at the same time in
6182  *	order to perform well.
6183  */
6184 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6185 {
6186 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
6187 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6188 }
6189 
6190 static const struct vio_device_id ibmvfc_device_table[] = {
6191 	{"fcp", "IBM,vfc-client"},
6192 	{ "", "" }
6193 };
6194 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
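/*
 * Example (hypothetical values): enable multiqueue with four submission
 * queues at module load time:
 *
 *   modprobe ibmvfc mq=1 scsi_host_queues=4
 */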
6195 
6196 static const struct dev_pm_ops ibmvfc_pm_ops = {
6197 	.resume = ibmvfc_resume
6198 };
6199 
6200 static struct vio_driver ibmvfc_driver = {
6201 	.id_table = ibmvfc_device_table,
6202 	.probe = ibmvfc_probe,
6203 	.remove = ibmvfc_remove,
6204 	.get_desired_dma = ibmvfc_get_desired_dma,
6205 	.name = IBMVFC_NAME,
6206 	.pm = &ibmvfc_pm_ops,
6207 };
6208 
6209 static struct fc_function_template ibmvfc_transport_functions = {
6210 	.show_host_fabric_name = 1,
6211 	.show_host_node_name = 1,
6212 	.show_host_port_name = 1,
6213 	.show_host_supported_classes = 1,
6214 	.show_host_port_type = 1,
6215 	.show_host_port_id = 1,
6216 	.show_host_maxframe_size = 1,
6217 
6218 	.get_host_port_state = ibmvfc_get_host_port_state,
6219 	.show_host_port_state = 1,
6220 
6221 	.get_host_speed = ibmvfc_get_host_speed,
6222 	.show_host_speed = 1,
6223 
6224 	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
6225 	.terminate_rport_io = ibmvfc_terminate_rport_io,
6226 
6227 	.show_rport_maxframe_size = 1,
6228 	.show_rport_supported_classes = 1,
6229 
6230 	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
6231 	.show_rport_dev_loss_tmo = 1,
6232 
6233 	.get_starget_node_name = ibmvfc_get_starget_node_name,
6234 	.show_starget_node_name = 1,
6235 
6236 	.get_starget_port_name = ibmvfc_get_starget_port_name,
6237 	.show_starget_port_name = 1,
6238 
6239 	.get_starget_port_id = ibmvfc_get_starget_port_id,
6240 	.show_starget_port_id = 1,
6241 
6242 	.bsg_request = ibmvfc_bsg_request,
6243 	.bsg_timeout = ibmvfc_bsg_timeout,
6244 };
6245 
6246 /**
6247  * ibmvfc_module_init - Initialize the ibmvfc module
6248  *
6249  * Return value:
6250  * 	0 on success / other on failure
6251  **/
6252 static int __init ibmvfc_module_init(void)
6253 {
6254 	int rc;
6255 
6256 	if (!firmware_has_feature(FW_FEATURE_VIO))
6257 		return -ENODEV;
6258 
6259 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6260 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6261 
6262 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6263 	if (!ibmvfc_transport_template)
6264 		return -ENOMEM;
6265 
6266 	rc = vio_register_driver(&ibmvfc_driver);
6267 	if (rc)
6268 		fc_release_transport(ibmvfc_transport_template);
6269 	return rc;
6270 }
6271 
6272 /**
6273  * ibmvfc_module_exit - Teardown the ibmvfc module
6274  *
6275  * Return value:
6276  * 	nothing
6277  **/
6278 static void __exit ibmvfc_module_exit(void)
6279 {
6280 	vio_unregister_driver(&ibmvfc_driver);
6281 	fc_release_transport(ibmvfc_transport_template);
6282 }
6283 
6284 module_init(ibmvfc_module_init);
6285 module_exit(ibmvfc_module_exit);
6286