xref: /openbmc/linux/drivers/scsi/ibmvscsi/ibmvfc.c (revision 29d97219)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
 *
 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation, 2008
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/rtas.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"

static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

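/* List of all ibmvfc hosts; additions and removals are guarded by ibmvfc_driver_lock */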
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to a non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to a system with fewer channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");

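/*
 * Translation table from VIOS status/error pairs to a Linux SCSI result
 * (host byte), whether the command may be retried, and whether the failure
 * is worth logging.
 */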
static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};

static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);

static const char *unknown_error = "unknown error";

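/**
 * h_reg_sub_crq - Register a sub-CRQ with the hypervisor
 * @unit_address:	vio device unit address
 * @ioba:		I/O bus address of the queue page
 * @length:		length of the queue in bytes
 * @cookie:		returned cookie identifying the registered sub-CRQ
 * @irq:		returned interrupt source for the sub-CRQ
 *
 * Return value:
 *	hcall return code
 **/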
static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
			  unsigned long length, unsigned long *cookie,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
	*cookie = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

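/**
 * ibmvfc_check_caps - Check capability bits in the NPIV login response
 * @vhost:	ibmvfc host struct
 * @cap_flags:	capability bit(s) to check
 *
 * Return value:
 *	1 if any of the requested bits are set / 0 if none are set
 **/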
static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
{
	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);

	return (host_caps & cap_flags) ? 1 : 0;
}

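/**
 * ibmvfc_get_fcp_iu - Get the FCP command IU from a versioned ibmvfc command
 * @vhost:	ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * The layout of ibmvfc_cmd depends on whether the server handles virtual
 * fabric WWPNs, so pick the IU matching the negotiated capabilities.
 *
 * Return value:
 *	pointer to the FCP command IU
 **/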
static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
						   struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.iu;
	else
		return &vfc_cmd->v1.iu;
}

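/**
 * ibmvfc_get_fcp_rsp - Get the FCP response from a versioned ibmvfc command
 * @vhost:	ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	pointer to the FCP response
 **/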
static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
						 struct ibmvfc_cmd *vfc_cmd)
{
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
		return &vfc_cmd->v2.rsp;
	else
		return &vfc_cmd->v1.rsp;
}

#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:		ibmvfc event struct
 *
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}

/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:		ibmvfc event struct
 *
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	struct ibmvfc_trace_entry *entry;
	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;

	entry = &vhost->trace[index];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = iu->cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&iu->lun);
		entry->tmf_flags = iu->tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = rsp->flags;
		entry->u.end.rsp_code = rsp->data.info.rsp_code;
		entry->u.end.scsi_status = rsp->scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;
	}
}

#else
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif

/**
 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
 * @status:		status / error class
 * @error:		error
 *
 * Return value:
 *	index into cmd_status / -EINVAL on failure
 **/
static int ibmvfc_get_err_index(u16 status, u16 error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
		if ((cmd_status[i].status & status) == cmd_status[i].status &&
		    cmd_status[i].error == error)
			return i;

	return -EINVAL;
}

/**
 * ibmvfc_get_cmd_error - Find the error description for the fcp response
 * @status:		status / error class
 * @error:		error
 *
 * Return value:
 *	error description string
 **/
static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);
	if (rc >= 0)
		return cmd_status[rc].name;
	return unknown_error;
}

/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vhost:      ibmvfc host struct
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}

/**
 * ibmvfc_retry_cmd - Determine if error status is retryable
 * @status:		status / error class
 * @error:		error
 *
 * Return value:
 *	1 if error should be retried / 0 if it should not
 **/
static int ibmvfc_retry_cmd(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);

	if (rc >= 0)
		return cmd_status[rc].retry;
	return 1;
}

static const char *unknown_fc_explain = "unknown fc explain";

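/* FC explanation codes for rejected ELS (link service) requests */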
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

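/* FC explanation codes for rejected GS (generic service) requests */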
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};

/**
 * ibmvfc_get_ls_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_ls_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
		if (ls_explain[i].fc_explain == status)
			return ls_explain[i].name;

	return unknown_fc_explain;
}

/**
 * ibmvfc_get_gs_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_gs_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
		if (gs_explain[i].fc_explain == status)
			return gs_explain[i].name;

	return unknown_fc_explain;
}

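/* Descriptions for the ibmvfc_fc_type failure classes */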
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

static const char *unknown_fc_type = "unknown fc type";

/**
 * ibmvfc_get_fc_type - Return the FC Type description text
 * @status:	FC Type error status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_fc_type(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
		if (fc_type[i].fc_type == status)
			return fc_type[i].name;

	return unknown_fc_type;
}

/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:		ibmvfc target struct
 * @action:		action to perform
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				  enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		break;
	default:
		tgt->action = action;
		rc = 0;
		break;
	}

	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}

/**
 * ibmvfc_set_host_state - Set the state for the host
 * @vhost:		ibmvfc host struct
 * @state:		state to set host to
 *
 * Returns:
 *	0 if state changed / non-zero if not changed
 **/
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
				  enum ibmvfc_host_state state)
{
	int rc = 0;

	switch (vhost->state) {
	case IBMVFC_HOST_OFFLINE:
		rc = -EINVAL;
		break;
	default:
		vhost->state = state;
		break;
	}

	return rc;
}

/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}

/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost:		ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:		ibmvfc target struct
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
	wake_up(&tgt->vhost->work_wait_q);
}

/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}

/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:		ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_del_tgt(tgt);
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}

/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost:	ibmvfc host struct
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

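/**
 * ibmvfc_send_sub_crq - Send a sub-CRQ message
 * @vhost:	ibmvfc host struct
 * @cookie:	cookie of the sub-CRQ to send on
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 * @word3:	the third 64 bits of the data
 * @word4:	the fourth 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/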
static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
			       u64 word2, u64 word3, u64 word4)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);

	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
				  word1, word2, word3, word4);
}

/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}

/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}

/**
 * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
 * @vhost:	ibmvfc host that owns the event pool
 * @queue:      ibmvfc queue struct
 * @size:       pool size
 *
 * Returns zero on success.
 **/
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
				  struct ibmvfc_queue *queue,
				  unsigned int size)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	if (!size)
		return 0;

	pool->size = size;
	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage = dma_alloc_coherent(vhost->dev,
					      size * sizeof(*pool->iu_storage),
					      &pool->iu_token, 0);

	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&queue->sent);
	INIT_LIST_HEAD(&queue->free);
	spin_lock_init(&queue->l_lock);

	for (i = 0; i < size; ++i) {
		struct ibmvfc_event *evt = &pool->events[i];

		atomic_set(&evt->free, 1);
		evt->crq.valid = 0x80;
		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
		evt->xfer_iu = pool->iu_storage + i;
		evt->vhost = vhost;
		evt->queue = queue;
		evt->ext_list = NULL;
		list_add_tail(&evt->queue_list, &queue->free);
	}

	LEAVE;
	return 0;
}

/**
 * ibmvfc_free_event_pool - Frees memory of the event pool of a host
 * @vhost:	ibmvfc host that owns the event pool
 * @queue:      ibmvfc queue struct
 *
 **/
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
				   struct ibmvfc_queue *queue)
{
	int i;
	struct ibmvfc_event_pool *pool = &queue->evt_pool;

	ENTER;
	for (i = 0; i < pool->size; ++i) {
		list_del(&pool->events[i].queue_list);
		BUG_ON(atomic_read(&pool->events[i].free) != 1);
		if (pool->events[i].ext_list)
			dma_pool_free(vhost->sg_pool,
				      pool->events[i].ext_list,
				      pool->events[i].ext_list_token);
	}

	kfree(pool->events);
	dma_free_coherent(vhost->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
	LEAVE;
}

/**
 * ibmvfc_free_queue - Deallocate queue
 * @vhost:	ibmvfc host struct
 * @queue:	ibmvfc queue struct
 *
 * Unmaps dma and deallocates page for messages
 **/
static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
			      struct ibmvfc_queue *queue)
{
	struct device *dev = vhost->dev;

	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs.handle);
	queue->msgs.handle = NULL;

	ibmvfc_free_event_pool(vhost, queue);
}

/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	ibmvfc_free_queue(vhost, crq);
}

/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	unsigned long flags;

	ibmvfc_release_sub_crqs(vhost);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_init_sub_crqs(vhost);

	return rc;
}

/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_release_sub_crqs(vhost);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;

	/* Clean out the queue */
	memset(crq->msgs.crq, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_init_sub_crqs(vhost);

	return rc;
}

/**
 * ibmvfc_valid_event - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	ibmvfc event to be checked for validity
 *
 * Return value:
 *	1 if event is valid / 0 if event is not valid
 **/
static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
			      struct ibmvfc_event *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * ibmvfc_free_event - Free the specified event
 * @evt:	ibmvfc_event to be freed
 *
 **/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
	unsigned long flags;

	BUG_ON(!ibmvfc_valid_event(pool, evt));
	BUG_ON(atomic_inc_return(&evt->free) != 1);

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->free);
	if (evt->eh_comp)
		complete(evt->eh_comp);
	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}

/**
 * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
 * @evt:	ibmvfc event struct
 *
 * This function does not set up any error status; that must be done
 * before this function gets called.
 **/
static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
{
	struct scsi_cmnd *cmnd = evt->cmnd;

	if (cmnd) {
		scsi_dma_unmap(cmnd);
		cmnd->scsi_done(cmnd);
	}

	ibmvfc_free_event(evt);
}

/**
 * ibmvfc_complete_purge - Complete failed command list
 * @purge_list:		list head of failed commands
 *
 * This function runs completions on commands that failed as a result of a
 * host reset or platform migration.
 **/
static void ibmvfc_complete_purge(struct list_head *purge_list)
{
	struct ibmvfc_event *evt, *pos;

	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}

/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	if (evt->cmnd) {
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	del_timer(&evt->timer);
}

/**
 * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
 * @vhost:		ibmvfc host struct
 * @error_code:	error code to fail requests with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
	struct ibmvfc_event *evt, *pos;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int hwqs = 0;
	int i;

	if (vhost->using_channels)
		hwqs = vhost->scsi_scrqs.active_queues;

	ibmvfc_dbg(vhost, "Purging all requests\n");
	spin_lock_irqsave(&vhost->crq.l_lock, flags);
	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
		ibmvfc_fail_request(evt, error_code);
	list_splice_init(&vhost->crq.sent, &vhost->purge);
	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);

	for (i = 0; i < hwqs; i++) {
		spin_lock_irqsave(queues[i].q_lock, flags);
		spin_lock(&queues[i].l_lock);
		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
			ibmvfc_fail_request(evt, error_code);
		list_splice_init(&queues[i].sent, &vhost->purge);
		spin_unlock(&queues[i].l_lock);
		spin_unlock_irqrestore(queues[i].q_lock, flags);
	}
}

/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:	struct ibmvfc host to reset
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}

/**
 * __ibmvfc_reset_host - Reset the connection to the server (no locking)
 * @vhost:	struct ibmvfc host to reset
 **/
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
		vhost->job_step = ibmvfc_npiv_logout;
		wake_up(&vhost->work_wait_q);
	} else
		ibmvfc_hard_reset_host(vhost);
}

/**
 * ibmvfc_reset_host - Reset the connection to the server
 * @vhost:	ibmvfc host struct
 **/
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
{
	unsigned long flags;

	spin_lock_irqsave(vhost->host->host_lock, flags);
	__ibmvfc_reset_host(vhost);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}

/**
 * ibmvfc_retry_host_init - Retry host initialization if allowed
 * @vhost:	ibmvfc host struct
 *
 * Returns: 1 if init will be retried / 0 if not
 *
 **/
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
{
	int retry = 0;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		vhost->delay_init = 1;
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
			__ibmvfc_reset_host(vhost);
		else {
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
			retry = 1;
		}
	}

	wake_up(&vhost->work_wait_q);
	return retry;
}

/**
 * __ibmvfc_get_target - Find the specified scsi_target (no locking)
 * @starget:	scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_host *vhost = shost_priv(shost);
	struct ibmvfc_target *tgt;

	list_for_each_entry(tgt, &vhost->targets, queue)
		if (tgt->target_id == starget->id) {
			kref_get(&tgt->kref);
			return tgt;
		}
	return NULL;
}

/**
 * ibmvfc_get_target - Find the specified scsi_target
 * @starget:	scsi target struct
 *
 * Return value:
 *	ibmvfc_target struct / NULL if not found
 **/
static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
	struct ibmvfc_target *tgt;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	tgt = __ibmvfc_get_target(starget);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return tgt;
}

/**
 * ibmvfc_get_host_speed - Get host port speed
 * @shost:		scsi host struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	if (vhost->state == IBMVFC_ACTIVE) {
		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
		case 1:
			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
			break;
		case 2:
			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
			break;
		case 4:
			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
			break;
		case 8:
			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
			break;
		case 10:
			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
			break;
		case 16:
			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
			break;
		default:
			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
			break;
		}
	} else
		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_get_host_port_state - Get host port state
 * @shost:		scsi host struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	switch (vhost->state) {
	case IBMVFC_INITIALIZING:
	case IBMVFC_ACTIVE:
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
		break;
	case IBMVFC_LINK_DOWN:
		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
		break;
	case IBMVFC_LINK_DEAD:
	case IBMVFC_HOST_OFFLINE:
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
		break;
	case IBMVFC_HALTED:
		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
		break;
	case IBMVFC_NO_CRQ:
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	default:
		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
		break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
 * @rport:		rport struct
 * @timeout:	timeout value
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

/**
 * ibmvfc_release_tgt - Free memory allocated for a target
 * @kref:		kref struct
 *
 **/
static void ibmvfc_release_tgt(struct kref *kref)
{
	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
	kfree(tgt);
}

/**
 * ibmvfc_get_starget_node_name - Get SCSI target's node name
 * @starget:	scsi target struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_name - Get SCSI target's port name
 * @starget:	scsi target struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_get_starget_port_id - Get SCSI target's port ID
 * @starget:	scsi target struct
 *
 * Return value:
 * 	none
 **/
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
{
	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
	if (tgt)
		kref_put(&tgt->kref, ibmvfc_release_tgt);
}

/**
 * ibmvfc_wait_while_resetting - Wait while the host resets
 * @vhost:		ibmvfc host struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
{
	long timeout = wait_event_timeout(vhost->init_wait_q,
					  ((vhost->state == IBMVFC_ACTIVE ||
					    vhost->state == IBMVFC_HOST_OFFLINE ||
					    vhost->state == IBMVFC_LINK_DEAD) &&
					   vhost->action == IBMVFC_HOST_ACTION_NONE),
					  (init_timeout * HZ));

	return timeout ? 0 : -EIO;
}

/**
 * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
 * @shost:		scsi host struct
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
{
	struct ibmvfc_host *vhost = shost_priv(shost);

	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
	ibmvfc_reset_host(vhost);
	return ibmvfc_wait_while_resetting(vhost);
}

/**
 * ibmvfc_gather_partition_info - Gather info about the LPAR
 * @vhost:      ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
{
	struct device_node *rootdn;
	const char *name;
	const unsigned int *num;

	rootdn = of_find_node_by_path("/");
	if (!rootdn)
		return;

	name = of_get_property(rootdn, "ibm,partition-name", NULL);
	if (name)
		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
	num = of_get_property(rootdn, "ibm,partition-no", NULL);
	if (num)
		vhost->partition_number = *num;
	of_node_put(rootdn);
}

/**
 * ibmvfc_set_login_info - Setup info for NPIV login
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	none
 **/
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
	struct ibmvfc_queue *async_crq = &vhost->async_crq;
	struct device_node *of_node = vhost->dev->of_node;
	const char *location;

	memset(login_info, 0, sizeof(*login_info));

	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
	login_info->partition_num = cpu_to_be32(vhost->partition_number);
	login_info->vfc_frame_version = cpu_to_be32(1);
	login_info->fcp_version = cpu_to_be16(3);
	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
	if (vhost->client_migrated)
		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);

	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);

	if (vhost->mq_enabled || vhost->using_channels)
		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);

	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
	login_info->async.len = cpu_to_be32(async_crq->size *
					    sizeof(*async_crq->msgs.async));
	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
	strncpy(login_info->device_name,
		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);

	location = of_get_property(of_node, "ibm,loc-code", NULL);
	location = location ? location : dev_name(vhost->dev);
	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
}

/**
 * ibmvfc_get_event - Gets the next free event in pool
 * @queue:      ibmvfc queue struct
 *
 * Returns a free event from the pool.
 **/
static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
{
	struct ibmvfc_event *evt;
	unsigned long flags;

	spin_lock_irqsave(&queue->l_lock, flags);
	BUG_ON(list_empty(&queue->free));
	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
	atomic_set(&evt->free, 0);
	list_del(&evt->queue_list);
	spin_unlock_irqrestore(&queue->l_lock, flags);
	return evt;
}

/**
 * ibmvfc_locked_done - Calls evt completion with host_lock held
 * @evt:	ibmvfc evt to complete
 *
 * All non-scsi command completion callbacks have the expectation that the
 * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
 * MAD evt with the host_lock.
 **/
static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
	unsigned long flags;

	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
	evt->_done(evt);
	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}

/**
 * ibmvfc_init_event - Initialize fields in an event struct that are always
 *				required.
 * @evt:	The event
 * @done:	Routine to call when the event is responded to
 * @format:	SRP or MAD format
 **/
static void ibmvfc_init_event(struct ibmvfc_event *evt,
			      void (*done) (struct ibmvfc_event *), u8 format)
{
	evt->cmnd = NULL;
	evt->sync_iu = NULL;
	evt->eh_comp = NULL;
	evt->crq.format = format;
	if (format == IBMVFC_CMD_FORMAT)
		evt->done = done;
	else {
		evt->_done = done;
		evt->done = ibmvfc_locked_done;
	}
	evt->hwq = 0;
}

/**
 * ibmvfc_map_sg_list - Initialize scatterlist
 * @scmd:	scsi command struct
 * @nseg:	number of scatterlist segments
 * @md:	memory descriptor list to initialize
 **/
static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
			       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;

	scsi_for_each_sg(scmd, sg, nseg, i) {
		md[i].va = cpu_to_be64(sg_dma_address(sg));
		md[i].len = cpu_to_be32(sg_dma_len(sg));
		md[i].key = 0;
	}
}

/**
 * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
 * @scmd:		struct scsi_cmnd with the scatterlist
 * @evt:		ibmvfc event struct
 * @vfc_cmd:	vfc_cmd that contains the memory descriptor
 * @dev:		device for which to map dma memory
 *
 * Returns:
 *	0 on success / non-zero on failure
 **/
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
			      struct ibmvfc_event *evt,
			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
{
	int sg_mapped;
	struct srp_direct_buf *data = &vfc_cmd->ioba;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);

	if (cls3_error)
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);

	sg_mapped = scsi_dma_map(scmd);
	if (!sg_mapped) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
		return 0;
	} else if (unlikely(sg_mapped < 0)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
		return sg_mapped;
	}

	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
		iu->add_cdb_len |= IBMVFC_WRDATA;
	} else {
		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
		iu->add_cdb_len |= IBMVFC_RDDATA;
	}

	if (sg_mapped == 1) {
		ibmvfc_map_sg_list(scmd, sg_mapped, data);
		return 0;
	}

	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);

	if (!evt->ext_list) {
		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
					       &evt->ext_list_token);

		if (!evt->ext_list) {
			scsi_dma_unmap(scmd);
			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
			return -ENOMEM;
		}
	}

	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);

	data->va = cpu_to_be64(evt->ext_list_token);
	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
	data->key = 0;
	return 0;
}

/**
 * ibmvfc_timeout - Internal command timeout handler
 * @t:	struct ibmvfc_event that timed out
 *
 * Called when an internally generated command times out
 **/
static void ibmvfc_timeout(struct timer_list *t)
{
	struct ibmvfc_event *evt = from_timer(evt, t, timer);
	struct ibmvfc_host *vhost = evt->vhost;
	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
	ibmvfc_reset_host(vhost);
}

/**
 * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
 * @evt:		event to be sent
 * @vhost:		ibmvfc host struct
 * @timeout:	timeout in seconds - 0 means do not time the command
 *
 * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
 **/
static int ibmvfc_send_event(struct ibmvfc_event *evt,
			     struct ibmvfc_host *vhost, unsigned long timeout)
{
	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
	unsigned long flags;
	int rc;

	/* Copy the IU into the transfer area */
	*evt->xfer_iu = evt->iu;
	if (evt->crq.format == IBMVFC_CMD_FORMAT)
		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
	else
		BUG();

	timer_setup(&evt->timer, ibmvfc_timeout, 0);

	if (timeout) {
		evt->timer.expires = jiffies + (timeout * HZ);
		add_timer(&evt->timer);
	}

	spin_lock_irqsave(&evt->queue->l_lock, flags);
	list_add_tail(&evt->queue_list, &evt->queue->sent);

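	/* Order the list add above before handing the message to the hypervisor */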
1683 	mb();
1684 
1685 	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1686 		rc = ibmvfc_send_sub_crq(vhost,
1687 					 evt->queue->vios_cookie,
1688 					 be64_to_cpu(crq_as_u64[0]),
1689 					 be64_to_cpu(crq_as_u64[1]),
1690 					 0, 0);
1691 	else
1692 		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1693 				     be64_to_cpu(crq_as_u64[1]));
1694 
1695 	if (rc) {
1696 		list_del(&evt->queue_list);
1697 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1698 		del_timer(&evt->timer);
1699 
1700 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1701 		 * Firmware will send a CRQ with a transport event (0xFF) to
1702 		 * tell this client what has happened to the transport. This
1703 		 * will be handled in ibmvfc_handle_crq()
1704 		 */
1705 		if (rc == H_CLOSED) {
1706 			if (printk_ratelimit())
1707 				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1708 			if (evt->cmnd)
1709 				scsi_dma_unmap(evt->cmnd);
1710 			ibmvfc_free_event(evt);
1711 			return SCSI_MLQUEUE_HOST_BUSY;
1712 		}
1713 
1714 		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1715 		if (evt->cmnd) {
1716 			evt->cmnd->result = DID_ERROR << 16;
1717 			evt->done = ibmvfc_scsi_eh_done;
1718 		} else
1719 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1720 
1721 		evt->done(evt);
1722 	} else {
1723 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1724 		ibmvfc_trc_start(evt);
1725 	}
1726 
1727 	return 0;
1728 }
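/*
 * Editorial note: the event pointer itself is carried as the CRQ tag/ioba,
 * so the response path can recover the event without a lookup table. A
 * minimal sketch of the reverse mapping done in ibmvfc_handle_crq() below:
 *
 *	struct ibmvfc_event *evt =
 *		(struct ibmvfc_event *)be64_to_cpu(crq->ioba);
 */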
1729 
1730 /**
1731  * ibmvfc_log_error - Log an error for the failed command if appropriate
1732  * @evt:	ibmvfc event to log
1733  *
1734  **/
1735 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1736 {
1737 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1738 	struct ibmvfc_host *vhost = evt->vhost;
1739 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1740 	struct scsi_cmnd *cmnd = evt->cmnd;
1741 	const char *err = unknown_error;
1742 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1743 	int logerr = 0;
1744 	int rsp_code = 0;
1745 
1746 	if (index >= 0) {
1747 		logerr = cmd_status[index].log;
1748 		err = cmd_status[index].name;
1749 	}
1750 
1751 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1752 		return;
1753 
1754 	if (rsp->flags & FCP_RSP_LEN_VALID)
1755 		rsp_code = rsp->data.info.rsp_code;
1756 
1757 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1758 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1759 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1760 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1761 }
1762 
1763 /**
1764  * ibmvfc_relogin - Log back into the specified device
1765  * @sdev:	scsi device struct
1766  *
1767  **/
1768 static void ibmvfc_relogin(struct scsi_device *sdev)
1769 {
1770 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1771 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1772 	struct ibmvfc_target *tgt;
1773 	unsigned long flags;
1774 
1775 	spin_lock_irqsave(vhost->host->host_lock, flags);
1776 	list_for_each_entry(tgt, &vhost->targets, queue) {
1777 		if (rport == tgt->rport) {
1778 			ibmvfc_del_tgt(tgt);
1779 			break;
1780 		}
1781 	}
1782 
1783 	ibmvfc_reinit_host(vhost);
1784 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1785 }
1786 
1787 /**
1788  * ibmvfc_scsi_done - Handle responses from commands
1789  * @evt:	ibmvfc event to be handled
1790  *
1791  * Used as a callback when sending scsi cmds.
1792  **/
1793 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1794 {
1795 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1796 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1797 	struct scsi_cmnd *cmnd = evt->cmnd;
1798 	u32 rsp_len = 0;
1799 	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1800 
1801 	if (cmnd) {
1802 		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1803 			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1804 		else if (rsp->flags & FCP_RESID_UNDER)
1805 			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1806 		else
1807 			scsi_set_resid(cmnd, 0);
1808 
1809 		if (vfc_cmd->status) {
1810 			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1811 
1812 			if (rsp->flags & FCP_RSP_LEN_VALID)
1813 				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1814 			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1815 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1816 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1817 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1818 			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1819 			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1820 				ibmvfc_relogin(cmnd->device);
1821 
1822 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1823 				cmnd->result = (DID_ERROR << 16);
1824 
1825 			ibmvfc_log_error(evt);
1826 		}
1827 
1828 		if (!cmnd->result &&
1829 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1830 			cmnd->result = (DID_ERROR << 16);
1831 
1832 		scsi_dma_unmap(cmnd);
1833 		cmnd->scsi_done(cmnd);
1834 	}
1835 
1836 	ibmvfc_free_event(evt);
1837 }
1838 
1839 /**
1840  * ibmvfc_host_chkready - Check if the host can accept commands
1841  * @vhost:	struct ibmvfc host
1842  *
1843  * Returns:
1844  *	0 if host can accept command / appropriate DID_* SCSI result if not
1845  **/
1846 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1847 {
1848 	int result = 0;
1849 
1850 	switch (vhost->state) {
1851 	case IBMVFC_LINK_DEAD:
1852 	case IBMVFC_HOST_OFFLINE:
1853 		result = DID_NO_CONNECT << 16;
1854 		break;
1855 	case IBMVFC_NO_CRQ:
1856 	case IBMVFC_INITIALIZING:
1857 	case IBMVFC_HALTED:
1858 	case IBMVFC_LINK_DOWN:
1859 		result = DID_REQUEUE << 16;
1860 		break;
1861 	case IBMVFC_ACTIVE:
1862 		result = 0;
1863 		break;
1864 	}
1865 
1866 	return result;
1867 }
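/*
 * Editorial note: a nonzero return from ibmvfc_host_chkready() is already a
 * SCSI result value, so a caller can fail the command immediately. Condensed
 * from ibmvfc_queuecommand() below:
 *
 *	if (unlikely((rc = ibmvfc_host_chkready(vhost)))) {
 *		cmnd->result = rc;
 *		cmnd->scsi_done(cmnd);
 *		return 0;
 *	}
 */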
1868 
1869 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1870 {
1871 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1872 	struct ibmvfc_host *vhost = evt->vhost;
1873 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1874 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1875 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1876 	size_t offset;
1877 
1878 	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
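	/* The response buffer sits at different offsets in the v1 and v2
	 * command layouts, so pick the right one before setting resp.va */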
1879 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1880 		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1881 		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1882 	} else
1883 		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1884 	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1885 	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1886 	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1887 	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1888 	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1889 	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1890 	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1891 	int_to_scsilun(sdev->lun, &iu->lun);
1892 
1893 	return vfc_cmd;
1894 }
1895 
1896 /**
1897  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1898  * @shost:	scsi host struct
1899  * @cmnd:	struct scsi_cmnd to be executed
1900  *
1901  * Returns:
1902  *	0 on success / other on failure
1903  **/
1904 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1905 {
1906 	struct ibmvfc_host *vhost = shost_priv(shost);
1907 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1908 	struct ibmvfc_cmd *vfc_cmd;
1909 	struct ibmvfc_fcp_cmd_iu *iu;
1910 	struct ibmvfc_event *evt;
1911 	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
1912 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1913 	u16 scsi_channel;
1914 	int rc;
1915 
1916 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1917 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1918 		cmnd->result = rc;
1919 		cmnd->scsi_done(cmnd);
1920 		return 0;
1921 	}
1922 
1923 	cmnd->result = (DID_OK << 16);
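	/* With channels in use, spread block-mq hardware queues across the
	 * sub-CRQs negotiated with the VIOS */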
1924 	if (vhost->using_channels) {
1925 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1926 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1927 		evt->hwq = scsi_channel;
1928 	} else
1929 		evt = ibmvfc_get_event(&vhost->crq);
1930 
1931 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1932 	evt->cmnd = cmnd;
1933 
1934 	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1935 	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1936 
1937 	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1938 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1939 
1940 	if (cmnd->flags & SCMD_TAGGED) {
1941 		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1942 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1943 	}
1944 
1945 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
1946 
1947 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1948 		return ibmvfc_send_event(evt, vhost, 0);
1949 
1950 	ibmvfc_free_event(evt);
1951 	if (rc == -ENOMEM)
1952 		return SCSI_MLQUEUE_HOST_BUSY;
1953 
1954 	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1955 		scmd_printk(KERN_ERR, cmnd,
1956 			    "Failed to map DMA buffer for command. rc=%d\n", rc);
1957 
1958 	cmnd->result = DID_ERROR << 16;
1959 	cmnd->scsi_done(cmnd);
1960 	return 0;
1961 }
1962 
1963 /**
1964  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1965  * @evt:	ibmvfc event struct
1966  *
1967  **/
1968 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1969 {
1970 	/* copy the response back */
1971 	if (evt->sync_iu)
1972 		*evt->sync_iu = *evt->xfer_iu;
1973 
1974 	complete(&evt->comp);
1975 }
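/*
 * Editorial note: ibmvfc_sync_completion() is the callback half of the
 * driver's synchronous MAD pattern. A condensed sketch of the caller side,
 * with locking elided (see ibmvfc_bsg_plogi() below for a full example):
 *
 *	evt = ibmvfc_get_event(&vhost->crq);
 *	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
 *	... fill in evt->iu ...
 *	evt->sync_iu = &rsp_iu;
 *	init_completion(&evt->comp);
 *	rc = ibmvfc_send_event(evt, vhost, default_timeout);
 *	if (!rc)
 *		wait_for_completion(&evt->comp);
 */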
1976 
1977 /**
1978  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1979  * @evt:	struct ibmvfc_event
1980  *
1981  **/
1982 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1983 {
1984 	struct ibmvfc_host *vhost = evt->vhost;
1985 
1986 	ibmvfc_free_event(evt);
1987 	vhost->aborting_passthru = 0;
1988 	dev_info(vhost->dev, "Passthru command cancelled\n");
1989 }
1990 
1991 /**
1992  * ibmvfc_bsg_timeout - Handle a BSG timeout
1993  * @job:	struct bsg_job that timed out
1994  *
1995  * Returns:
1996  *	0 on success / other on failure
1997  **/
1998 static int ibmvfc_bsg_timeout(struct bsg_job *job)
1999 {
2000 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2001 	unsigned long port_id = (unsigned long)job->dd_data;
2002 	struct ibmvfc_event *evt;
2003 	struct ibmvfc_tmf *tmf;
2004 	unsigned long flags;
2005 	int rc;
2006 
2007 	ENTER;
2008 	spin_lock_irqsave(vhost->host->host_lock, flags);
2009 	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
2010 		__ibmvfc_reset_host(vhost);
2011 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2012 		return 0;
2013 	}
2014 
2015 	vhost->aborting_passthru = 1;
2016 	evt = ibmvfc_get_event(&vhost->crq);
2017 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2018 
2019 	tmf = &evt->iu.tmf;
2020 	memset(tmf, 0, sizeof(*tmf));
2021 	tmf->common.version = cpu_to_be32(1);
2022 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2023 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2024 	tmf->scsi_id = cpu_to_be64(port_id);
2025 	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2026 	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2027 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2028 
2029 	if (rc != 0) {
2030 		vhost->aborting_passthru = 0;
2031 		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2032 		rc = -EIO;
2033 	} else
2034 		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2035 			 port_id);
2036 
2037 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2038 
2039 	LEAVE;
2040 	return rc;
2041 }
2042 
2043 /**
2044  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2045  * @vhost:		struct ibmvfc_host to send the command on
2046  * @port_id:	port ID of the target to log into
2047  *
2048  * Returns:
2049  *	0 on success / other on failure
2050  **/
2051 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2052 {
2053 	struct ibmvfc_port_login *plogi;
2054 	struct ibmvfc_target *tgt;
2055 	struct ibmvfc_event *evt;
2056 	union ibmvfc_iu rsp_iu;
2057 	unsigned long flags;
2058 	int rc = 0, issue_login = 1;
2059 
2060 	ENTER;
2061 	spin_lock_irqsave(vhost->host->host_lock, flags);
2062 	list_for_each_entry(tgt, &vhost->targets, queue) {
2063 		if (tgt->scsi_id == port_id) {
2064 			issue_login = 0;
2065 			break;
2066 		}
2067 	}
2068 
2069 	if (!issue_login)
2070 		goto unlock_out;
2071 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2072 		goto unlock_out;
2073 
2074 	evt = ibmvfc_get_event(&vhost->crq);
2075 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2076 	plogi = &evt->iu.plogi;
2077 	memset(plogi, 0, sizeof(*plogi));
2078 	plogi->common.version = cpu_to_be32(1);
2079 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2080 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
2081 	plogi->scsi_id = cpu_to_be64(port_id);
2082 	evt->sync_iu = &rsp_iu;
2083 	init_completion(&evt->comp);
2084 
2085 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2086 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2087 
2088 	if (rc)
2089 		return -EIO;
2090 
2091 	wait_for_completion(&evt->comp);
2092 
2093 	if (rsp_iu.plogi.common.status)
2094 		rc = -EIO;
2095 
2096 	spin_lock_irqsave(vhost->host->host_lock, flags);
2097 	ibmvfc_free_event(evt);
2098 unlock_out:
2099 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2100 	LEAVE;
2101 	return rc;
2102 }
2103 
2104 /**
2105  * ibmvfc_bsg_request - Handle a BSG request
2106  * @job:	struct bsg_job to be executed
2107  *
2108  * Returns:
2109  *	0 on success / other on failure
2110  **/
2111 static int ibmvfc_bsg_request(struct bsg_job *job)
2112 {
2113 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2114 	struct fc_rport *rport = fc_bsg_to_rport(job);
2115 	struct ibmvfc_passthru_mad *mad;
2116 	struct ibmvfc_event *evt;
2117 	union ibmvfc_iu rsp_iu;
2118 	unsigned long flags, port_id = -1;
2119 	struct fc_bsg_request *bsg_request = job->request;
2120 	struct fc_bsg_reply *bsg_reply = job->reply;
2121 	unsigned int code = bsg_request->msgcode;
2122 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
2123 	u32 fc_flags, rsp_len;
2124 
2125 	ENTER;
2126 	bsg_reply->reply_payload_rcv_len = 0;
2127 	if (rport)
2128 		port_id = rport->port_id;
2129 
2130 	switch (code) {
2131 	case FC_BSG_HST_ELS_NOLOGIN:
2132 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2133 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
2134 			bsg_request->rqst_data.h_els.port_id[2];
2135 		fallthrough;
2136 	case FC_BSG_RPT_ELS:
2137 		fc_flags = IBMVFC_FC_ELS;
2138 		break;
2139 	case FC_BSG_HST_CT:
2140 		issue_login = 1;
2141 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2142 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2143 			bsg_request->rqst_data.h_ct.port_id[2];
2144 		fallthrough;
2145 	case FC_BSG_RPT_CT:
2146 		fc_flags = IBMVFC_FC_CT_IU;
2147 		break;
2148 	default:
2149 		return -ENOTSUPP;
2150 	}
2151 
2152 	if (port_id == -1)
2153 		return -EINVAL;
2154 	if (!mutex_trylock(&vhost->passthru_mutex))
2155 		return -EBUSY;
2156 
2157 	job->dd_data = (void *)port_id;
2158 	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2159 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2160 
2161 	if (!req_seg) {
2162 		mutex_unlock(&vhost->passthru_mutex);
2163 		return -ENOMEM;
2164 	}
2165 
2166 	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2167 			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2168 
2169 	if (!rsp_seg) {
2170 		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2171 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2172 		mutex_unlock(&vhost->passthru_mutex);
2173 		return -ENOMEM;
2174 	}
2175 
2176 	if (req_seg > 1 || rsp_seg > 1) {
2177 		rc = -EINVAL;
2178 		goto out;
2179 	}
2180 
2181 	if (issue_login)
2182 		rc = ibmvfc_bsg_plogi(vhost, port_id);
2183 
2184 	spin_lock_irqsave(vhost->host->host_lock, flags);
2185 
2186 	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2187 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2188 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2189 		goto out;
2190 	}
2191 
2192 	evt = ibmvfc_get_event(&vhost->crq);
2193 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2194 	mad = &evt->iu.passthru;
2195 
2196 	memset(mad, 0, sizeof(*mad));
2197 	mad->common.version = cpu_to_be32(1);
2198 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2199 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2200 
2201 	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2202 		offsetof(struct ibmvfc_passthru_mad, iu));
2203 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2204 
2205 	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2206 	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2207 	mad->iu.flags = cpu_to_be32(fc_flags);
2208 	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2209 
2210 	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2211 	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2212 	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2213 	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2214 	mad->iu.scsi_id = cpu_to_be64(port_id);
2215 	mad->iu.tag = cpu_to_be64((u64)evt);
2216 	rsp_len = be32_to_cpu(mad->iu.rsp.len);
2217 
2218 	evt->sync_iu = &rsp_iu;
2219 	init_completion(&evt->comp);
2220 	rc = ibmvfc_send_event(evt, vhost, 0);
2221 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2222 
2223 	if (rc) {
2224 		rc = -EIO;
2225 		goto out;
2226 	}
2227 
2228 	wait_for_completion(&evt->comp);
2229 
2230 	if (rsp_iu.passthru.common.status)
2231 		rc = -EIO;
2232 	else
2233 		bsg_reply->reply_payload_rcv_len = rsp_len;
2234 
2235 	spin_lock_irqsave(vhost->host->host_lock, flags);
2236 	ibmvfc_free_event(evt);
2237 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2238 	bsg_reply->result = rc;
2239 	bsg_job_done(job, bsg_reply->result,
2240 		       bsg_reply->reply_payload_rcv_len);
2241 	rc = 0;
2242 out:
2243 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2244 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2245 	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2246 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2247 	mutex_unlock(&vhost->passthru_mutex);
2248 	LEAVE;
2249 	return rc;
2250 }
2251 
2252 /**
2253  * ibmvfc_reset_device - Reset the device with the specified reset type
2254  * @sdev:	scsi device to reset
2255  * @type:	reset type
2256  * @desc:	reset type description for log messages
2257  *
2258  * Returns:
2259  *	0 on success / other on failure
2260  **/
2261 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2262 {
2263 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2264 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2265 	struct ibmvfc_cmd *tmf;
2266 	struct ibmvfc_event *evt = NULL;
2267 	union ibmvfc_iu rsp_iu;
2268 	struct ibmvfc_fcp_cmd_iu *iu;
2269 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2270 	int rsp_rc = -EBUSY;
2271 	unsigned long flags;
2272 	int rsp_code = 0;
2273 
2274 	spin_lock_irqsave(vhost->host->host_lock, flags);
2275 	if (vhost->state == IBMVFC_ACTIVE) {
2276 		if (vhost->using_channels)
2277 			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2278 		else
2279 			evt = ibmvfc_get_event(&vhost->crq);
2280 
2281 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2282 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2283 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2284 
2285 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2286 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2287 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2288 		iu->tmf_flags = type;
2289 		evt->sync_iu = &rsp_iu;
2290 
2291 		init_completion(&evt->comp);
2292 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2293 	}
2294 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2295 
2296 	if (rsp_rc != 0) {
2297 		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2298 			    desc, rsp_rc);
2299 		return -EIO;
2300 	}
2301 
2302 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2303 	wait_for_completion(&evt->comp);
2304 
2305 	if (rsp_iu.cmd.status)
2306 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2307 
2308 	if (rsp_code) {
2309 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2310 			rsp_code = fc_rsp->data.info.rsp_code;
2311 
2312 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2313 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2314 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2315 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2316 			    fc_rsp->scsi_status);
2317 		rsp_rc = -EIO;
2318 	} else
2319 		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2320 
2321 	spin_lock_irqsave(vhost->host->host_lock, flags);
2322 	ibmvfc_free_event(evt);
2323 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2324 	return rsp_rc;
2325 }
2326 
2327 /**
2328  * ibmvfc_match_rport - Match function for specified remote port
2329  * @evt:	ibmvfc event struct
2330  * @rport:	device to match
2331  *
2332  * Returns:
2333  *	1 if event matches rport / 0 if event does not match rport
2334  **/
2335 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2336 {
2337 	struct fc_rport *cmd_rport;
2338 
2339 	if (evt->cmnd) {
2340 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2341 		if (cmd_rport == rport)
2342 			return 1;
2343 	}
2344 	return 0;
2345 }
2346 
2347 /**
2348  * ibmvfc_match_target - Match function for specified target
2349  * @evt:	ibmvfc event struct
2350  * @device:	device to match (starget)
2351  *
2352  * Returns:
2353  *	1 if event matches starget / 0 if event does not match starget
2354  **/
2355 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2356 {
2357 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2358 		return 1;
2359 	return 0;
2360 }
2361 
2362 /**
2363  * ibmvfc_match_lun - Match function for specified LUN
2364  * @evt:	ibmvfc event struct
2365  * @device:	device to match (sdev)
2366  *
2367  * Returns:
2368  *	1 if event matches sdev / 0 if event does not match sdev
2369  **/
2370 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2371 {
2372 	if (evt->cmnd && evt->cmnd->device == device)
2373 		return 1;
2374 	return 0;
2375 }
2376 
2377 /**
2378  * ibmvfc_event_is_free - Check if event is free or not
2379  * @evt:	ibmvfc event struct
2380  *
2381  * Returns:
2382  *	true if the event is on the free list / false otherwise
2383  **/
2384 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2385 {
2386 	struct ibmvfc_event *loop_evt;
2387 
2388 	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2389 		if (loop_evt == evt)
2390 			return true;
2391 
2392 	return false;
2393 }
2394 
2395 /**
2396  * ibmvfc_wait_for_ops - Wait for ops to complete
2397  * @vhost:	ibmvfc host struct
2398  * @device:	device to match (starget or sdev)
2399  * @match:	match function
2400  *
2401  * Returns:
2402  *	SUCCESS / FAILED
2403  **/
2404 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2405 			       int (*match) (struct ibmvfc_event *, void *))
2406 {
2407 	struct ibmvfc_event *evt;
2408 	DECLARE_COMPLETION_ONSTACK(comp);
2409 	int wait, i, q_index, q_size;
2410 	unsigned long flags;
2411 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2412 	struct ibmvfc_queue *queues;
2413 
2414 	ENTER;
2415 	if (vhost->mq_enabled && vhost->using_channels) {
2416 		queues = vhost->scsi_scrqs.scrqs;
2417 		q_size = vhost->scsi_scrqs.active_queues;
2418 	} else {
2419 		queues = &vhost->crq;
2420 		q_size = 1;
2421 	}
2422 
2423 	do {
2424 		wait = 0;
2425 		spin_lock_irqsave(vhost->host->host_lock, flags);
2426 		for (q_index = 0; q_index < q_size; q_index++) {
2427 			spin_lock(&queues[q_index].l_lock);
2428 			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2429 				evt = &queues[q_index].evt_pool.events[i];
2430 				if (!ibmvfc_event_is_free(evt)) {
2431 					if (match(evt, device)) {
2432 						evt->eh_comp = &comp;
2433 						wait++;
2434 					}
2435 				}
2436 			}
2437 			spin_unlock(&queues[q_index].l_lock);
2438 		}
2439 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2440 
2441 		if (wait) {
2442 			timeout = wait_for_completion_timeout(&comp, timeout);
2443 
2444 			if (!timeout) {
2445 				wait = 0;
2446 				spin_lock_irqsave(vhost->host->host_lock, flags);
2447 				for (q_index = 0; q_index < q_size; q_index++) {
2448 					spin_lock(&queues[q_index].l_lock);
2449 					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2450 						evt = &queues[q_index].evt_pool.events[i];
2451 						if (!ibmvfc_event_is_free(evt)) {
2452 							if (match(evt, device)) {
2453 								evt->eh_comp = NULL;
2454 								wait++;
2455 							}
2456 						}
2457 					}
2458 					spin_unlock(&queues[q_index].l_lock);
2459 				}
2460 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2461 				if (wait)
2462 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2463 				LEAVE;
2464 				return wait ? FAILED : SUCCESS;
2465 			}
2466 		}
2467 	} while (wait);
2468 
2469 	LEAVE;
2470 	return SUCCESS;
2471 }
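/*
 * Editorial note: ibmvfc_wait_for_ops() pairs with the ibmvfc_match_*()
 * helpers below. Condensed from ibmvfc_eh_abort_handler() later in this
 * file:
 *
 *	cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
 *	if (!cancel_rc)
 *		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
 */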
2472 
2473 static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
2474 					    struct scsi_device *sdev,
2475 					    int type)
2476 {
2477 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2478 	struct scsi_target *starget = scsi_target(sdev);
2479 	struct fc_rport *rport = starget_to_rport(starget);
2480 	struct ibmvfc_event *evt;
2481 	struct ibmvfc_tmf *tmf;
2482 
2483 	evt = ibmvfc_get_event(queue);
2484 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2485 
2486 	tmf = &evt->iu.tmf;
2487 	memset(tmf, 0, sizeof(*tmf));
2488 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2489 		tmf->common.version = cpu_to_be32(2);
2490 		tmf->target_wwpn = cpu_to_be64(rport->port_name);
2491 	} else {
2492 		tmf->common.version = cpu_to_be32(1);
2493 	}
2494 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2495 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2496 	tmf->scsi_id = cpu_to_be64(rport->port_id);
2497 	int_to_scsilun(sdev->lun, &tmf->lun);
2498 	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2499 		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2500 	if (vhost->state == IBMVFC_ACTIVE)
2501 		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2502 	else
2503 		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2504 	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2505 	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2506 
2507 	init_completion(&evt->comp);
2508 
2509 	return evt;
2510 }
2511 
2512 static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
2513 {
2514 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2515 	struct ibmvfc_event *evt, *found_evt, *temp;
2516 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
2517 	unsigned long flags;
2518 	int num_hwq, i;
2519 	int fail = 0;
2520 	LIST_HEAD(cancelq);
2521 	u16 status;
2522 
2523 	ENTER;
2524 	spin_lock_irqsave(vhost->host->host_lock, flags);
2525 	num_hwq = vhost->scsi_scrqs.active_queues;
2526 	for (i = 0; i < num_hwq; i++) {
2527 		spin_lock(queues[i].q_lock);
2528 		spin_lock(&queues[i].l_lock);
2529 		found_evt = NULL;
2530 		list_for_each_entry(evt, &queues[i].sent, queue_list) {
2531 			if (evt->cmnd && evt->cmnd->device == sdev) {
2532 				found_evt = evt;
2533 				break;
2534 			}
2535 		}
2536 		spin_unlock(&queues[i].l_lock);
2537 
2538 		if (found_evt && vhost->logged_in) {
2539 			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
2540 			evt->sync_iu = &queues[i].cancel_rsp;
2541 			ibmvfc_send_event(evt, vhost, default_timeout);
2542 			list_add_tail(&evt->cancel, &cancelq);
2543 		}
2544 
2545 		spin_unlock(queues[i].q_lock);
2546 	}
2547 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2548 
2549 	if (list_empty(&cancelq)) {
2550 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2551 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2552 		return 0;
2553 	}
2554 
2555 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2556 
2557 	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
2558 		wait_for_completion(&evt->comp);
2559 		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
2560 		list_del(&evt->cancel);
2561 		ibmvfc_free_event(evt);
2562 
2563 		if (status != IBMVFC_MAD_SUCCESS) {
2564 			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2565 			switch (status) {
2566 			case IBMVFC_MAD_DRIVER_FAILED:
2567 			case IBMVFC_MAD_CRQ_ERROR:
2568 				/* Host adapter most likely going through reset; return
2569 				 * success so the caller will wait for the cancelled
2570 				 * command to be returned. */
2571 				break;
2572 			default:
2573 				fail = 1;
2574 				break;
2575 			}
2576 		}
2577 	}
2578 
2579 	if (fail)
2580 		return -EIO;
2581 
2582 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2583 	LEAVE;
2584 	return 0;
2585 }
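/*
 * Editorial note: the multiqueue variant above issues one cancel TMF per
 * active sub-CRQ, parks the events on a local cancelq, and only then waits
 * on each completion, so the cancels proceed in parallel across queues.
 */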
2586 
2587 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2588 {
2589 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2590 	struct ibmvfc_event *evt, *found_evt;
2591 	union ibmvfc_iu rsp;
2592 	int rsp_rc = -EBUSY;
2593 	unsigned long flags;
2594 	u16 status;
2595 
2596 	ENTER;
2597 	found_evt = NULL;
2598 	spin_lock_irqsave(vhost->host->host_lock, flags);
2599 	spin_lock(&vhost->crq.l_lock);
2600 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2601 		if (evt->cmnd && evt->cmnd->device == sdev) {
2602 			found_evt = evt;
2603 			break;
2604 		}
2605 	}
2606 	spin_unlock(&vhost->crq.l_lock);
2607 
2608 	if (!found_evt) {
2609 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2610 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2611 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2612 		return 0;
2613 	}
2614 
2615 	if (vhost->logged_in) {
2616 		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2617 		evt->sync_iu = &rsp;
2618 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2619 	}
2620 
2621 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2622 
2623 	if (rsp_rc != 0) {
2624 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2625 		/* If a failure is received, the host adapter is most likely going
2626 		 * through reset; return success so the caller will wait for the
2627 		 * cancelled command to be returned. */
2628 		return 0;
2629 	}
2630 
2631 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2632 
2633 	wait_for_completion(&evt->comp);
2634 	status = be16_to_cpu(rsp.mad_common.status);
2635 	spin_lock_irqsave(vhost->host->host_lock, flags);
2636 	ibmvfc_free_event(evt);
2637 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2638 
2639 	if (status != IBMVFC_MAD_SUCCESS) {
2640 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2641 		switch (status) {
2642 		case IBMVFC_MAD_DRIVER_FAILED:
2643 		case IBMVFC_MAD_CRQ_ERROR:
2644 			/* Host adapter most likely going through reset; return success
2645 			 * so the caller will wait for the cancelled command to be returned. */
2646 			return 0;
2647 		default:
2648 			return -EIO;
2649 		}
2650 	}
2651 
2652 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2653 	return 0;
2654 }
2655 
2656 /**
2657  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2658  * @sdev:	scsi device to cancel commands
2659  * @type:	type of error recovery being performed
2660  *
2661  * This sends a cancel to the VIOS for the specified device. This does
2662  * NOT send any abort to the actual device. That must be done separately.
2663  *
2664  * Returns:
2665  *	0 on success / other on failure
2666  **/
2667 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2668 {
2669 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2670 
2671 	if (vhost->mq_enabled && vhost->using_channels)
2672 		return ibmvfc_cancel_all_mq(sdev, type);
2673 	else
2674 		return ibmvfc_cancel_all_sq(sdev, type);
2675 }
2676 
2677 /**
2678  * ibmvfc_match_key - Match function for specified cancel key
2679  * @evt:	ibmvfc event struct
2680  * @key:	cancel key to match
2681  *
2682  * Returns:
2683  *	1 if event matches key / 0 if event does not match key
2684  **/
2685 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2686 {
2687 	unsigned long cancel_key = (unsigned long)key;
2688 
2689 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2690 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2691 		return 1;
2692 	return 0;
2693 }
2694 
2695 /**
2696  * ibmvfc_match_evt - Match function for specified event
2697  * @evt:	ibmvfc event struct
2698  * @match:	event to match
2699  *
2700  * Returns:
2701  *	1 if event matches the specified event / 0 otherwise
2702  **/
2703 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2704 {
2705 	if (evt == match)
2706 		return 1;
2707 	return 0;
2708 }
2709 
2710 /**
2711  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2712  * @sdev:	scsi device to abort commands
2713  *
2714  * This sends an Abort Task Set to the VIOS for the specified device. This does
2715  * NOT send any cancel to the VIOS. That must be done separately.
2716  *
2717  * Returns:
2718  *	0 on success / other on failure
2719  **/
2720 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2721 {
2722 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2723 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2724 	struct ibmvfc_cmd *tmf;
2725 	struct ibmvfc_event *evt, *found_evt;
2726 	union ibmvfc_iu rsp_iu;
2727 	struct ibmvfc_fcp_cmd_iu *iu;
2728 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2729 	int rc, rsp_rc = -EBUSY;
2730 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2731 	int rsp_code = 0;
2732 
2733 	found_evt = NULL;
2734 	spin_lock_irqsave(vhost->host->host_lock, flags);
2735 	spin_lock(&vhost->crq.l_lock);
2736 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2737 		if (evt->cmnd && evt->cmnd->device == sdev) {
2738 			found_evt = evt;
2739 			break;
2740 		}
2741 	}
2742 	spin_unlock(&vhost->crq.l_lock);
2743 
2744 	if (!found_evt) {
2745 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2746 			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2747 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2748 		return 0;
2749 	}
2750 
2751 	if (vhost->state == IBMVFC_ACTIVE) {
2752 		evt = ibmvfc_get_event(&vhost->crq);
2753 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2754 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2755 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2756 
2757 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2758 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2759 		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2760 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2761 		evt->sync_iu = &rsp_iu;
2762 
2763 		tmf->correlation = cpu_to_be64((u64)evt);
2764 
2765 		init_completion(&evt->comp);
2766 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2767 	}
2768 
2769 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2770 
2771 	if (rsp_rc != 0) {
2772 		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2773 		return -EIO;
2774 	}
2775 
2776 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2777 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
2778 
2779 	if (!timeout) {
2780 		rc = ibmvfc_cancel_all(sdev, 0);
2781 		if (!rc) {
2782 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2783 			if (rc == SUCCESS)
2784 				rc = 0;
2785 		}
2786 
2787 		if (rc) {
2788 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2789 			ibmvfc_reset_host(vhost);
2790 			rsp_rc = -EIO;
2791 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2792 
2793 			if (rc == SUCCESS)
2794 				rsp_rc = 0;
2795 
2796 			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2797 			if (rc != SUCCESS) {
2798 				spin_lock_irqsave(vhost->host->host_lock, flags);
2799 				ibmvfc_hard_reset_host(vhost);
2800 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2801 				rsp_rc = 0;
2802 			}
2803 
2804 			goto out;
2805 		}
2806 	}
2807 
2808 	if (rsp_iu.cmd.status)
2809 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2810 
2811 	if (rsp_code) {
2812 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2813 			rsp_code = fc_rsp->data.info.rsp_code;
2814 
2815 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2816 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2817 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2818 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2819 			    fc_rsp->scsi_status);
2820 		rsp_rc = -EIO;
2821 	} else
2822 		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2823 
2824 out:
2825 	spin_lock_irqsave(vhost->host->host_lock, flags);
2826 	ibmvfc_free_event(evt);
2827 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2828 	return rsp_rc;
2829 }
2830 
2831 /**
2832  * ibmvfc_eh_abort_handler - Abort a command
2833  * @cmd:	scsi command to abort
2834  *
2835  * Returns:
2836  *	SUCCESS / FAST_IO_FAIL / FAILED
2837  **/
2838 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2839 {
2840 	struct scsi_device *sdev = cmd->device;
2841 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2842 	int cancel_rc, block_rc;
2843 	int rc = FAILED;
2844 
2845 	ENTER;
2846 	block_rc = fc_block_scsi_eh(cmd);
2847 	ibmvfc_wait_while_resetting(vhost);
2848 	if (block_rc != FAST_IO_FAIL) {
2849 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2850 		ibmvfc_abort_task_set(sdev);
2851 	} else
2852 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2853 
2854 	if (!cancel_rc)
2855 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2856 
2857 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2858 		rc = FAST_IO_FAIL;
2859 
2860 	LEAVE;
2861 	return rc;
2862 }
2863 
2864 /**
2865  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2866  * @cmd:	scsi command struct
2867  *
2868  * Returns:
2869  *	SUCCESS / FAST_IO_FAIL / FAILED
2870  **/
2871 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2872 {
2873 	struct scsi_device *sdev = cmd->device;
2874 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2875 	int cancel_rc, block_rc, reset_rc = 0;
2876 	int rc = FAILED;
2877 
2878 	ENTER;
2879 	block_rc = fc_block_scsi_eh(cmd);
2880 	ibmvfc_wait_while_resetting(vhost);
2881 	if (block_rc != FAST_IO_FAIL) {
2882 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2883 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2884 	} else
2885 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2886 
2887 	if (!cancel_rc && !reset_rc)
2888 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2889 
2890 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2891 		rc = FAST_IO_FAIL;
2892 
2893 	LEAVE;
2894 	return rc;
2895 }
2896 
2897 /**
2898  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all, suppressing ABTS
2899  * @sdev:	scsi device struct
2900  * @data:	return code
2901  *
2902  **/
2903 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2904 {
2905 	unsigned long *rc = data;
2906 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2907 }
2908 
2909 /**
2910  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all with target reset
2911  * @sdev:	scsi device struct
2912  * @data:	return code
2913  *
2914  **/
2915 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2916 {
2917 	unsigned long *rc = data;
2918 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2919 }
2920 
2921 /**
2922  * ibmvfc_eh_target_reset_handler - Reset the target
2923  * @cmd:	scsi command struct
2924  *
2925  * Returns:
2926  *	SUCCESS / FAST_IO_FAIL / FAILED
2927  **/
2928 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2929 {
2930 	struct scsi_device *sdev = cmd->device;
2931 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2932 	struct scsi_target *starget = scsi_target(sdev);
2933 	int block_rc;
2934 	int reset_rc = 0;
2935 	int rc = FAILED;
2936 	unsigned long cancel_rc = 0;
2937 
2938 	ENTER;
2939 	block_rc = fc_block_scsi_eh(cmd);
2940 	ibmvfc_wait_while_resetting(vhost);
2941 	if (block_rc != FAST_IO_FAIL) {
2942 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2943 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2944 	} else
2945 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2946 
2947 	if (!cancel_rc && !reset_rc)
2948 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2949 
2950 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2951 		rc = FAST_IO_FAIL;
2952 
2953 	LEAVE;
2954 	return rc;
2955 }
2956 
2957 /**
2958  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2959  * @cmd:	struct scsi_cmnd having problems
2960  *
2961  **/
2962 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2963 {
2964 	int rc;
2965 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2966 
2967 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2968 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2969 
2970 	return rc ? FAILED : SUCCESS;
2971 }
2972 
2973 /**
2974  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2975  * @rport:		rport struct
2976  *
2977  * Return value:
2978  * 	none
2979  **/
2980 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2981 {
2982 	struct Scsi_Host *shost = rport_to_shost(rport);
2983 	struct ibmvfc_host *vhost = shost_priv(shost);
2984 	struct fc_rport *dev_rport;
2985 	struct scsi_device *sdev;
2986 	struct ibmvfc_target *tgt;
2987 	unsigned long rc, flags;
2988 	unsigned int found;
2989 
2990 	ENTER;
2991 	shost_for_each_device(sdev, shost) {
2992 		dev_rport = starget_to_rport(scsi_target(sdev));
2993 		if (dev_rport != rport)
2994 			continue;
2995 		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2996 	}
2997 
2998 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
2999 
3000 	if (rc == FAILED)
3001 		ibmvfc_issue_fc_host_lip(shost);
3002 
3003 	spin_lock_irqsave(shost->host_lock, flags);
3004 	found = 0;
3005 	list_for_each_entry(tgt, &vhost->targets, queue) {
3006 		if (tgt->scsi_id == rport->port_id) {
3007 			found++;
3008 			break;
3009 		}
3010 	}
3011 
3012 	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
3013 		/*
3014 		 * If we get here, that means we previously attempted to send
3015 		 * an implicit logout to the target but it failed, most likely
3016 		 * due to I/O being pending, so we need to send it again
3017 		 */
3018 		ibmvfc_del_tgt(tgt);
3019 		ibmvfc_reinit_host(vhost);
3020 	}
3021 
3022 	spin_unlock_irqrestore(shost->host_lock, flags);
3023 	LEAVE;
3024 }
3025 
3026 static const struct ibmvfc_async_desc ae_desc[] = {
3027 	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3028 	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3029 	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3030 	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3031 	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
3032 	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
3033 	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
3034 	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
3035 	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
3036 	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
3037 	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
3038 	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
3039 	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
3040 };
3041 
3042 static const struct ibmvfc_async_desc unknown_ae = {
3043 	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
3044 };
3045 
3046 /**
3047  * ibmvfc_get_ae_desc - Get text description for async event
3048  * @ae:	async event
3049  *
3050  **/
3051 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3052 {
3053 	int i;
3054 
3055 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3056 		if (ae_desc[i].ae == ae)
3057 			return &ae_desc[i];
3058 
3059 	return &unknown_ae;
3060 }
3061 
3062 static const struct {
3063 	enum ibmvfc_ae_link_state state;
3064 	const char *desc;
3065 } link_desc[] = {
3066 	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
3067 	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
3068 	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
3069 	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
3070 };
3071 
3072 /**
3073  * ibmvfc_get_link_state - Get text description for link state
3074  * @state:	link state
3075  *
3076  **/
3077 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3078 {
3079 	int i;
3080 
3081 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3082 		if (link_desc[i].state == state)
3083 			return link_desc[i].desc;
3084 
3085 	return "";
3086 }
3087 
3088 /**
3089  * ibmvfc_handle_async - Handle an async event from the adapter
3090  * @crq:	crq to process
3091  * @vhost:	ibmvfc host struct
3092  *
3093  **/
3094 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3095 				struct ibmvfc_host *vhost)
3096 {
3097 	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3098 	struct ibmvfc_target *tgt;
3099 
3100 	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3101 		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3102 		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3103 		   ibmvfc_get_link_state(crq->link_state));
3104 
3105 	switch (be64_to_cpu(crq->event)) {
3106 	case IBMVFC_AE_RESUME:
3107 		switch (crq->link_state) {
3108 		case IBMVFC_AE_LS_LINK_DOWN:
3109 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3110 			break;
3111 		case IBMVFC_AE_LS_LINK_DEAD:
3112 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3113 			break;
3114 		case IBMVFC_AE_LS_LINK_UP:
3115 		case IBMVFC_AE_LS_LINK_BOUNCED:
3116 		default:
3117 			vhost->events_to_log |= IBMVFC_AE_LINKUP;
3118 			vhost->delay_init = 1;
3119 			__ibmvfc_reset_host(vhost);
3120 			break;
3121 		}
3122 
3123 		break;
3124 	case IBMVFC_AE_LINK_UP:
3125 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
3126 		vhost->delay_init = 1;
3127 		__ibmvfc_reset_host(vhost);
3128 		break;
3129 	case IBMVFC_AE_SCN_FABRIC:
3130 	case IBMVFC_AE_SCN_DOMAIN:
3131 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3132 		if (vhost->state < IBMVFC_HALTED) {
3133 			vhost->delay_init = 1;
3134 			__ibmvfc_reset_host(vhost);
3135 		}
3136 		break;
3137 	case IBMVFC_AE_SCN_NPORT:
3138 	case IBMVFC_AE_SCN_GROUP:
3139 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3140 		ibmvfc_reinit_host(vhost);
3141 		break;
3142 	case IBMVFC_AE_ELS_LOGO:
3143 	case IBMVFC_AE_ELS_PRLO:
3144 	case IBMVFC_AE_ELS_PLOGI:
3145 		list_for_each_entry(tgt, &vhost->targets, queue) {
3146 			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3147 				break;
3148 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3149 				continue;
3150 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3151 				continue;
3152 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3153 				continue;
3154 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3155 				tgt->logo_rcvd = 1;
3156 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3157 				ibmvfc_del_tgt(tgt);
3158 				ibmvfc_reinit_host(vhost);
3159 			}
3160 		}
3161 		break;
3162 	case IBMVFC_AE_LINK_DOWN:
3163 	case IBMVFC_AE_ADAPTER_FAILED:
3164 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3165 		break;
3166 	case IBMVFC_AE_LINK_DEAD:
3167 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3168 		break;
3169 	case IBMVFC_AE_HALT:
3170 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
3171 		break;
3172 	default:
3173 		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
3174 		break;
3175 	}
3176 }
3177 
3178 /**
3179  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3180  * @crq:	Command/Response queue
3181  * @vhost:	ibmvfc host struct
3182  * @evt_doneq:	Event done queue
3183  *
3184  **/
3185 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3186 			      struct list_head *evt_doneq)
3187 {
3188 	long rc;
3189 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3190 
3191 	switch (crq->valid) {
3192 	case IBMVFC_CRQ_INIT_RSP:
3193 		switch (crq->format) {
3194 		case IBMVFC_CRQ_INIT:
3195 			dev_info(vhost->dev, "Partner initialized\n");
3196 			/* Send back a response */
3197 			rc = ibmvfc_send_crq_init_complete(vhost);
3198 			if (rc == 0)
3199 				ibmvfc_init_host(vhost);
3200 			else
3201 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3202 			break;
3203 		case IBMVFC_CRQ_INIT_COMPLETE:
3204 			dev_info(vhost->dev, "Partner initialization complete\n");
3205 			ibmvfc_init_host(vhost);
3206 			break;
3207 		default:
3208 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3209 		}
3210 		return;
3211 	case IBMVFC_CRQ_XPORT_EVENT:
3212 		vhost->state = IBMVFC_NO_CRQ;
3213 		vhost->logged_in = 0;
3214 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3215 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3216 			/* We need to re-setup the interpartition connection */
3217 			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3218 			vhost->client_migrated = 1;
3219 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
3220 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3221 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3222 		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3223 			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3224 			ibmvfc_purge_requests(vhost, DID_ERROR);
3225 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3226 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3227 		} else {
3228 			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3229 		}
3230 		return;
3231 	case IBMVFC_CRQ_CMD_RSP:
3232 		break;
3233 	default:
3234 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3235 		return;
3236 	}
3237 
3238 	if (crq->format == IBMVFC_ASYNC_EVENT)
3239 		return;
3240 
3241 	/* The only kind of payload CRQs we should get are responses to
3242 	 * things we send. Make sure this response is to something we
3243 	 * actually sent
3244 	 */
3245 	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3246 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3247 			crq->ioba);
3248 		return;
3249 	}
3250 
3251 	if (unlikely(atomic_read(&evt->free))) {
3252 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3253 			crq->ioba);
3254 		return;
3255 	}
3256 
3257 	spin_lock(&evt->queue->l_lock);
3258 	list_move_tail(&evt->queue_list, evt_doneq);
3259 	spin_unlock(&evt->queue->l_lock);
3260 }
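/*
 * Editorial note: command completions are only moved onto the caller's
 * evt_doneq here, under the queue's l_lock; the caller drains that list and
 * runs evt->done() after dropping its locks, keeping CRQ processing short.
 */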
3261 
3262 /**
3263  * ibmvfc_scan_finished - Check if the device scan is done.
3264  * @shost:	scsi host struct
3265  * @time:	current elapsed time
3266  *
3267  * Returns:
3268  *	0 if scan is not done / 1 if scan is done
3269  **/
3270 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3271 {
3272 	unsigned long flags;
3273 	struct ibmvfc_host *vhost = shost_priv(shost);
3274 	int done = 0;
3275 
3276 	spin_lock_irqsave(shost->host_lock, flags);
3277 	if (time >= (init_timeout * HZ)) {
3278 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3279 			 "continuing initialization\n", init_timeout);
3280 		done = 1;
3281 	}
3282 
3283 	if (vhost->scan_complete)
3284 		done = 1;
3285 	spin_unlock_irqrestore(shost->host_lock, flags);
3286 	return done;
3287 }
3288 
3289 /**
3290  * ibmvfc_slave_alloc - Setup the device's task set value
3291  * @sdev:	struct scsi_device device to configure
3292  *
3293  * Set the device's task set value so that error handling works as
3294  * expected.
3295  *
3296  * Returns:
3297  *	0 on success / -ENXIO if device does not exist
3298  **/
3299 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3300 {
3301 	struct Scsi_Host *shost = sdev->host;
3302 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3303 	struct ibmvfc_host *vhost = shost_priv(shost);
3304 	unsigned long flags = 0;
3305 
3306 	if (!rport || fc_remote_port_chkready(rport))
3307 		return -ENXIO;
3308 
3309 	spin_lock_irqsave(shost->host_lock, flags);
3310 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3311 	spin_unlock_irqrestore(shost->host_lock, flags);
3312 	return 0;
3313 }
3314 
3315 /**
3316  * ibmvfc_target_alloc - Setup the target's task set value
3317  * @starget:	struct scsi_target
3318  *
3319  * Set the target's task set value so that error handling works as
3320  * expected.
3321  *
3322  * Returns:
3323  *	0 on success / -ENXIO if device does not exist
3324  **/
3325 static int ibmvfc_target_alloc(struct scsi_target *starget)
3326 {
3327 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3328 	struct ibmvfc_host *vhost = shost_priv(shost);
3329 	unsigned long flags = 0;
3330 
3331 	spin_lock_irqsave(shost->host_lock, flags);
3332 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3333 	spin_unlock_irqrestore(shost->host_lock, flags);
3334 	return 0;
3335 }
3336 
3337 /**
3338  * ibmvfc_slave_configure - Configure the device
3339  * @sdev:	struct scsi_device device to configure
3340  *
3341  * Enable allow_restart for a device if it is a disk and extend its
3342  * block request timeout.
3343  *
3344  * Returns:
3345  *	0
3346  **/
3347 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3348 {
3349 	struct Scsi_Host *shost = sdev->host;
3350 	unsigned long flags = 0;
3351 
3352 	spin_lock_irqsave(shost->host_lock, flags);
3353 	if (sdev->type == TYPE_DISK) {
3354 		sdev->allow_restart = 1;
3355 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3356 	}
3357 	spin_unlock_irqrestore(shost->host_lock, flags);
3358 	return 0;
3359 }
3360 
3361 /**
3362  * ibmvfc_change_queue_depth - Change the device's queue depth
3363  * @sdev:	scsi device struct
3364  * @qdepth:	depth to set
3365  *
3366  * Return value:
3367  * 	actual depth set
3368  **/
3369 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3370 {
3371 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3372 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3373 
3374 	return scsi_change_queue_depth(sdev, qdepth);
3375 }
3376 
3377 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3378 						 struct device_attribute *attr, char *buf)
3379 {
3380 	struct Scsi_Host *shost = class_to_shost(dev);
3381 	struct ibmvfc_host *vhost = shost_priv(shost);
3382 
3383 	return snprintf(buf, PAGE_SIZE, "%s\n",
3384 			vhost->login_buf->resp.partition_name);
3385 }
3386 
3387 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3388 					    struct device_attribute *attr, char *buf)
3389 {
3390 	struct Scsi_Host *shost = class_to_shost(dev);
3391 	struct ibmvfc_host *vhost = shost_priv(shost);
3392 
3393 	return snprintf(buf, PAGE_SIZE, "%s\n",
3394 			vhost->login_buf->resp.device_name);
3395 }
3396 
3397 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3398 					 struct device_attribute *attr, char *buf)
3399 {
3400 	struct Scsi_Host *shost = class_to_shost(dev);
3401 	struct ibmvfc_host *vhost = shost_priv(shost);
3402 
3403 	return snprintf(buf, PAGE_SIZE, "%s\n",
3404 			vhost->login_buf->resp.port_loc_code);
3405 }
3406 
3407 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3408 					 struct device_attribute *attr, char *buf)
3409 {
3410 	struct Scsi_Host *shost = class_to_shost(dev);
3411 	struct ibmvfc_host *vhost = shost_priv(shost);
3412 
3413 	return snprintf(buf, PAGE_SIZE, "%s\n",
3414 			vhost->login_buf->resp.drc_name);
3415 }
3416 
3417 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3418 					     struct device_attribute *attr, char *buf)
3419 {
3420 	struct Scsi_Host *shost = class_to_shost(dev);
3421 	struct ibmvfc_host *vhost = shost_priv(shost);
3422 	return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3423 }
3424 
3425 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3426 					     struct device_attribute *attr, char *buf)
3427 {
3428 	struct Scsi_Host *shost = class_to_shost(dev);
3429 	struct ibmvfc_host *vhost = shost_priv(shost);
3430 	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3431 }
3432 
3433 /**
3434  * ibmvfc_show_log_level - Show the adapter's error logging level
3435  * @dev:	class device struct
3436  * @attr:	unused
3437  * @buf:	buffer
3438  *
3439  * Return value:
3440  * 	number of bytes printed to buffer
3441  **/
3442 static ssize_t ibmvfc_show_log_level(struct device *dev,
3443 				     struct device_attribute *attr, char *buf)
3444 {
3445 	struct Scsi_Host *shost = class_to_shost(dev);
3446 	struct ibmvfc_host *vhost = shost_priv(shost);
3447 	unsigned long flags = 0;
3448 	int len;
3449 
3450 	spin_lock_irqsave(shost->host_lock, flags);
3451 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3452 	spin_unlock_irqrestore(shost->host_lock, flags);
3453 	return len;
3454 }
3455 
3456 /**
3457  * ibmvfc_store_log_level - Change the adapter's error logging level
3458  * @dev:	class device struct
3459  * @attr:	unused
3460  * @buf:	buffer
3461  * @count:      buffer size
3462  *
3463  * Return value:
3464  * 	number of bytes consumed from the buffer
3465  **/
3466 static ssize_t ibmvfc_store_log_level(struct device *dev,
3467 				      struct device_attribute *attr,
3468 				      const char *buf, size_t count)
3469 {
3470 	struct Scsi_Host *shost = class_to_shost(dev);
3471 	struct ibmvfc_host *vhost = shost_priv(shost);
3472 	unsigned long flags = 0;
3473 
3474 	spin_lock_irqsave(shost->host_lock, flags);
3475 	vhost->log_level = simple_strtoul(buf, NULL, 10);
3476 	spin_unlock_irqrestore(shost->host_lock, flags);
3477 	return strlen(buf);
3478 }
3479 
3480 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3481 					 struct device_attribute *attr, char *buf)
3482 {
3483 	struct Scsi_Host *shost = class_to_shost(dev);
3484 	struct ibmvfc_host *vhost = shost_priv(shost);
3485 	unsigned long flags = 0;
3486 	int len;
3487 
3488 	spin_lock_irqsave(shost->host_lock, flags);
3489 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
3490 	spin_unlock_irqrestore(shost->host_lock, flags);
3491 	return len;
3492 }
3493 
3494 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3495 					 struct device_attribute *attr,
3496 					 const char *buf, size_t count)
3497 {
3498 	struct Scsi_Host *shost = class_to_shost(dev);
3499 	struct ibmvfc_host *vhost = shost_priv(shost);
3500 	unsigned long flags = 0;
3501 	unsigned int channels;
3502 
3503 	spin_lock_irqsave(shost->host_lock, flags);
3504 	channels = simple_strtoul(buf, NULL, 10);
3505 	vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
3506 	ibmvfc_hard_reset_host(vhost);
3507 	spin_unlock_irqrestore(shost->host_lock, flags);
3508 	return strlen(buf);
3509 }
3510 
3511 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3512 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3513 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3514 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3515 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3516 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3517 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3518 		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3519 static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3520 		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
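
/*
 * Usage sketch (sysfs paths assumed, not part of the original source):
 * the attributes above appear under the scsi_host class device, e.g.
 * for host2:
 *
 *	cat /sys/class/scsi_host/host2/partition_name
 *	echo 3 > /sys/class/scsi_host/host2/log_level
 *	echo 4 > /sys/class/scsi_host/host2/nr_scsi_channels
 *
 * Note that storing nr_scsi_channels calls ibmvfc_hard_reset_host(), so
 * changing the channel count is disruptive to outstanding I/O.
 */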
3521 
3522 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3523 /**
3524  * ibmvfc_read_trace - Dump the adapter trace
3525  * @filp:		open sysfs file
3526  * @kobj:		kobject struct
3527  * @bin_attr:	bin_attribute struct
3528  * @buf:		buffer
3529  * @off:		offset
3530  * @count:		buffer size
3531  *
3532  * Return value:
3533  *	number of bytes read from the trace buffer
3534  **/
3535 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3536 				 struct bin_attribute *bin_attr,
3537 				 char *buf, loff_t off, size_t count)
3538 {
3539 	struct device *dev = kobj_to_dev(kobj);
3540 	struct Scsi_Host *shost = class_to_shost(dev);
3541 	struct ibmvfc_host *vhost = shost_priv(shost);
3542 	unsigned long flags = 0;
3543 	int size = IBMVFC_TRACE_SIZE;
3544 	char *src = (char *)vhost->trace;
3545 
3546 	if (off > size)
3547 		return 0;
3548 	if (off + count > size) {
3549 		size -= off;
3550 		count = size;
3551 	}
3552 
3553 	spin_lock_irqsave(shost->host_lock, flags);
3554 	memcpy(buf, &src[off], count);
3555 	spin_unlock_irqrestore(shost->host_lock, flags);
3556 	return count;
3557 }
3558 
3559 static struct bin_attribute ibmvfc_trace_attr = {
3560 	.attr =	{
3561 		.name = "trace",
3562 		.mode = S_IRUGO,
3563 	},
3564 	.size = 0,
3565 	.read = ibmvfc_read_trace,
3566 };
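
/*
 * Usage sketch (path assumed): with CONFIG_SCSI_IBMVFC_TRACE set, the
 * trace buffer is exposed as a read-only binary attribute, e.g.:
 *
 *	dd if=/sys/class/scsi_host/host2/trace of=/tmp/ibmvfc.trc
 *
 * Each chunk is copied out under the host lock in ibmvfc_read_trace(),
 * so an individual read is internally consistent, though the buffer may
 * advance between reads.
 */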
3567 #endif
3568 
3569 static struct device_attribute *ibmvfc_attrs[] = {
3570 	&dev_attr_partition_name,
3571 	&dev_attr_device_name,
3572 	&dev_attr_port_loc_code,
3573 	&dev_attr_drc_name,
3574 	&dev_attr_npiv_version,
3575 	&dev_attr_capabilities,
3576 	&dev_attr_log_level,
3577 	&dev_attr_nr_scsi_channels,
3578 	NULL
3579 };
3580 
3581 static struct scsi_host_template driver_template = {
3582 	.module = THIS_MODULE,
3583 	.name = "IBM POWER Virtual FC Adapter",
3584 	.proc_name = IBMVFC_NAME,
3585 	.queuecommand = ibmvfc_queuecommand,
3586 	.eh_timed_out = fc_eh_timed_out,
3587 	.eh_abort_handler = ibmvfc_eh_abort_handler,
3588 	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3589 	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3590 	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3591 	.slave_alloc = ibmvfc_slave_alloc,
3592 	.slave_configure = ibmvfc_slave_configure,
3593 	.target_alloc = ibmvfc_target_alloc,
3594 	.scan_finished = ibmvfc_scan_finished,
3595 	.change_queue_depth = ibmvfc_change_queue_depth,
3596 	.cmd_per_lun = 16,
3597 	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3598 	.this_id = -1,
3599 	.sg_tablesize = SG_ALL,
3600 	.max_sectors = IBMVFC_MAX_SECTORS,
3601 	.shost_attrs = ibmvfc_attrs,
3602 	.track_queue_depth = 1,
3603 	.host_tagset = 1,
3604 };
3605 
3606 /**
3607  * ibmvfc_next_async_crq - Returns the next entry in async queue
3608  * @vhost:	ibmvfc host struct
3609  *
3610  * Returns:
3611  *	Pointer to next entry in queue / NULL if empty
3612  **/
3613 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3614 {
3615 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
3616 	struct ibmvfc_async_crq *crq;
3617 
3618 	crq = &async_crq->msgs.async[async_crq->cur];
3619 	if (crq->valid & 0x80) {
3620 		if (++async_crq->cur == async_crq->size)
3621 			async_crq->cur = 0;
3622 		rmb();
3623 	} else
3624 		crq = NULL;
3625 
3626 	return crq;
3627 }
3628 
3629 /**
3630  * ibmvfc_next_crq - Returns the next entry in message queue
3631  * @vhost:	ibmvfc host struct
3632  *
3633  * Returns:
3634  *	Pointer to next entry in queue / NULL if empty
3635  **/
3636 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3637 {
3638 	struct ibmvfc_queue *queue = &vhost->crq;
3639 	struct ibmvfc_crq *crq;
3640 
3641 	crq = &queue->msgs.crq[queue->cur];
3642 	if (crq->valid & 0x80) {
3643 		if (++queue->cur == queue->size)
3644 			queue->cur = 0;
3645 		rmb();
3646 	} else
3647 		crq = NULL;
3648 
3649 	return crq;
3650 }
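
/*
 * ibmvfc_next_async_crq() and ibmvfc_next_crq() above share the same
 * single-consumer ring protocol: the valid byte's top bit (0x80) is set
 * by the producer when an entry is posted; the consumer advances its
 * cursor with wraparound and issues an rmb() so that reads of the entry
 * payload are ordered after the read of the valid flag. Once handled,
 * the caller clears ->valid and issues a wmb() to hand the slot back.
 */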
3651 
3652 /**
3653  * ibmvfc_interrupt - Interrupt handler
3654  * @irq:		number of irq to handle, not used
3655  * @dev_instance: ibmvfc_host that received interrupt
3656  *
3657  * Returns:
3658  *	IRQ_HANDLED
3659  **/
3660 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3661 {
3662 	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3663 	unsigned long flags;
3664 
3665 	spin_lock_irqsave(vhost->host->host_lock, flags);
3666 	vio_disable_interrupts(to_vio_dev(vhost->dev));
3667 	tasklet_schedule(&vhost->tasklet);
3668 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3669 	return IRQ_HANDLED;
3670 }
3671 
3672 /**
3673  * ibmvfc_tasklet - Interrupt handler tasklet
3674  * @data:		ibmvfc host struct
3675  *
3676  * Returns:
3677  *	Nothing
3678  **/
3679 static void ibmvfc_tasklet(void *data)
3680 {
3681 	struct ibmvfc_host *vhost = data;
3682 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
3683 	struct ibmvfc_crq *crq;
3684 	struct ibmvfc_async_crq *async;
3685 	struct ibmvfc_event *evt, *temp;
3686 	unsigned long flags;
3687 	int done = 0;
3688 	LIST_HEAD(evt_doneq);
3689 
3690 	spin_lock_irqsave(vhost->host->host_lock, flags);
3691 	spin_lock(vhost->crq.q_lock);
3692 	while (!done) {
3693 		/* Pull all the valid messages off the async CRQ */
3694 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3695 			ibmvfc_handle_async(async, vhost);
3696 			async->valid = 0;
3697 			wmb();
3698 		}
3699 
3700 		/* Pull all the valid messages off the CRQ */
3701 		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3702 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3703 			crq->valid = 0;
3704 			wmb();
3705 		}
3706 
3707 		vio_enable_interrupts(vdev);
3708 		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3709 			vio_disable_interrupts(vdev);
3710 			ibmvfc_handle_async(async, vhost);
3711 			async->valid = 0;
3712 			wmb();
3713 		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3714 			vio_disable_interrupts(vdev);
3715 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3716 			crq->valid = 0;
3717 			wmb();
3718 		} else
3719 			done = 1;
3720 	}
3721 
3722 	spin_unlock(vhost->crq.q_lock);
3723 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3724 
3725 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3726 		del_timer(&evt->timer);
3727 		list_del(&evt->queue_list);
3728 		ibmvfc_trc_end(evt);
3729 		evt->done(evt);
3730 	}
3731 }
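
/*
 * The enable-then-recheck pattern in ibmvfc_tasklet() closes the race
 * between draining the queues and re-enabling interrupts: after
 * vio_enable_interrupts(), each queue is polled once more to catch an
 * entry that arrived while interrupts were still off (and so could not
 * raise a fresh interrupt). Only when both queues are empty with
 * interrupts enabled does the loop exit. Completion callbacks then run
 * off the local evt_doneq list, outside the host lock.
 */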
3732 
3733 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3734 {
3735 	struct device *dev = scrq->vhost->dev;
3736 	struct vio_dev *vdev = to_vio_dev(dev);
3737 	unsigned long rc;
3738 	int irq_action = H_ENABLE_VIO_INTERRUPT;
3739 
3740 	if (!enable)
3741 		irq_action = H_DISABLE_VIO_INTERRUPT;
3742 
3743 	rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3744 				scrq->hw_irq, 0, 0);
3745 
3746 	if (rc)
3747 		dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3748 			enable ? "enable" : "disable", scrq->hwq_id, rc);
3749 
3750 	return rc;
3751 }
3752 
3753 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3754 			       struct list_head *evt_doneq)
3755 {
3756 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3757 
3758 	switch (crq->valid) {
3759 	case IBMVFC_CRQ_CMD_RSP:
3760 		break;
3761 	case IBMVFC_CRQ_XPORT_EVENT:
3762 		return;
3763 	default:
3764 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3765 		return;
3766 	}
3767 
3768 	/* The only kind of payload CRQs we should get are responses to
3769 	 * things we send. Make sure this response is to something we
3770 	 * actually sent.
3771 	 */
3772 	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3773 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3774 			crq->ioba);
3775 		return;
3776 	}
3777 
3778 	if (unlikely(atomic_read(&evt->free))) {
3779 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3780 			crq->ioba);
3781 		return;
3782 	}
3783 
3784 	spin_lock(&evt->queue->l_lock);
3785 	list_move_tail(&evt->queue_list, evt_doneq);
3786 	spin_unlock(&evt->queue->l_lock);
3787 }
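
/*
 * Correlation scheme used above: the response's ioba field carries back
 * the correlation token supplied in the request, which this driver sets
 * to the address of the originating ibmvfc_event, so the pointer can be
 * recovered with a cast. ibmvfc_valid_event() and the free-bit check
 * reject corrupted or duplicated tokens before the event is moved onto
 * the done queue.
 */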
3788 
3789 static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3790 {
3791 	struct ibmvfc_crq *crq;
3792 
3793 	crq = &scrq->msgs.scrq[scrq->cur].crq;
3794 	if (crq->valid & 0x80) {
3795 		if (++scrq->cur == scrq->size)
3796 			scrq->cur = 0;
3797 		rmb();
3798 	} else
3799 		crq = NULL;
3800 
3801 	return crq;
3802 }
3803 
3804 static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3805 {
3806 	struct ibmvfc_crq *crq;
3807 	struct ibmvfc_event *evt, *temp;
3808 	unsigned long flags;
3809 	int done = 0;
3810 	LIST_HEAD(evt_doneq);
3811 
3812 	spin_lock_irqsave(scrq->q_lock, flags);
3813 	while (!done) {
3814 		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3815 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3816 			crq->valid = 0;
3817 			wmb();
3818 		}
3819 
3820 		ibmvfc_toggle_scrq_irq(scrq, 1);
3821 		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3822 			ibmvfc_toggle_scrq_irq(scrq, 0);
3823 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3824 			crq->valid = 0;
3825 			wmb();
3826 		} else
3827 			done = 1;
3828 	}
3829 	spin_unlock_irqrestore(scrq->q_lock, flags);
3830 
3831 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3832 		del_timer(&evt->timer);
3833 		list_del(&evt->queue_list);
3834 		ibmvfc_trc_end(evt);
3835 		evt->done(evt);
3836 	}
3837 }
3838 
3839 static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
3840 {
3841 	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3842 
3843 	ibmvfc_toggle_scrq_irq(scrq, 0);
3844 	ibmvfc_drain_sub_crq(scrq);
3845 
3846 	return IRQ_HANDLED;
3847 }
3848 
3849 /**
3850  * ibmvfc_init_tgt - Set the next init job step for the target
3851  * @tgt:		ibmvfc target struct
3852  * @job_step:	job step to perform
3853  *
3854  **/
3855 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3856 			    void (*job_step) (struct ibmvfc_target *))
3857 {
3858 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3859 		tgt->job_step = job_step;
3860 	wake_up(&tgt->vhost->work_wait_q);
3861 }
3862 
3863 /**
3864  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3865  * @tgt:		ibmvfc target struct
3866  * @job_step:	initialization job step
3867  *
3868  * Returns: 1 if step will be retried / 0 if not
3869  *
3870  **/
3871 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3872 				  void (*job_step) (struct ibmvfc_target *))
3873 {
3874 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3875 		ibmvfc_del_tgt(tgt);
3876 		wake_up(&tgt->vhost->work_wait_q);
3877 		return 0;
3878 	} else
3879 		ibmvfc_init_tgt(tgt, job_step);
3880 	return 1;
3881 }
3882 
3883 /* Defined in FC-LS */
3884 static const struct {
3885 	int code;
3886 	int retry;
3887 	int logged_in;
3888 } prli_rsp[] = {
3889 	{ 0, 1, 0 },
3890 	{ 1, 0, 1 },
3891 	{ 2, 1, 0 },
3892 	{ 3, 1, 0 },
3893 	{ 4, 0, 0 },
3894 	{ 5, 0, 0 },
3895 	{ 6, 0, 1 },
3896 	{ 7, 0, 0 },
3897 	{ 8, 1, 0 },
3898 };
3899 
3900 /**
3901  * ibmvfc_get_prli_rsp - Find PRLI response index
3902  * @flags:	PRLI response flags
3903  *
3904  *
 * Returns:
 *	index of the matching prli_rsp[] entry (index 0 if no code matches)
3905 static int ibmvfc_get_prli_rsp(u16 flags)
3906 {
3907 	int i;
3908 	int code = (flags & 0x0f00) >> 8;
3909 
3910 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3911 		if (prli_rsp[i].code == code)
3912 			return i;
3913 
3914 	return 0;
3915 }
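
/*
 * Worked example: the PRLI response code sits in bits 11:8 of the
 * service parameter flags word, hence (flags & 0x0f00) >> 8. Flags of
 * 0x2100 give code 1, matching prli_rsp[1] = { 1, 0, 1 }: not retryable
 * and logged in. An unrecognized code falls through to index 0, whose
 * entry is marked retryable.
 */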
3916 
3917 /**
3918  * ibmvfc_tgt_prli_done - Completion handler for Process Login
3919  * @evt:	ibmvfc event struct
3920  *
3921  **/
3922 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3923 {
3924 	struct ibmvfc_target *tgt = evt->tgt;
3925 	struct ibmvfc_host *vhost = evt->vhost;
3926 	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3927 	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3928 	u32 status = be16_to_cpu(rsp->common.status);
3929 	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3930 
3931 	vhost->discovery_threads--;
3932 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3933 	switch (status) {
3934 	case IBMVFC_MAD_SUCCESS:
3935 		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3936 			parms->type, parms->flags, parms->service_parms);
3937 
3938 		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3939 			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
3940 			if (prli_rsp[index].logged_in) {
3941 				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
3942 					tgt->need_login = 0;
3943 					tgt->ids.roles = 0;
3944 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
3945 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3946 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
3947 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3948 					tgt->add_rport = 1;
3949 				} else
3950 					ibmvfc_del_tgt(tgt);
3951 			} else if (prli_rsp[index].retry)
3952 				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3953 			else
3954 				ibmvfc_del_tgt(tgt);
3955 		} else
3956 			ibmvfc_del_tgt(tgt);
3957 		break;
3958 	case IBMVFC_MAD_DRIVER_FAILED:
3959 		break;
3960 	case IBMVFC_MAD_CRQ_ERROR:
3961 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3962 		break;
3963 	case IBMVFC_MAD_FAILED:
3964 	default:
3965 		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
3966 		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
3967 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3968 		else if (tgt->logo_rcvd)
3969 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3970 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3971 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3972 		else
3973 			ibmvfc_del_tgt(tgt);
3974 
3975 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3976 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3977 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3978 		break;
3979 	}
3980 
3981 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3982 	ibmvfc_free_event(evt);
3983 	wake_up(&vhost->work_wait_q);
3984 }
3985 
3986 /**
3987  * ibmvfc_tgt_send_prli - Send a process login
3988  * @tgt:	ibmvfc target struct
3989  *
3990  **/
3991 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3992 {
3993 	struct ibmvfc_process_login *prli;
3994 	struct ibmvfc_host *vhost = tgt->vhost;
3995 	struct ibmvfc_event *evt;
3996 
3997 	if (vhost->discovery_threads >= disc_threads)
3998 		return;
3999 
4000 	kref_get(&tgt->kref);
4001 	evt = ibmvfc_get_event(&vhost->crq);
4002 	vhost->discovery_threads++;
4003 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
4004 	evt->tgt = tgt;
4005 	prli = &evt->iu.prli;
4006 	memset(prli, 0, sizeof(*prli));
4007 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4008 		prli->common.version = cpu_to_be32(2);
4009 		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
4010 	} else {
4011 		prli->common.version = cpu_to_be32(1);
4012 	}
4013 	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
4014 	prli->common.length = cpu_to_be16(sizeof(*prli));
4015 	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
4016 
4017 	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
4018 	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
4019 	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
4020 	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
4021 
4022 	if (cls3_error)
4023 		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
4024 
4025 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4026 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4027 		vhost->discovery_threads--;
4028 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4029 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4030 	} else
4031 		tgt_dbg(tgt, "Sent process login\n");
4032 }
4033 
4034 /**
4035  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
4036  * @evt:	ibmvfc event struct
4037  *
4038  **/
4039 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
4040 {
4041 	struct ibmvfc_target *tgt = evt->tgt;
4042 	struct ibmvfc_host *vhost = evt->vhost;
4043 	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
4044 	u32 status = be16_to_cpu(rsp->common.status);
4045 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4046 
4047 	vhost->discovery_threads--;
4048 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4049 	switch (status) {
4050 	case IBMVFC_MAD_SUCCESS:
4051 		tgt_dbg(tgt, "Port Login succeeded\n");
4052 		if (tgt->ids.port_name &&
4053 		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
4054 			vhost->reinit = 1;
4055 			tgt_dbg(tgt, "Port re-init required\n");
4056 			break;
4057 		}
4058 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4059 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4060 		tgt->ids.port_id = tgt->scsi_id;
4061 		memcpy(&tgt->service_parms, &rsp->service_parms,
4062 		       sizeof(tgt->service_parms));
4063 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4064 		       sizeof(tgt->service_parms_change));
4065 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4066 		break;
4067 	case IBMVFC_MAD_DRIVER_FAILED:
4068 		break;
4069 	case IBMVFC_MAD_CRQ_ERROR:
4070 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4071 		break;
4072 	case IBMVFC_MAD_FAILED:
4073 	default:
4074 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4075 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4076 		else
4077 			ibmvfc_del_tgt(tgt);
4078 
4079 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4080 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4081 					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4082 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4083 			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4084 		break;
4085 	}
4086 
4087 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4088 	ibmvfc_free_event(evt);
4089 	wake_up(&vhost->work_wait_q);
4090 }
4091 
4092 /**
4093  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4094  * @tgt:	ibmvfc target struct
4095  *
4096  **/
4097 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4098 {
4099 	struct ibmvfc_port_login *plogi;
4100 	struct ibmvfc_host *vhost = tgt->vhost;
4101 	struct ibmvfc_event *evt;
4102 
4103 	if (vhost->discovery_threads >= disc_threads)
4104 		return;
4105 
4106 	kref_get(&tgt->kref);
4107 	tgt->logo_rcvd = 0;
4108 	evt = ibmvfc_get_event(&vhost->crq);
4109 	vhost->discovery_threads++;
4110 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4111 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4112 	evt->tgt = tgt;
4113 	plogi = &evt->iu.plogi;
4114 	memset(plogi, 0, sizeof(*plogi));
4115 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4116 		plogi->common.version = cpu_to_be32(2);
4117 		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4118 	} else {
4119 		plogi->common.version = cpu_to_be32(1);
4120 	}
4121 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4122 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
4123 	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4124 
4125 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4126 		vhost->discovery_threads--;
4127 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4128 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4129 	} else
4130 		tgt_dbg(tgt, "Sent port login\n");
4131 }
4132 
4133 /**
4134  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4135  * @evt:	ibmvfc event struct
4136  *
4137  **/
4138 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4139 {
4140 	struct ibmvfc_target *tgt = evt->tgt;
4141 	struct ibmvfc_host *vhost = evt->vhost;
4142 	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4143 	u32 status = be16_to_cpu(rsp->common.status);
4144 
4145 	vhost->discovery_threads--;
4146 	ibmvfc_free_event(evt);
4147 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4148 
4149 	switch (status) {
4150 	case IBMVFC_MAD_SUCCESS:
4151 		tgt_dbg(tgt, "Implicit Logout succeeded\n");
4152 		break;
4153 	case IBMVFC_MAD_DRIVER_FAILED:
4154 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4155 		wake_up(&vhost->work_wait_q);
4156 		return;
4157 	case IBMVFC_MAD_FAILED:
4158 	default:
4159 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4160 		break;
4161 	}
4162 
4163 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4164 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4165 	wake_up(&vhost->work_wait_q);
4166 }
4167 
4168 /**
4169  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4170  * @tgt:		ibmvfc target struct
4171  * @done:		Routine to call when the event is responded to
4172  *
4173  * Returns:
4174  *	Allocated and initialized ibmvfc_event struct
4175  **/
4176 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4177 								 void (*done) (struct ibmvfc_event *))
4178 {
4179 	struct ibmvfc_implicit_logout *mad;
4180 	struct ibmvfc_host *vhost = tgt->vhost;
4181 	struct ibmvfc_event *evt;
4182 
4183 	kref_get(&tgt->kref);
4184 	evt = ibmvfc_get_event(&vhost->crq);
4185 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4186 	evt->tgt = tgt;
4187 	mad = &evt->iu.implicit_logout;
4188 	memset(mad, 0, sizeof(*mad));
4189 	mad->common.version = cpu_to_be32(1);
4190 	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4191 	mad->common.length = cpu_to_be16(sizeof(*mad));
4192 	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4193 	return evt;
4194 }
4195 
4196 /**
4197  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4198  * @tgt:		ibmvfc target struct
4199  *
4200  **/
4201 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4202 {
4203 	struct ibmvfc_host *vhost = tgt->vhost;
4204 	struct ibmvfc_event *evt;
4205 
4206 	if (vhost->discovery_threads >= disc_threads)
4207 		return;
4208 
4209 	vhost->discovery_threads++;
4210 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4211 						   ibmvfc_tgt_implicit_logout_done);
4212 
4213 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4214 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4215 		vhost->discovery_threads--;
4216 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4217 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4218 	} else
4219 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4220 }
4221 
4222 /**
4223  * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4224  * @evt:	ibmvfc event struct
4225  *
4226  **/
4227 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4228 {
4229 	struct ibmvfc_target *tgt = evt->tgt;
4230 	struct ibmvfc_host *vhost = evt->vhost;
4231 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4232 	u32 status = be16_to_cpu(mad->common.status);
4233 
4234 	vhost->discovery_threads--;
4235 	ibmvfc_free_event(evt);
4236 
4237 	/*
4238 	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4239 	 * driver, in which case we need to free up all the targets. If we are
4240 	 * not unloading, we will still go through a hard reset to get out of
4241 	 * offline state, so there is no need to track the old targets in that
4242 	 * case.
4243 	 */
4244 	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4245 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4246 	else
4247 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4248 
4249 	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4250 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4251 	wake_up(&vhost->work_wait_q);
4252 }
4253 
4254 /**
4255  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4256  * @tgt:		ibmvfc target struct
4257  *
4258  **/
4259 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4260 {
4261 	struct ibmvfc_host *vhost = tgt->vhost;
4262 	struct ibmvfc_event *evt;
4263 
4264 	if (!vhost->logged_in) {
4265 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4266 		return;
4267 	}
4268 
4269 	if (vhost->discovery_threads >= disc_threads)
4270 		return;
4271 
4272 	vhost->discovery_threads++;
4273 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4274 						   ibmvfc_tgt_implicit_logout_and_del_done);
4275 
4276 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4277 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4278 		vhost->discovery_threads--;
4279 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4280 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4281 	} else
4282 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4283 }
4284 
4285 /**
4286  * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4287  * @evt:	ibmvfc event struct
4288  *
4289  **/
4290 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4291 {
4292 	struct ibmvfc_target *tgt = evt->tgt;
4293 	struct ibmvfc_host *vhost = evt->vhost;
4294 	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4295 	u32 status = be16_to_cpu(rsp->common.status);
4296 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4297 
4298 	vhost->discovery_threads--;
4299 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4300 	switch (status) {
4301 	case IBMVFC_MAD_SUCCESS:
4302 		tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
4303 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4304 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4305 		tgt->ids.port_id = tgt->scsi_id;
4306 		memcpy(&tgt->service_parms, &rsp->service_parms,
4307 		       sizeof(tgt->service_parms));
4308 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4309 		       sizeof(tgt->service_parms_change));
4310 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4311 		break;
4312 	case IBMVFC_MAD_DRIVER_FAILED:
4313 		break;
4314 	case IBMVFC_MAD_CRQ_ERROR:
4315 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4316 		break;
4317 	case IBMVFC_MAD_FAILED:
4318 	default:
4319 		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4320 
4321 		tgt_log(tgt, level,
4322 			"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4323 			tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4324 			status);
4325 		break;
4326 	}
4327 
4328 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4329 	ibmvfc_free_event(evt);
4330 	wake_up(&vhost->work_wait_q);
4331 }
4332 
4333 
4334 /**
4335  * ibmvfc_tgt_move_login - Initiate a move login for specified target
4336  * @tgt:		ibmvfc target struct
4337  *
4338  **/
4339 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4340 {
4341 	struct ibmvfc_host *vhost = tgt->vhost;
4342 	struct ibmvfc_move_login *move;
4343 	struct ibmvfc_event *evt;
4344 
4345 	if (vhost->discovery_threads >= disc_threads)
4346 		return;
4347 
4348 	kref_get(&tgt->kref);
4349 	evt = ibmvfc_get_event(&vhost->crq);
4350 	vhost->discovery_threads++;
4351 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4352 	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4353 	evt->tgt = tgt;
4354 	move = &evt->iu.move_login;
4355 	memset(move, 0, sizeof(*move));
4356 	move->common.version = cpu_to_be32(1);
4357 	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4358 	move->common.length = cpu_to_be16(sizeof(*move));
4359 
4360 	move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
4361 	move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
4362 	move->wwpn = cpu_to_be64(tgt->wwpn);
4363 	move->node_name = cpu_to_be64(tgt->ids.node_name);
4364 
4365 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4366 		vhost->discovery_threads--;
4367 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4368 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4369 	} else
4370 		tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
4371 }
4372 
4373 /**
4374  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4375  * @mad:	ibmvfc passthru mad struct
4376  * @tgt:	ibmvfc target struct
4377  *
4378  * Returns:
4379  *	1 if PLOGI needed / 0 if PLOGI not needed
4380  **/
4381 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4382 				    struct ibmvfc_target *tgt)
4383 {
4384 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4385 		return 1;
4386 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4387 		return 1;
4388 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4389 		return 1;
4390 	return 0;
4391 }
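
/*
 * ADISC accept payload layout assumed above: response words 2-3 hold the
 * port name, words 4-5 the node name, and word 6 the N_Port ID. If any
 * of these differ from what was cached at PLOGI time, the login state is
 * stale; the ADISC completion handler then deletes the target so that
 * rediscovery performs a fresh PLOGI.
 */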
4392 
4393 /**
4394  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4395  * @evt:	ibmvfc event struct
4396  *
4397  **/
4398 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4399 {
4400 	struct ibmvfc_target *tgt = evt->tgt;
4401 	struct ibmvfc_host *vhost = evt->vhost;
4402 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4403 	u32 status = be16_to_cpu(mad->common.status);
4404 	u8 fc_reason, fc_explain;
4405 
4406 	vhost->discovery_threads--;
4407 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4408 	del_timer(&tgt->timer);
4409 
4410 	switch (status) {
4411 	case IBMVFC_MAD_SUCCESS:
4412 		tgt_dbg(tgt, "ADISC succeeded\n");
4413 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
4414 			ibmvfc_del_tgt(tgt);
4415 		break;
4416 	case IBMVFC_MAD_DRIVER_FAILED:
4417 		break;
4418 	case IBMVFC_MAD_FAILED:
4419 	default:
4420 		ibmvfc_del_tgt(tgt);
4421 		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4422 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4423 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4424 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4425 			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4426 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
4427 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4428 		break;
4429 	}
4430 
4431 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4432 	ibmvfc_free_event(evt);
4433 	wake_up(&vhost->work_wait_q);
4434 }
4435 
4436 /**
4437  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4438  * @evt:		ibmvfc event struct
4439  *
4440  **/
4441 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4442 {
4443 	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4444 
4445 	memset(mad, 0, sizeof(*mad));
4446 	mad->common.version = cpu_to_be32(1);
4447 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4448 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
4449 	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4450 		offsetof(struct ibmvfc_passthru_mad, iu));
4451 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4452 	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4453 	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4454 	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4455 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4456 		offsetof(struct ibmvfc_passthru_fc_iu, payload));
4457 	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4458 	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4459 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4460 		offsetof(struct ibmvfc_passthru_fc_iu, response));
4461 	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4462 }
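
/*
 * All three DMA descriptors built above point back into the event's own
 * mapped buffer: cmd_ioba addresses the iu portion of the MAD, while
 * iu.cmd and iu.rsp address the payload and response areas of the fc_iu.
 * Each address is the event's base ioba plus an offsetof() into struct
 * ibmvfc_passthru_mad, so no separate DMA allocation is needed for the
 * passthru ELS.
 */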
4463 
4464 /**
4465  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4466  * @evt:		ibmvfc event struct
4467  *
4468  * Just clean up this event struct. Everything else is handled by
4469  * the ADISC completion handler. If the ADISC never actually comes
4470  * back, we still have the timer running on the ADISC event struct
4471  * which will fire and cause the CRQ to get reset.
4472  *
4473  **/
4474 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4475 {
4476 	struct ibmvfc_host *vhost = evt->vhost;
4477 	struct ibmvfc_target *tgt = evt->tgt;
4478 
4479 	tgt_dbg(tgt, "ADISC cancel complete\n");
4480 	vhost->abort_threads--;
4481 	ibmvfc_free_event(evt);
4482 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4483 	wake_up(&vhost->work_wait_q);
4484 }
4485 
4486 /**
4487  * ibmvfc_adisc_timeout - Handle an ADISC timeout
4488  * @t:		ibmvfc target struct
4489  *
4490  * If an ADISC times out, send a cancel. If the cancel times
4491  * out, reset the CRQ. When the ADISC comes back as cancelled,
4492  * log back into the target.
4493  **/
4494 static void ibmvfc_adisc_timeout(struct timer_list *t)
4495 {
4496 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4497 	struct ibmvfc_host *vhost = tgt->vhost;
4498 	struct ibmvfc_event *evt;
4499 	struct ibmvfc_tmf *tmf;
4500 	unsigned long flags;
4501 	int rc;
4502 
4503 	tgt_dbg(tgt, "ADISC timeout\n");
4504 	spin_lock_irqsave(vhost->host->host_lock, flags);
4505 	if (vhost->abort_threads >= disc_threads ||
4506 	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4507 	    vhost->state != IBMVFC_INITIALIZING ||
4508 	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4509 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4510 		return;
4511 	}
4512 
4513 	vhost->abort_threads++;
4514 	kref_get(&tgt->kref);
4515 	evt = ibmvfc_get_event(&vhost->crq);
4516 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4517 
4518 	evt->tgt = tgt;
4519 	tmf = &evt->iu.tmf;
4520 	memset(tmf, 0, sizeof(*tmf));
4521 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4522 		tmf->common.version = cpu_to_be32(2);
4523 		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4524 	} else {
4525 		tmf->common.version = cpu_to_be32(1);
4526 	}
4527 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4528 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
4529 	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4530 	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4531 
4532 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
4533 
4534 	if (rc) {
4535 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4536 		vhost->abort_threads--;
4537 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4538 		__ibmvfc_reset_host(vhost);
4539 	} else
4540 		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4541 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4542 }
4543 
4544 /**
4545  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4546  * @tgt:		ibmvfc target struct
4547  *
4548  * When sending an ADISC we end up with two timers running. The
4549  * first timer is the timer in the ibmvfc target struct. If this
4550  * fires, we send a cancel to the target. The second timer is the
4551  * timer on the ibmvfc event for the ADISC, which is longer. If that
4552  * fires, it means the ADISC timed out and our attempt to cancel it
4553  * also failed, so we need to reset the CRQ.
4554  **/
4555 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4556 {
4557 	struct ibmvfc_passthru_mad *mad;
4558 	struct ibmvfc_host *vhost = tgt->vhost;
4559 	struct ibmvfc_event *evt;
4560 
4561 	if (vhost->discovery_threads >= disc_threads)
4562 		return;
4563 
4564 	kref_get(&tgt->kref);
4565 	evt = ibmvfc_get_event(&vhost->crq);
4566 	vhost->discovery_threads++;
4567 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4568 	evt->tgt = tgt;
4569 
4570 	ibmvfc_init_passthru(evt);
4571 	mad = &evt->iu.passthru;
4572 	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4573 	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4574 	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4575 
4576 	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4577 	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4578 	       sizeof(vhost->login_buf->resp.port_name));
4579 	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4580 	       sizeof(vhost->login_buf->resp.node_name));
4581 	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4582 
4583 	if (timer_pending(&tgt->timer))
4584 		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4585 	else {
4586 		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4587 		add_timer(&tgt->timer);
4588 	}
4589 
4590 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4591 	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4592 		vhost->discovery_threads--;
4593 		del_timer(&tgt->timer);
4594 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4595 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4596 	} else
4597 		tgt_dbg(tgt, "Sent ADISC\n");
4598 }
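
/*
 * Timeline sketch of the two timers described above:
 *
 *	t0					ADISC sent
 *	t0 + IBMVFC_ADISC_TIMEOUT		tgt->timer fires;
 *						ibmvfc_adisc_timeout() sends
 *						a cancel TMF for the ADISC
 *	t0 + IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT	event timer fires; both the
 *						ADISC and its cancel timed
 *						out, so the CRQ is reset
 *
 * On any earlier completion the pending timers are deleted in the
 * respective handlers.
 */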
4599 
4600 /**
4601  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4602  * @evt:	ibmvfc event struct
4603  *
4604  **/
4605 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4606 {
4607 	struct ibmvfc_target *tgt = evt->tgt;
4608 	struct ibmvfc_host *vhost = evt->vhost;
4609 	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4610 	u32 status = be16_to_cpu(rsp->common.status);
4611 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4612 
4613 	vhost->discovery_threads--;
4614 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4615 	switch (status) {
4616 	case IBMVFC_MAD_SUCCESS:
4617 		tgt_dbg(tgt, "Query Target succeeded\n");
4618 		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4619 			ibmvfc_del_tgt(tgt);
4620 		else
4621 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4622 		break;
4623 	case IBMVFC_MAD_DRIVER_FAILED:
4624 		break;
4625 	case IBMVFC_MAD_CRQ_ERROR:
4626 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4627 		break;
4628 	case IBMVFC_MAD_FAILED:
4629 	default:
4630 		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4631 		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4632 		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4633 			ibmvfc_del_tgt(tgt);
4634 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4635 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4636 		else
4637 			ibmvfc_del_tgt(tgt);
4638 
4639 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4640 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4641 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4642 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4643 			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4644 			status);
4645 		break;
4646 	}
4647 
4648 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4649 	ibmvfc_free_event(evt);
4650 	wake_up(&vhost->work_wait_q);
4651 }
4652 
4653 /**
4654  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4655  * @tgt:	ibmvfc target struct
4656  *
4657  **/
4658 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4659 {
4660 	struct ibmvfc_query_tgt *query_tgt;
4661 	struct ibmvfc_host *vhost = tgt->vhost;
4662 	struct ibmvfc_event *evt;
4663 
4664 	if (vhost->discovery_threads >= disc_threads)
4665 		return;
4666 
4667 	kref_get(&tgt->kref);
4668 	evt = ibmvfc_get_event(&vhost->crq);
4669 	vhost->discovery_threads++;
4670 	evt->tgt = tgt;
4671 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4672 	query_tgt = &evt->iu.query_tgt;
4673 	memset(query_tgt, 0, sizeof(*query_tgt));
4674 	query_tgt->common.version = cpu_to_be32(1);
4675 	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4676 	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4677 	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4678 
4679 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4680 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4681 		vhost->discovery_threads--;
4682 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4683 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4684 	} else
4685 		tgt_dbg(tgt, "Sent Query Target\n");
4686 }
4687 
4688 /**
4689  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4690  * @vhost:		ibmvfc host struct
4691  * @target:		Discovery entry holding the SCSI ID and WWPN to allocate a target for
4692  *
4693  * Returns:
4694  *	0 on success / other on failure
4695  **/
4696 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4697 			       struct ibmvfc_discover_targets_entry *target)
4698 {
4699 	struct ibmvfc_target *stgt = NULL;
4700 	struct ibmvfc_target *wtgt = NULL;
4701 	struct ibmvfc_target *tgt;
4702 	unsigned long flags;
4703 	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4704 	u64 wwpn = be64_to_cpu(target->wwpn);
4705 
4706 	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4707 	spin_lock_irqsave(vhost->host->host_lock, flags);
4708 	list_for_each_entry(tgt, &vhost->targets, queue) {
4709 		if (tgt->wwpn == wwpn) {
4710 			wtgt = tgt;
4711 			break;
4712 		}
4713 	}
4714 
4715 	list_for_each_entry(tgt, &vhost->targets, queue) {
4716 		if (tgt->scsi_id == scsi_id) {
4717 			stgt = tgt;
4718 			break;
4719 		}
4720 	}
4721 
4722 	if (wtgt && !stgt) {
4723 		/*
4724 		 * A WWPN target has moved and we still are tracking the old
4725 		 * SCSI ID.  The only way we should be able to get here is if
4726 		 * we attempted to send an implicit logout for the old SCSI ID
4727 		 * and it failed for some reason, such as there being I/O
4728 		 * pending to the target. In this case, we will have already
4729 		 * deleted the rport from the FC transport so we do a move
4730 		 * login, which works even with I/O pending, as it will cancel
4731 		 * any active commands.
4732 		 */
4733 		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4734 			/*
4735 			 * Do a move login here. The old target is no longer
4736 			 * known to the transport layer. We don't use the
4737 			 * normal ibmvfc_set_tgt_action to set this, as we
4738 			 * don't normally want to allow this state change.
4739 			 */
4740 			wtgt->old_scsi_id = wtgt->scsi_id;
4741 			wtgt->scsi_id = scsi_id;
4742 			wtgt->action = IBMVFC_TGT_ACTION_INIT;
4743 			ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4744 			goto unlock_out;
4745 		} else {
4746 			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4747 				wtgt->action, wtgt->rport);
4748 		}
4749 	} else if (stgt) {
4750 		if (stgt->need_login)
4751 			ibmvfc_init_tgt(stgt, ibmvfc_tgt_implicit_logout);
4752 		goto unlock_out;
4753 	}
4754 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4755 
4756 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4757 	memset(tgt, 0, sizeof(*tgt));
4758 	tgt->scsi_id = scsi_id;
4759 	tgt->wwpn = wwpn;
4760 	tgt->vhost = vhost;
4761 	tgt->need_login = 1;
4762 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4763 	kref_init(&tgt->kref);
4764 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4765 	spin_lock_irqsave(vhost->host->host_lock, flags);
4766 	tgt->cancel_key = vhost->task_set++;
4767 	list_add_tail(&tgt->queue, &vhost->targets);
4768 
4769 unlock_out:
4770 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4771 	return 0;
4772 }
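
/*
 * Summary of the three cases handled above:
 *
 * 1. WWPN known under a different SCSI ID (wtgt && !stgt): the port has
 *    moved; reuse the existing target and issue a move login, which
 *    works even with I/O pending.
 * 2. SCSI ID already tracked (stgt): reuse that target, first sending an
 *    implicit logout if it still needs a fresh login.
 * 3. Neither known: allocate a new target from the mempool, give it a
 *    cancel key from the host's task_set counter, and start it through
 *    implicit logout and discovery.
 */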
4773 
4774 /**
4775  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4776  * @vhost:		ibmvfc host struct
4777  *
4778  * Returns:
4779  *	0 on success / other on failure
4780  **/
4781 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4782 {
4783 	int i, rc;
4784 
4785 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4786 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4787 
4788 	return rc;
4789 }
4790 
4791 /**
4792  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4793  * @evt:	ibmvfc event struct
4794  *
4795  **/
4796 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4797 {
4798 	struct ibmvfc_host *vhost = evt->vhost;
4799 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4800 	u32 mad_status = be16_to_cpu(rsp->common.status);
4801 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4802 
4803 	switch (mad_status) {
4804 	case IBMVFC_MAD_SUCCESS:
4805 		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4806 		vhost->num_targets = be32_to_cpu(rsp->num_written);
4807 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4808 		break;
4809 	case IBMVFC_MAD_FAILED:
4810 		level += ibmvfc_retry_host_init(vhost);
4811 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4812 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4813 			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4814 		break;
4815 	case IBMVFC_MAD_DRIVER_FAILED:
4816 		break;
4817 	default:
4818 		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4819 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4820 		break;
4821 	}
4822 
4823 	ibmvfc_free_event(evt);
4824 	wake_up(&vhost->work_wait_q);
4825 }
4826 
4827 /**
4828  * ibmvfc_discover_targets - Send Discover Targets MAD
4829  * @vhost:	ibmvfc host struct
4830  *
4831  **/
4832 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4833 {
4834 	struct ibmvfc_discover_targets *mad;
4835 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4836 
4837 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4838 	mad = &evt->iu.discover_targets;
4839 	memset(mad, 0, sizeof(*mad));
4840 	mad->common.version = cpu_to_be32(1);
4841 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4842 	mad->common.length = cpu_to_be16(sizeof(*mad));
4843 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4844 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4845 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4846 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4847 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4848 
4849 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4850 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4851 	else
4852 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4853 }
4854 
4855 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
4856 {
4857 	struct ibmvfc_host *vhost = evt->vhost;
4858 	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
4859 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4860 	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
4861 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4862 	int flags, active_queues, i;
4863 
4864 	ibmvfc_free_event(evt);
4865 
4866 	switch (mad_status) {
4867 	case IBMVFC_MAD_SUCCESS:
4868 		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
4869 		flags = be32_to_cpu(setup->flags);
4870 		vhost->do_enquiry = 0;
4871 		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
4872 		scrqs->active_queues = active_queues;
4873 
4874 		if (flags & IBMVFC_CHANNELS_CANCELED) {
4875 			ibmvfc_dbg(vhost, "Channels Canceled\n");
4876 			vhost->using_channels = 0;
4877 		} else {
4878 			if (active_queues)
4879 				vhost->using_channels = 1;
4880 			for (i = 0; i < active_queues; i++)
4881 				scrqs->scrqs[i].vios_cookie =
4882 					be64_to_cpu(setup->channel_handles[i]);
4883 
4884 			ibmvfc_dbg(vhost, "Using %u channels\n",
4885 				   vhost->scsi_scrqs.active_queues);
4886 		}
4887 		break;
4888 	case IBMVFC_MAD_FAILED:
4889 		level += ibmvfc_retry_host_init(vhost);
4890 		ibmvfc_log(vhost, level, "Channel Setup failed\n");
4891 		fallthrough;
4892 	case IBMVFC_MAD_DRIVER_FAILED:
4893 		return;
4894 	default:
4895 		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
4896 			mad_status);
4897 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4898 		return;
4899 	}
4900 
4901 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4902 	wake_up(&vhost->work_wait_q);
4903 }
4904 
4905 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
4906 {
4907 	struct ibmvfc_channel_setup_mad *mad;
4908 	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
4909 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4910 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4911 	unsigned int num_channels =
4912 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
4913 	int i;
4914 
4915 	memset(setup_buf, 0, sizeof(*setup_buf));
4916 	if (num_channels == 0)
4917 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
4918 	else {
4919 		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
4920 		for (i = 0; i < num_channels; i++)
4921 			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
4922 	}
4923 
4924 	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
4925 	mad = &evt->iu.channel_setup;
4926 	memset(mad, 0, sizeof(*mad));
4927 	mad->common.version = cpu_to_be32(1);
4928 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
4929 	mad->common.length = cpu_to_be16(sizeof(*mad));
4930 	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
4931 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
4932 
4933 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4934 
4935 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4936 		ibmvfc_dbg(vhost, "Sent channel setup\n");
4937 	else
4938 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
4939 }
4940 
4941 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
4942 {
4943 	struct ibmvfc_host *vhost = evt->vhost;
4944 	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
4945 	u32 mad_status = be16_to_cpu(rsp->common.status);
4946 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4947 
4948 	switch (mad_status) {
4949 	case IBMVFC_MAD_SUCCESS:
4950 		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
4951 		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
4952 		ibmvfc_free_event(evt);
4953 		break;
4954 	case IBMVFC_MAD_FAILED:
4955 		level += ibmvfc_retry_host_init(vhost);
4956 		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
4957 		fallthrough;
4958 	case IBMVFC_MAD_DRIVER_FAILED:
4959 		ibmvfc_free_event(evt);
4960 		return;
4961 	default:
4962 		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
4963 			mad_status);
4964 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4965 		ibmvfc_free_event(evt);
4966 		return;
4967 	}
4968 
4969 	ibmvfc_channel_setup(vhost);
4970 }
4971 
4972 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
4973 {
4974 	struct ibmvfc_channel_enquiry *mad;
4975 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4976 
4977 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
4978 	mad = &evt->iu.channel_enquiry;
4979 	memset(mad, 0, sizeof(*mad));
4980 	mad->common.version = cpu_to_be32(1);
4981 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
4982 	mad->common.length = cpu_to_be16(sizeof(*mad));
4983 
4984 	if (mig_channels_only)
4985 		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
4986 	if (mig_no_less_channels)
4987 		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
4988 
4989 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4990 
4991 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4992 		ibmvfc_dbg(vhost, "Sent channel enquiry\n");
4993 	else
4994 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4995 }
4996 
4997 /**
4998  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
4999  * @evt:	ibmvfc event struct
5000  *
5001  **/
5002 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5003 {
5004 	struct ibmvfc_host *vhost = evt->vhost;
5005 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5006 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5007 	unsigned int npiv_max_sectors;
5008 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
5009 
5010 	switch (mad_status) {
5011 	case IBMVFC_MAD_SUCCESS:
5012 		ibmvfc_free_event(evt);
5013 		break;
5014 	case IBMVFC_MAD_FAILED:
5015 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5016 			level += ibmvfc_retry_host_init(vhost);
5017 		else
5018 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5019 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5020 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5021 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5022 		ibmvfc_free_event(evt);
5023 		return;
5024 	case IBMVFC_MAD_CRQ_ERROR:
5025 		ibmvfc_retry_host_init(vhost);
5026 		fallthrough;
5027 	case IBMVFC_MAD_DRIVER_FAILED:
5028 		ibmvfc_free_event(evt);
5029 		return;
5030 	default:
5031 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5032 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5033 		ibmvfc_free_event(evt);
5034 		return;
5035 	}
5036 
5037 	vhost->client_migrated = 0;
5038 
5039 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5040 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5041 			rsp->flags);
5042 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5043 		wake_up(&vhost->work_wait_q);
5044 		return;
5045 	}
5046 
5047 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5048 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5049 			rsp->max_cmds);
5050 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5051 		wake_up(&vhost->work_wait_q);
5052 		return;
5053 	}
5054 
5055 	vhost->logged_in = 1;
5056 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5057 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5058 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5059 		 rsp->drc_name, npiv_max_sectors);
5060 
5061 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5062 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5063 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5064 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5065 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5066 	fc_host_supported_classes(vhost->host) = 0;
5067 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5068 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5069 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5070 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5071 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5072 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5073 	fc_host_maxframe_size(vhost->host) =
5074 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5075 
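	/* Reserve event slots for driver-internal MADs; the remainder backs the SCSI queue depth */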
5076 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5077 	vhost->host->max_sectors = npiv_max_sectors;
5078 
5079 	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5080 		ibmvfc_channel_enquiry(vhost);
5081 	} else {
5082 		vhost->do_enquiry = 0;
5083 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5084 		wake_up(&vhost->work_wait_q);
5085 	}
5086 }
5087 
5088 /**
5089  * ibmvfc_npiv_login - Sends NPIV login
5090  * @vhost:	ibmvfc host struct
5091  *
5092  **/
5093 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5094 {
5095 	struct ibmvfc_npiv_login_mad *mad;
5096 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5097 
5098 	ibmvfc_gather_partition_info(vhost);
5099 	ibmvfc_set_login_info(vhost);
5100 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5101 
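	/* Copy the login parameters gathered above into the DMA buffer the MAD points at */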
5102 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5103 	mad = &evt->iu.npiv_login;
5104 	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5105 	mad->common.version = cpu_to_be32(1);
5106 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5107 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5108 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5109 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5110 
5111 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5112 
5113 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5114 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
5115 	else
5116 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5117 }
5118 
5119 /**
5120  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5121  * @evt:		ibmvfc event struct
5122  *
5123  **/
5124 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5125 {
5126 	struct ibmvfc_host *vhost = evt->vhost;
5127 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5128 
5129 	ibmvfc_free_event(evt);
5130 
5131 	switch (mad_status) {
5132 	case IBMVFC_MAD_SUCCESS:
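		/* Re-init the host only if we were still waiting on this logout and no commands remain outstanding */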
5133 		if (list_empty(&vhost->crq.sent) &&
5134 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5135 			ibmvfc_init_host(vhost);
5136 			return;
5137 		}
5138 		break;
5139 	case IBMVFC_MAD_FAILED:
5140 	case IBMVFC_MAD_NOT_SUPPORTED:
5141 	case IBMVFC_MAD_CRQ_ERROR:
5142 	case IBMVFC_MAD_DRIVER_FAILED:
5143 	default:
5144 		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5145 		break;
5146 	}
5147 
5148 	ibmvfc_hard_reset_host(vhost);
5149 }
5150 
5151 /**
5152  * ibmvfc_npiv_logout - Issue an NPIV Logout
5153  * @vhost:		ibmvfc host struct
5154  *
5155  **/
5156 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5157 {
5158 	struct ibmvfc_npiv_logout_mad *mad;
5159 	struct ibmvfc_event *evt;
5160 
5161 	evt = ibmvfc_get_event(&vhost->crq);
5162 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5163 
5164 	mad = &evt->iu.npiv_logout;
5165 	memset(mad, 0, sizeof(*mad));
5166 	mad->common.version = cpu_to_be32(1);
5167 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5168 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5169 
5170 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5171 
5172 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5173 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5174 	else
5175 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5176 }
5177 
5178 /**
5179  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5180  * @vhost:		ibmvfc host struct
5181  *
5182  * Returns:
5183  *	1 if work to do / 0 if not
5184  **/
5185 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5186 {
5187 	struct ibmvfc_target *tgt;
5188 
5189 	list_for_each_entry(tgt, &vhost->targets, queue) {
5190 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5191 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5192 			return 1;
5193 	}
5194 
5195 	return 0;
5196 }
5197 
5198 /**
5199  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5200  * @vhost:		ibmvfc host struct
5201  *
5202  * Returns:
5203  *	1 if work to do / 0 if not
5204  **/
5205 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5206 {
5207 	struct ibmvfc_target *tgt;
5208 
5209 	list_for_each_entry(tgt, &vhost->targets, queue) {
5210 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5211 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5212 			return 1;
5213 	}
5214 	return 0;
5215 }
5216 
5217 /**
5218  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5219  * @vhost:		ibmvfc host struct
5220  *
5221  * Returns:
5222  *	1 if work to do / 0 if not
5223  **/
5224 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5225 {
5226 	struct ibmvfc_target *tgt;
5227 
5228 	if (kthread_should_stop())
5229 		return 1;
5230 	switch (vhost->action) {
5231 	case IBMVFC_HOST_ACTION_NONE:
5232 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5233 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5234 		return 0;
5235 	case IBMVFC_HOST_ACTION_TGT_INIT:
5236 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5237 		if (vhost->discovery_threads == disc_threads)
5238 			return 0;
5239 		list_for_each_entry(tgt, &vhost->targets, queue)
5240 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5241 				return 1;
5242 		list_for_each_entry(tgt, &vhost->targets, queue)
5243 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5244 				return 0;
5245 		return 1;
5246 	case IBMVFC_HOST_ACTION_TGT_DEL:
5247 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5248 		if (vhost->discovery_threads == disc_threads)
5249 			return 0;
5250 		list_for_each_entry(tgt, &vhost->targets, queue)
5251 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5252 				return 1;
5253 		list_for_each_entry(tgt, &vhost->targets, queue)
5254 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5255 				return 0;
5256 		return 1;
5257 	case IBMVFC_HOST_ACTION_LOGO:
5258 	case IBMVFC_HOST_ACTION_INIT:
5259 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5260 	case IBMVFC_HOST_ACTION_QUERY:
5261 	case IBMVFC_HOST_ACTION_RESET:
5262 	case IBMVFC_HOST_ACTION_REENABLE:
5263 	default:
5264 		break;
5265 	}
5266 
5267 	return 1;
5268 }
5269 
5270 /**
5271  * ibmvfc_work_to_do - Is there task level work to do?
5272  * @vhost:		ibmvfc host struct
5273  *
5274  * Returns:
5275  *	1 if work to do / 0 if not
5276  **/
5277 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5278 {
5279 	unsigned long flags;
5280 	int rc;
5281 
5282 	spin_lock_irqsave(vhost->host->host_lock, flags);
5283 	rc = __ibmvfc_work_to_do(vhost);
5284 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5285 	return rc;
5286 }
5287 
5288 /**
5289  * ibmvfc_log_ae - Log async events if necessary
5290  * @vhost:		ibmvfc host struct
5291  * @events:		events to log
5292  *
5293  **/
5294 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5295 {
5296 	if (events & IBMVFC_AE_RSCN)
5297 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5298 	if ((events & IBMVFC_AE_LINKDOWN) &&
5299 	    vhost->state >= IBMVFC_HALTED)
5300 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5301 	if ((events & IBMVFC_AE_LINKUP) &&
5302 	    vhost->state == IBMVFC_INITIALIZING)
5303 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5304 }
5305 
5306 /**
5307  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5308  * @tgt:		ibmvfc target struct
5309  *
5310  **/
5311 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5312 {
5313 	struct ibmvfc_host *vhost = tgt->vhost;
5314 	struct fc_rport *rport;
5315 	unsigned long flags;
5316 
5317 	tgt_dbg(tgt, "Adding rport\n");
5318 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5319 	spin_lock_irqsave(vhost->host->host_lock, flags);
5320 
5321 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5322 		tgt_dbg(tgt, "Deleting rport\n");
5323 		list_del(&tgt->queue);
5324 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5325 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5326 		fc_remote_port_delete(rport);
5327 		del_timer_sync(&tgt->timer);
5328 		kref_put(&tgt->kref, ibmvfc_release_tgt);
5329 		return;
5330 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5331 		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5332 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5333 		tgt->rport = NULL;
5334 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5335 		fc_remote_port_delete(rport);
5336 		return;
5337 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5338 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5339 		return;
5340 	}
5341 
5342 	if (rport) {
5343 		tgt_dbg(tgt, "rport add succeeded\n");
5344 		tgt->rport = rport;
5345 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5346 		rport->supported_classes = 0;
5347 		tgt->target_id = rport->scsi_target_id;
5348 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5349 			rport->supported_classes |= FC_COS_CLASS1;
5350 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5351 			rport->supported_classes |= FC_COS_CLASS2;
5352 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5353 			rport->supported_classes |= FC_COS_CLASS3;
5354 		if (rport->rqst_q)
5355 			blk_queue_max_segments(rport->rqst_q, 1);
5356 	} else
5357 		tgt_dbg(tgt, "rport add failed\n");
5358 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5359 }
5360 
5361 /**
5362  * ibmvfc_do_work - Do task level work
5363  * @vhost:		ibmvfc host struct
5364  *
5365  **/
5366 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5367 {
5368 	struct ibmvfc_target *tgt;
5369 	unsigned long flags;
5370 	struct fc_rport *rport;
5371 	LIST_HEAD(purge);
5372 	int rc;
5373 
5374 	ibmvfc_log_ae(vhost, vhost->events_to_log);
5375 	spin_lock_irqsave(vhost->host->host_lock, flags);
5376 	vhost->events_to_log = 0;
5377 	switch (vhost->action) {
5378 	case IBMVFC_HOST_ACTION_NONE:
5379 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5380 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5381 		break;
5382 	case IBMVFC_HOST_ACTION_RESET:
5383 		list_splice_init(&vhost->purge, &purge);
5384 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5385 		ibmvfc_complete_purge(&purge);
5386 		rc = ibmvfc_reset_crq(vhost);
5387 
5388 		spin_lock_irqsave(vhost->host->host_lock, flags);
5389 		if (!rc || rc == H_CLOSED)
5390 			vio_enable_interrupts(to_vio_dev(vhost->dev));
5391 		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5392 			/*
5393 			 * The only action we could have changed to would have
5394 			 * been reenable, in which case, we skip the rest of
5395 			 * this path and wait until we've done the re-enable
5396 			 * before sending the crq init.
5397 			 */
5398 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5399 
5400 			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5401 			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5402 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5403 				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5404 			}
5405 		}
5406 		break;
5407 	case IBMVFC_HOST_ACTION_REENABLE:
5408 		list_splice_init(&vhost->purge, &purge);
5409 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5410 		ibmvfc_complete_purge(&purge);
5411 		rc = ibmvfc_reenable_crq_queue(vhost);
5412 
5413 		spin_lock_irqsave(vhost->host->host_lock, flags);
5414 		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5415 			/*
5416 			 * The only action we could have changed to would have
5417 			 * been reset, in which case, we skip the rest of this
5418 			 * path and wait until we've done the reset before
5419 			 * sending the crq init.
5420 			 */
5421 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5422 			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5423 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5424 				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5425 			}
5426 		}
5427 		break;
5428 	case IBMVFC_HOST_ACTION_LOGO:
5429 		vhost->job_step(vhost);
5430 		break;
5431 	case IBMVFC_HOST_ACTION_INIT:
5432 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5433 		if (vhost->delay_init) {
5434 			vhost->delay_init = 0;
5435 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
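			/* Presumably gives the fabric time to settle after a transport event before logging back in */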
5436 			ssleep(15);
5437 			return;
5438 		} else
5439 			vhost->job_step(vhost);
5440 		break;
5441 	case IBMVFC_HOST_ACTION_QUERY:
5442 		list_for_each_entry(tgt, &vhost->targets, queue)
5443 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5444 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5445 		break;
5446 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5447 		list_for_each_entry(tgt, &vhost->targets, queue) {
5448 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5449 				tgt->job_step(tgt);
5450 				break;
5451 			}
5452 		}
5453 
5454 		if (!ibmvfc_dev_init_to_do(vhost))
5455 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5456 		break;
5457 	case IBMVFC_HOST_ACTION_TGT_DEL:
5458 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5459 		list_for_each_entry(tgt, &vhost->targets, queue) {
5460 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5461 				tgt->job_step(tgt);
5462 				break;
5463 			}
5464 		}
5465 
5466 		if (ibmvfc_dev_logo_to_do(vhost)) {
5467 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5468 			return;
5469 		}
5470 
5471 		list_for_each_entry(tgt, &vhost->targets, queue) {
5472 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5473 				tgt_dbg(tgt, "Deleting rport\n");
5474 				rport = tgt->rport;
5475 				tgt->rport = NULL;
5476 				list_del(&tgt->queue);
5477 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5478 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5479 				if (rport)
5480 					fc_remote_port_delete(rport);
5481 				del_timer_sync(&tgt->timer);
5482 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5483 				return;
5484 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5485 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5486 				rport = tgt->rport;
5487 				tgt->rport = NULL;
5488 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5489 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5490 				if (rport)
5491 					fc_remote_port_delete(rport);
5492 				return;
5493 			}
5494 		}
5495 
5496 		if (vhost->state == IBMVFC_INITIALIZING) {
5497 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5498 				if (vhost->reinit) {
5499 					vhost->reinit = 0;
5500 					scsi_block_requests(vhost->host);
5501 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5502 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5503 				} else {
5504 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5505 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5506 					wake_up(&vhost->init_wait_q);
5507 					schedule_work(&vhost->rport_add_work_q);
5508 					vhost->init_retries = 0;
5509 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5510 					scsi_unblock_requests(vhost->host);
5511 				}
5512 
5513 				return;
5514 			} else {
5515 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5516 				vhost->job_step = ibmvfc_discover_targets;
5517 			}
5518 		} else {
5519 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5520 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5521 			scsi_unblock_requests(vhost->host);
5522 			wake_up(&vhost->init_wait_q);
5523 			return;
5524 		}
5525 		break;
5526 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5527 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5528 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5529 		ibmvfc_alloc_targets(vhost);
5530 		spin_lock_irqsave(vhost->host->host_lock, flags);
5531 		break;
5532 	case IBMVFC_HOST_ACTION_TGT_INIT:
5533 		list_for_each_entry(tgt, &vhost->targets, queue) {
5534 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5535 				tgt->job_step(tgt);
5536 				break;
5537 			}
5538 		}
5539 
5540 		if (!ibmvfc_dev_init_to_do(vhost))
5541 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5542 		break;
5543 	default:
5544 		break;
5545 	}
5546 
5547 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5548 }
5549 
5550 /**
5551  * ibmvfc_work - Do task level work
5552  * @data:		ibmvfc host struct
5553  *
5554  * Returns:
5555  *	zero
5556  **/
5557 static int ibmvfc_work(void *data)
5558 {
5559 	struct ibmvfc_host *vhost = data;
5560 	int rc;
5561 
5562 	set_user_nice(current, MIN_NICE);
5563 
5564 	while (1) {
5565 		rc = wait_event_interruptible(vhost->work_wait_q,
5566 					      ibmvfc_work_to_do(vhost));
5567 
5568 		BUG_ON(rc);
5569 
5570 		if (kthread_should_stop())
5571 			break;
5572 
5573 		ibmvfc_do_work(vhost);
5574 	}
5575 
5576 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5577 	return 0;
5578 }
5579 
5580 /**
5581  * ibmvfc_alloc_queue - Allocate queue
5582  * @vhost:	ibmvfc host struct
5583  * @queue:	ibmvfc queue to allocate
5584  * @fmt:	queue format to allocate
5585  *
5586  * Returns:
5587  *	0 on success / non-zero on failure
5588  **/
5589 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5590 			      struct ibmvfc_queue *queue,
5591 			      enum ibmvfc_msg_fmt fmt)
5592 {
5593 	struct device *dev = vhost->dev;
5594 	size_t fmt_size;
5595 	unsigned int pool_size = 0;
5596 
5597 	ENTER;
5598 	spin_lock_init(&queue->_lock);
5599 	queue->q_lock = &queue->_lock;
5600 
5601 	switch (fmt) {
5602 	case IBMVFC_CRQ_FMT:
5603 		fmt_size = sizeof(*queue->msgs.crq);
5604 		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
5605 		break;
5606 	case IBMVFC_ASYNC_FMT:
5607 		fmt_size = sizeof(*queue->msgs.async);
5608 		break;
5609 	case IBMVFC_SUB_CRQ_FMT:
5610 		fmt_size = sizeof(*queue->msgs.scrq);
5611 		/* We need one extra event for Cancel Commands */
5612 		pool_size = max_requests + 1;
5613 		break;
5614 	default:
5615 		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5616 		return -EINVAL;
5617 	}
5618 
5619 	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
5620 		dev_err(dev, "Couldn't initialize event pool.\n");
5621 		return -ENOMEM;
5622 	}
5623 
5624 	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5625 	if (!queue->msgs.handle) {
		/* free the event pool initialized above so this error path doesn't leak it */
		ibmvfc_free_event_pool(vhost, queue);
5626 		return -ENOMEM;
	}
5627 
5628 	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5629 					  DMA_BIDIRECTIONAL);
5630 
5631 	if (dma_mapping_error(dev, queue->msg_token)) {
5632 		free_page((unsigned long)queue->msgs.handle);
		ibmvfc_free_event_pool(vhost, queue);
5633 		queue->msgs.handle = NULL;
5634 		return -ENOMEM;
5635 	}
5636 
5637 	queue->cur = 0;
5638 	queue->fmt = fmt;
5639 	queue->size = PAGE_SIZE / fmt_size;
5640 	return 0;
5641 }
5642 
5643 /**
5644  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5645  * @vhost:	ibmvfc host struct
5646  *
5647  * Allocates a page for messages, maps it for dma, and registers
5648  * the crq with the hypervisor.
5649  *
5650  * Return value:
5651  *	zero on success / other on failure
5652  **/
5653 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5654 {
5655 	int rc, retrc = -ENOMEM;
5656 	struct device *dev = vhost->dev;
5657 	struct vio_dev *vdev = to_vio_dev(dev);
5658 	struct ibmvfc_queue *crq = &vhost->crq;
5659 
5660 	ENTER;
5661 	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5662 		return -ENOMEM;
5663 
5664 	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5665 					crq->msg_token, PAGE_SIZE);
5666 
5667 	if (rc == H_RESOURCE)
5668 		/* maybe kexecing and resource is busy. try a reset */
5669 		retrc = rc = ibmvfc_reset_crq(vhost);
5670 
5671 	if (rc == H_CLOSED)
5672 		dev_warn(dev, "Partner adapter not ready\n");
5673 	else if (rc) {
5674 		dev_warn(dev, "Error %d opening adapter\n", rc);
5675 		goto reg_crq_failed;
5676 	}
5677 
5678 	retrc = 0;
5679 
5680 	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5681 
5682 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5683 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5684 		goto req_irq_failed;
5685 	}
5686 
5687 	if ((rc = vio_enable_interrupts(vdev))) {
5688 		dev_err(dev, "Error %d enabling interrupts\n", rc);
5689 		goto req_irq_failed;
5690 	}
5691 
5692 	LEAVE;
5693 	return retrc;
5694 
5695 req_irq_failed:
5696 	tasklet_kill(&vhost->tasklet);
5697 	do {
5698 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5699 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5700 reg_crq_failed:
5701 	ibmvfc_free_queue(vhost, crq);
5702 	return retrc;
5703 }
5704 
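/**
 * ibmvfc_register_scsi_channel - Register a SCSI sub-crq with the hypervisor
 * @vhost:	ibmvfc host struct
 * @index:	index of the sub-crq / hardware queue to register
 *
 * Return value:
 *	0 on success / other on failure
 **/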
5705 static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
5706 				  int index)
5707 {
5708 	struct device *dev = vhost->dev;
5709 	struct vio_dev *vdev = to_vio_dev(dev);
5710 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5711 	int rc = -ENOMEM;
5712 
5713 	ENTER;
5714 
5715 	if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
5716 		return -ENOMEM;
5717 
5718 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5719 			   &scrq->cookie, &scrq->hw_irq);
5720 
5721 	/* H_CLOSED indicates successful register, but no CRQ partner */
5722 	if (rc && rc != H_CLOSED) {
5723 		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5724 		if (rc == H_PARAMETER)
5725 			dev_warn_once(dev, "Firmware may not support MQ\n");
5726 		goto reg_failed;
5727 	}
5728 
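	/* Map the hardware interrupt number returned by firmware to a Linux virtual irq */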
5729 	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5730 
5731 	if (!scrq->irq) {
5732 		rc = -EINVAL;
5733 		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5734 		goto irq_failed;
5735 	}
5736 
5737 	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5738 		 vdev->unit_address, index);
5739 	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
5740 
5741 	if (rc) {
5742 		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5743 		irq_dispose_mapping(scrq->irq);
5744 		goto irq_failed;
5745 	}
5746 
5747 	scrq->hwq_id = index;
5748 	scrq->vhost = vhost;
5749 
5750 	LEAVE;
5751 	return 0;
5752 
5753 irq_failed:
5754 	do {
5755 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5756 	} while (rtas_busy_delay(rc));
5757 reg_failed:
5758 	ibmvfc_free_queue(vhost, scrq);
5759 	LEAVE;
5760 	return rc;
5761 }
5762 
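/**
 * ibmvfc_deregister_scsi_channel - Free a SCSI sub-crq and its interrupt
 * @vhost:	ibmvfc host struct
 * @index:	index of the sub-crq to deregister
 *
 **/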
5763 static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
5764 {
5765 	struct device *dev = vhost->dev;
5766 	struct vio_dev *vdev = to_vio_dev(dev);
5767 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5768 	long rc;
5769 
5770 	ENTER;
5771 
5772 	free_irq(scrq->irq, scrq);
5773 	irq_dispose_mapping(scrq->irq);
5774 	scrq->irq = 0;
5775 
5776 	do {
5777 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
5778 					scrq->cookie);
5779 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5780 
5781 	if (rc)
5782 		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
5783 
5784 	ibmvfc_free_queue(vhost, scrq);
5785 	LEAVE;
5786 }
5787 
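/**
 * ibmvfc_init_sub_crqs - Allocate and register all SCSI sub-crqs
 * @vhost:	ibmvfc host struct
 *
 * If any sub-crq fails to register, all are torn down and the driver
 * falls back to a single queue by skipping the channel enquiry.
 **/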
5788 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
5789 {
5790 	int i, j;
5791 
5792 	ENTER;
5793 	if (!vhost->mq_enabled)
5794 		return;
5795 
5796 	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
5797 					  sizeof(*vhost->scsi_scrqs.scrqs),
5798 					  GFP_KERNEL);
5799 	if (!vhost->scsi_scrqs.scrqs) {
5800 		vhost->do_enquiry = 0;
5801 		return;
5802 	}
5803 
5804 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5805 		if (ibmvfc_register_scsi_channel(vhost, i)) {
5806 			for (j = i; j > 0; j--)
5807 				ibmvfc_deregister_scsi_channel(vhost, j - 1);
5808 			kfree(vhost->scsi_scrqs.scrqs);
5809 			vhost->scsi_scrqs.scrqs = NULL;
5810 			vhost->scsi_scrqs.active_queues = 0;
5811 			vhost->do_enquiry = 0;
5812 			break;
5813 		}
5814 	}
5815 
5816 	LEAVE;
5817 }
5818 
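/**
 * ibmvfc_release_sub_crqs - Deregister and free all SCSI sub-crqs
 * @vhost:	ibmvfc host struct
 *
 **/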
5819 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
5820 {
5821 	int i;
5822 
5823 	ENTER;
5824 	if (!vhost->scsi_scrqs.scrqs)
5825 		return;
5826 
5827 	for (i = 0; i < nr_scsi_hw_queues; i++)
5828 		ibmvfc_deregister_scsi_channel(vhost, i);
5829 
5830 	kfree(vhost->scsi_scrqs.scrqs);
5831 	vhost->scsi_scrqs.scrqs = NULL;
5832 	vhost->scsi_scrqs.active_queues = 0;
5833 	LEAVE;
5834 }
5835 
5836 /**
5837  * ibmvfc_free_mem - Free memory for vhost
5838  * @vhost:	ibmvfc host struct
5839  *
5840  * Return value:
5841  * 	none
5842  **/
5843 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
5844 {
5845 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5846 
5847 	ENTER;
5848 	mempool_destroy(vhost->tgt_pool);
5849 	kfree(vhost->trace);
5850 	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
5851 			  vhost->disc_buf_dma);
5852 	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
5853 			  vhost->login_buf, vhost->login_buf_dma);
5854 	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
5855 			  vhost->channel_setup_buf, vhost->channel_setup_dma);
5856 	dma_pool_destroy(vhost->sg_pool);
5857 	ibmvfc_free_queue(vhost, async_q);
5858 	LEAVE;
5859 }
5860 
5861 /**
5862  * ibmvfc_alloc_mem - Allocate memory for vhost
5863  * @vhost:	ibmvfc host struct
5864  *
5865  * Return value:
5866  * 	0 on success / non-zero on failure
5867  **/
5868 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
5869 {
5870 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5871 	struct device *dev = vhost->dev;
5872 
5873 	ENTER;
5874 	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
5875 		dev_err(dev, "Couldn't allocate/map async queue.\n");
5876 		goto nomem;
5877 	}
5878 
5879 	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
5880 					 SG_ALL * sizeof(struct srp_direct_buf),
5881 					 sizeof(struct srp_direct_buf), 0);
5882 
5883 	if (!vhost->sg_pool) {
5884 		dev_err(dev, "Failed to allocate sg pool\n");
5885 		goto unmap_async_crq;
5886 	}
5887 
5888 	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
5889 					      &vhost->login_buf_dma, GFP_KERNEL);
5890 
5891 	if (!vhost->login_buf) {
5892 		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
5893 		goto free_sg_pool;
5894 	}
5895 
5896 	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
5897 	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
5898 					     &vhost->disc_buf_dma, GFP_KERNEL);
5899 
5900 	if (!vhost->disc_buf) {
5901 		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
5902 		goto free_login_buffer;
5903 	}
5904 
5905 	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
5906 			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
5907 	atomic_set(&vhost->trace_index, -1);
5908 
5909 	if (!vhost->trace)
5910 		goto free_disc_buffer;
5911 
5912 	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
5913 						      sizeof(struct ibmvfc_target));
5914 
5915 	if (!vhost->tgt_pool) {
5916 		dev_err(dev, "Couldn't allocate target memory pool\n");
5917 		goto free_trace;
5918 	}
5919 
5920 	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
5921 						      &vhost->channel_setup_dma,
5922 						      GFP_KERNEL);
5923 
5924 	if (!vhost->channel_setup_buf) {
5925 		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
5926 		goto free_tgt_pool;
5927 	}
5928 
5929 	LEAVE;
5930 	return 0;
5931 
5932 free_tgt_pool:
5933 	mempool_destroy(vhost->tgt_pool);
5934 free_trace:
5935 	kfree(vhost->trace);
5936 free_disc_buffer:
5937 	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
5938 			  vhost->disc_buf_dma);
5939 free_login_buffer:
5940 	dma_free_coherent(dev, sizeof(*vhost->login_buf),
5941 			  vhost->login_buf, vhost->login_buf_dma);
5942 free_sg_pool:
5943 	dma_pool_destroy(vhost->sg_pool);
5944 unmap_async_crq:
5945 	ibmvfc_free_queue(vhost, async_q);
5946 nomem:
5947 	LEAVE;
5948 	return -ENOMEM;
5949 }
5950 
5951 /**
5952  * ibmvfc_rport_add_thread - Worker thread for rport adds
5953  * @work:	work struct
5954  *
5955  **/
5956 static void ibmvfc_rport_add_thread(struct work_struct *work)
5957 {
5958 	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
5959 						 rport_add_work_q);
5960 	struct ibmvfc_target *tgt;
5961 	struct fc_rport *rport;
5962 	unsigned long flags;
5963 	int did_work;
5964 
5965 	ENTER;
5966 	spin_lock_irqsave(vhost->host->host_lock, flags);
5967 	do {
5968 		did_work = 0;
5969 		if (vhost->state != IBMVFC_ACTIVE)
5970 			break;
5971 
5972 		list_for_each_entry(tgt, &vhost->targets, queue) {
5973 			if (tgt->add_rport) {
5974 				did_work = 1;
5975 				tgt->add_rport = 0;
5976 				kref_get(&tgt->kref);
5977 				rport = tgt->rport;
5978 				if (!rport) {
5979 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5980 					ibmvfc_tgt_add_rport(tgt);
5981 				} else if (get_device(&rport->dev)) {
5982 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5983 					tgt_dbg(tgt, "Setting rport roles\n");
5984 					fc_remote_port_rolechg(rport, tgt->ids.roles);
5985 					put_device(&rport->dev);
5986 				} else {
5987 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5988 				}
5989 
5990 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5991 				spin_lock_irqsave(vhost->host->host_lock, flags);
5992 				break;
5993 			}
5994 		}
5995 	} while (did_work);
5996 
5997 	if (vhost->state == IBMVFC_ACTIVE)
5998 		vhost->scan_complete = 1;
5999 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6000 	LEAVE;
6001 }
6002 
6003 /**
6004  * ibmvfc_probe - Adapter hot plug add entry point
6005  * @vdev:	vio device struct
6006  * @id:	vio device id struct
6007  *
6008  * Return value:
6009  * 	0 on success / non-zero on failure
6010  **/
6011 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
6012 {
6013 	struct ibmvfc_host *vhost;
6014 	struct Scsi_Host *shost;
6015 	struct device *dev = &vdev->dev;
6016 	int rc = -ENOMEM;
6017 	unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
6018 
6019 	ENTER;
6020 	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
6021 	if (!shost) {
6022 		dev_err(dev, "Couldn't allocate host data\n");
6023 		goto out;
6024 	}
6025 
6026 	shost->transportt = ibmvfc_transport_template;
6027 	shost->can_queue = max_requests;
6028 	shost->max_lun = max_lun;
6029 	shost->max_id = max_targets;
6030 	shost->max_sectors = IBMVFC_MAX_SECTORS;
6031 	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
6032 	shost->unique_id = shost->host_no;
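	/* With mq disabled, fall back to a single hardware queue */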
6033 	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
6034 
6035 	vhost = shost_priv(shost);
6036 	INIT_LIST_HEAD(&vhost->targets);
6037 	INIT_LIST_HEAD(&vhost->purge);
6038 	sprintf(vhost->name, IBMVFC_NAME);
6039 	vhost->host = shost;
6040 	vhost->dev = dev;
6041 	vhost->partition_number = -1;
6042 	vhost->log_level = log_level;
6043 	vhost->task_set = 1;
6044 
6045 	vhost->mq_enabled = mq_enabled;
6046 	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
6047 	vhost->using_channels = 0;
6048 	vhost->do_enquiry = 1;
6049 
6050 	strcpy(vhost->partition_name, "UNKNOWN");
6051 	init_waitqueue_head(&vhost->work_wait_q);
6052 	init_waitqueue_head(&vhost->init_wait_q);
6053 	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
6054 	mutex_init(&vhost->passthru_mutex);
6055 
6056 	if ((rc = ibmvfc_alloc_mem(vhost)))
6057 		goto free_scsi_host;
6058 
6059 	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
6060 					 shost->host_no);
6061 
6062 	if (IS_ERR(vhost->work_thread)) {
6063 		dev_err(dev, "Couldn't create kernel thread: %ld\n",
6064 			PTR_ERR(vhost->work_thread));
6065 		rc = PTR_ERR(vhost->work_thread);
6066 		goto free_host_mem;
6067 	}
6068 
6069 	if ((rc = ibmvfc_init_crq(vhost))) {
6070 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
6071 		goto kill_kthread;
6072 	}
6073 
6074 	if ((rc = scsi_add_host(shost, dev)))
6075 		goto release_crq;
6076 
6077 	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
6078 
6079 	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
6080 					   &ibmvfc_trace_attr))) {
6081 		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
6082 		goto remove_shost;
6083 	}
6084 
6085 	ibmvfc_init_sub_crqs(vhost);
6086 
6087 	if (shost_to_fc_host(shost)->rqst_q)
6088 		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
6089 	dev_set_drvdata(dev, vhost);
6090 	spin_lock(&ibmvfc_driver_lock);
6091 	list_add_tail(&vhost->queue, &ibmvfc_head);
6092 	spin_unlock(&ibmvfc_driver_lock);
6093 
6094 	ibmvfc_send_crq_init(vhost);
6095 	scsi_scan_host(shost);
6096 	return 0;
6097 
6098 remove_shost:
6099 	scsi_remove_host(shost);
6100 release_crq:
6101 	ibmvfc_release_crq_queue(vhost);
6102 kill_kthread:
6103 	kthread_stop(vhost->work_thread);
6104 free_host_mem:
6105 	ibmvfc_free_mem(vhost);
6106 free_scsi_host:
6107 	scsi_host_put(shost);
6108 out:
6109 	LEAVE;
6110 	return rc;
6111 }
6112 
6113 /**
6114  * ibmvfc_remove - Adapter hot plug remove entry point
6115  * @vdev:	vio device struct
6116  *
6117  * Return value:
6118  * 	none
6119  **/
6120 static void ibmvfc_remove(struct vio_dev *vdev)
6121 {
6122 	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6123 	LIST_HEAD(purge);
6124 	unsigned long flags;
6125 
6126 	ENTER;
6127 	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6128 
6129 	spin_lock_irqsave(vhost->host->host_lock, flags);
6130 	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6131 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6132 
6133 	ibmvfc_wait_while_resetting(vhost);
6134 	kthread_stop(vhost->work_thread);
6135 	fc_remove_host(vhost->host);
6136 	scsi_remove_host(vhost->host);
6137 
6138 	spin_lock_irqsave(vhost->host->host_lock, flags);
6139 	ibmvfc_purge_requests(vhost, DID_ERROR);
6140 	list_splice_init(&vhost->purge, &purge);
6141 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6142 	ibmvfc_complete_purge(&purge);
6143 	ibmvfc_release_sub_crqs(vhost);
6144 	ibmvfc_release_crq_queue(vhost);
6145 
6146 	ibmvfc_free_mem(vhost);
6147 	spin_lock(&ibmvfc_driver_lock);
6148 	list_del(&vhost->queue);
6149 	spin_unlock(&ibmvfc_driver_lock);
6150 	scsi_host_put(vhost->host);
6151 	LEAVE;
6152 }
6153 
6154 /**
6155  * ibmvfc_resume - Resume from suspend
6156  * @dev:	device struct
6157  *
6158  * We may have lost an interrupt across suspend/resume, so kick the
6159  * interrupt handler
6160  *
6161  */
6162 static int ibmvfc_resume(struct device *dev)
6163 {
6164 	unsigned long flags;
6165 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6166 	struct vio_dev *vdev = to_vio_dev(dev);
6167 
6168 	spin_lock_irqsave(vhost->host->host_lock, flags);
6169 	vio_disable_interrupts(vdev);
6170 	tasklet_schedule(&vhost->tasklet);
6171 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6172 	return 0;
6173 }
6174 
6175 /**
6176  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6177  * @vdev:	vio device struct
6178  *
6179  * Return value:
6180  *	Number of bytes the driver will need to DMA map at the same time in
6181  *	order to perform well.
6182  */
6183 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6184 {
6185 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
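	/* Heuristic: the event pool plus an assumed worst case of 512KB of mapped data per command slot */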
6186 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6187 }
6188 
6189 static const struct vio_device_id ibmvfc_device_table[] = {
6190 	{"fcp", "IBM,vfc-client"},
6191 	{ "", "" }
6192 };
6193 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6194 
6195 static const struct dev_pm_ops ibmvfc_pm_ops = {
6196 	.resume = ibmvfc_resume
6197 };
6198 
6199 static struct vio_driver ibmvfc_driver = {
6200 	.id_table = ibmvfc_device_table,
6201 	.probe = ibmvfc_probe,
6202 	.remove = ibmvfc_remove,
6203 	.get_desired_dma = ibmvfc_get_desired_dma,
6204 	.name = IBMVFC_NAME,
6205 	.pm = &ibmvfc_pm_ops,
6206 };
6207 
6208 static struct fc_function_template ibmvfc_transport_functions = {
6209 	.show_host_fabric_name = 1,
6210 	.show_host_node_name = 1,
6211 	.show_host_port_name = 1,
6212 	.show_host_supported_classes = 1,
6213 	.show_host_port_type = 1,
6214 	.show_host_port_id = 1,
6215 	.show_host_maxframe_size = 1,
6216 
6217 	.get_host_port_state = ibmvfc_get_host_port_state,
6218 	.show_host_port_state = 1,
6219 
6220 	.get_host_speed = ibmvfc_get_host_speed,
6221 	.show_host_speed = 1,
6222 
6223 	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
6224 	.terminate_rport_io = ibmvfc_terminate_rport_io,
6225 
6226 	.show_rport_maxframe_size = 1,
6227 	.show_rport_supported_classes = 1,
6228 
6229 	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
6230 	.show_rport_dev_loss_tmo = 1,
6231 
6232 	.get_starget_node_name = ibmvfc_get_starget_node_name,
6233 	.show_starget_node_name = 1,
6234 
6235 	.get_starget_port_name = ibmvfc_get_starget_port_name,
6236 	.show_starget_port_name = 1,
6237 
6238 	.get_starget_port_id = ibmvfc_get_starget_port_id,
6239 	.show_starget_port_id = 1,
6240 
6241 	.bsg_request = ibmvfc_bsg_request,
6242 	.bsg_timeout = ibmvfc_bsg_timeout,
6243 };
6244 
6245 /**
6246  * ibmvfc_module_init - Initialize the ibmvfc module
6247  *
6248  * Return value:
6249  * 	0 on success / other on failure
6250  **/
6251 static int __init ibmvfc_module_init(void)
6252 {
6253 	int rc;
6254 
6255 	if (!firmware_has_feature(FW_FEATURE_VIO))
6256 		return -ENODEV;
6257 
6258 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6259 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6260 
6261 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6262 	if (!ibmvfc_transport_template)
6263 		return -ENOMEM;
6264 
6265 	rc = vio_register_driver(&ibmvfc_driver);
6266 	if (rc)
6267 		fc_release_transport(ibmvfc_transport_template);
6268 	return rc;
6269 }
6270 
6271 /**
6272  * ibmvfc_module_exit - Teardown the ibmvfc module
6273  *
6274  * Return value:
6275  * 	nothing
6276  **/
6277 static void __exit ibmvfc_module_exit(void)
6278 {
6279 	vio_unregister_driver(&ibmvfc_driver);
6280 	fc_release_transport(ibmvfc_transport_template);
6281 }
6282 
6283 module_init(ibmvfc_module_init);
6284 module_exit(ibmvfc_module_exit);
6285