xref: /openbmc/linux/drivers/scsi/ibmvscsi/ibmvfc.c (revision c4f7ac64)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9 
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/kthread.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/pm.h>
20 #include <linux/stringify.h>
21 #include <linux/bsg-lib.h>
22 #include <asm/firmware.h>
23 #include <asm/irq.h>
24 #include <asm/rtas.h>
25 #include <asm/vio.h>
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_cmnd.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/scsi_bsg_fc.h>
33 #include "ibmvfc.h"
34 
/* Module parameter backing variables; defaults come from ibmvfc.h. */
static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static unsigned int mq_enabled = IBMVFC_MQ;
static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;

/* Driver-global host list; presumably guarded by ibmvfc_driver_lock —
 * the users of these are outside this chunk, confirm at use sites. */
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
53 
MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

/* Multiqueue / channel parameters: read-only (S_IRUGO), fixed at load. */
module_param_named(mq, mq_enabled, uint, S_IRUGO);
MODULE_PARM_DESC(mq, "Enable multiqueue support. "
		 "[Default=" __stringify(IBMVFC_MQ) "]");
module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");

/* General tunables; entries with S_IWUSR are writable at runtime by root. */
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
/* Mode 0: not visible in sysfs at all. */
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
103 
/*
 * Mapping of (status class, error code) pairs reported by the VIOS to
 * SCSI midlayer results. The 'status' field is matched as a bitmask by
 * ibmvfc_get_err_index(); 'result' becomes the SCSI host byte (<< 16).
 */
static const struct {
	u16 status;	/* IBMVFC_* error class, matched as a mask */
	u16 error;	/* error code within the class */
	u8 result;	/* SCSI host byte (DID_*) to report */
	u8 retry;	/* non-zero if the command may be retried */
	int log;	/* presumably controls error logging; consumer not visible in this chunk */
	char *name;	/* human-readable description */
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
153 
/* Forward declarations: discovery/login job steps defined later in the file. */
static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

/* Sub-CRQ (per-channel queue) setup/teardown, defined later in the file. */
static void ibmvfc_release_sub_crqs(struct ibmvfc_host *);
static void ibmvfc_init_sub_crqs(struct ibmvfc_host *);

/* Fallback description for unrecognized (status, error) pairs. */
static const char *unknown_error = "unknown error";
166 
167 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
168 			  unsigned long length, unsigned long *cookie,
169 			  unsigned long *irq)
170 {
171 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
172 	long rc;
173 
174 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
175 	*cookie = retbuf[0];
176 	*irq = retbuf[1];
177 
178 	return rc;
179 }
180 
181 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
182 {
183 	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
184 
185 	return (host_caps & cap_flags) ? 1 : 0;
186 }
187 
188 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
189 						   struct ibmvfc_cmd *vfc_cmd)
190 {
191 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
192 		return &vfc_cmd->v2.iu;
193 	else
194 		return &vfc_cmd->v1.iu;
195 }
196 
197 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
198 						 struct ibmvfc_cmd *vfc_cmd)
199 {
200 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
201 		return &vfc_cmd->v2.rsp;
202 	else
203 		return &vfc_cmd->v1.rsp;
204 }
205 
206 #ifdef CONFIG_SCSI_IBMVFC_TRACE
207 /**
208  * ibmvfc_trc_start - Log a start trace entry
209  * @evt:		ibmvfc event struct
210  *
211  **/
212 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
213 {
214 	struct ibmvfc_host *vhost = evt->vhost;
215 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
216 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
217 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
218 	struct ibmvfc_trace_entry *entry;
219 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
220 
221 	entry = &vhost->trace[index];
222 	entry->evt = evt;
223 	entry->time = jiffies;
224 	entry->fmt = evt->crq.format;
225 	entry->type = IBMVFC_TRC_START;
226 
227 	switch (entry->fmt) {
228 	case IBMVFC_CMD_FORMAT:
229 		entry->op_code = iu->cdb[0];
230 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
231 		entry->lun = scsilun_to_int(&iu->lun);
232 		entry->tmf_flags = iu->tmf_flags;
233 		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
234 		break;
235 	case IBMVFC_MAD_FORMAT:
236 		entry->op_code = be32_to_cpu(mad->opcode);
237 		break;
238 	default:
239 		break;
240 	}
241 }
242 
243 /**
244  * ibmvfc_trc_end - Log an end trace entry
245  * @evt:		ibmvfc event struct
246  *
247  **/
248 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
249 {
250 	struct ibmvfc_host *vhost = evt->vhost;
251 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
252 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
253 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
254 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
255 	struct ibmvfc_trace_entry *entry;
256 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
257 
258 	entry = &vhost->trace[index];
259 	entry->evt = evt;
260 	entry->time = jiffies;
261 	entry->fmt = evt->crq.format;
262 	entry->type = IBMVFC_TRC_END;
263 
264 	switch (entry->fmt) {
265 	case IBMVFC_CMD_FORMAT:
266 		entry->op_code = iu->cdb[0];
267 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
268 		entry->lun = scsilun_to_int(&iu->lun);
269 		entry->tmf_flags = iu->tmf_flags;
270 		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
271 		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
272 		entry->u.end.fcp_rsp_flags = rsp->flags;
273 		entry->u.end.rsp_code = rsp->data.info.rsp_code;
274 		entry->u.end.scsi_status = rsp->scsi_status;
275 		break;
276 	case IBMVFC_MAD_FORMAT:
277 		entry->op_code = be32_to_cpu(mad->opcode);
278 		entry->u.end.status = be16_to_cpu(mad->status);
279 		break;
280 	default:
281 		break;
282 
283 	}
284 }
285 
#else
/* Tracing disabled at config time: compile the trace hooks away. */
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif
290 
291 /**
292  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
293  * @status:		status / error class
294  * @error:		error
295  *
296  * Return value:
297  *	index into cmd_status / -EINVAL on failure
298  **/
299 static int ibmvfc_get_err_index(u16 status, u16 error)
300 {
301 	int i;
302 
303 	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
304 		if ((cmd_status[i].status & status) == cmd_status[i].status &&
305 		    cmd_status[i].error == error)
306 			return i;
307 
308 	return -EINVAL;
309 }
310 
311 /**
312  * ibmvfc_get_cmd_error - Find the error description for the fcp response
313  * @status:		status / error class
314  * @error:		error
315  *
316  * Return value:
317  *	error description string
318  **/
319 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
320 {
321 	int rc = ibmvfc_get_err_index(status, error);
322 	if (rc >= 0)
323 		return cmd_status[rc].name;
324 	return unknown_error;
325 }
326 
327 /**
328  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
329  * @vhost:      ibmvfc host struct
330  * @vfc_cmd:	ibmvfc command struct
331  *
332  * Return value:
333  *	SCSI result value to return for completed command
334  **/
335 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
336 {
337 	int err;
338 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
339 	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
340 
341 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
342 	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
343 	     rsp->data.info.rsp_code))
344 		return DID_ERROR << 16;
345 
346 	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
347 	if (err >= 0)
348 		return rsp->scsi_status | (cmd_status[err].result << 16);
349 	return rsp->scsi_status | (DID_ERROR << 16);
350 }
351 
352 /**
353  * ibmvfc_retry_cmd - Determine if error status is retryable
354  * @status:		status / error class
355  * @error:		error
356  *
357  * Return value:
358  *	1 if error should be retried / 0 if it should not
359  **/
360 static int ibmvfc_retry_cmd(u16 status, u16 error)
361 {
362 	int rc = ibmvfc_get_err_index(status, error);
363 
364 	if (rc >= 0)
365 		return cmd_status[rc].retry;
366 	return 1;
367 }
368 
/* Fallback string for unrecognized FC explain codes. */
static const char *unknown_fc_explain = "unknown fc explain";

/* FC link service (ELS) reject explanation codes -> description strings. */
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

/* FC generic service (GS/name server) reject explanation codes -> description. */
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
416 
417 /**
418  * ibmvfc_get_ls_explain - Return the FC Explain description text
419  * @status:	FC Explain status
420  *
421  * Returns:
422  *	error string
423  **/
424 static const char *ibmvfc_get_ls_explain(u16 status)
425 {
426 	int i;
427 
428 	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
429 		if (ls_explain[i].fc_explain == status)
430 			return ls_explain[i].name;
431 
432 	return unknown_fc_explain;
433 }
434 
435 /**
436  * ibmvfc_get_gs_explain - Return the FC Explain description text
437  * @status:	FC Explain status
438  *
439  * Returns:
440  *	error string
441  **/
442 static const char *ibmvfc_get_gs_explain(u16 status)
443 {
444 	int i;
445 
446 	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
447 		if (gs_explain[i].fc_explain == status)
448 			return gs_explain[i].name;
449 
450 	return unknown_fc_explain;
451 }
452 
/* FC reject/busy type codes -> description strings. */
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

/* Fallback string for unrecognized FC type codes. */
static const char *unknown_fc_type = "unknown fc type";
466 
467 /**
468  * ibmvfc_get_fc_type - Return the FC Type description text
469  * @status:	FC Type error status
470  *
471  * Returns:
472  *	error string
473  **/
474 static const char *ibmvfc_get_fc_type(u16 status)
475 {
476 	int i;
477 
478 	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
479 		if (fc_type[i].fc_type == status)
480 			return fc_type[i].name;
481 
482 	return unknown_fc_type;
483 }
484 
485 /**
486  * ibmvfc_set_tgt_action - Set the next init action for the target
487  * @tgt:		ibmvfc target struct
488  * @action:		action to perform
489  *
490  * Returns:
491  *	0 if action changed / non-zero if not changed
492  **/
493 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
494 				  enum ibmvfc_target_action action)
495 {
496 	int rc = -EINVAL;
497 
498 	switch (tgt->action) {
499 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
500 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
501 		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
502 			tgt->action = action;
503 			rc = 0;
504 		}
505 		break;
506 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
507 		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
508 		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
509 			tgt->action = action;
510 			rc = 0;
511 		}
512 		break;
513 	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
514 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
515 			tgt->action = action;
516 			rc = 0;
517 		}
518 		break;
519 	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
520 		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
521 			tgt->action = action;
522 			rc = 0;
523 		}
524 		break;
525 	case IBMVFC_TGT_ACTION_DEL_RPORT:
526 		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
527 			tgt->action = action;
528 			rc = 0;
529 		}
530 		break;
531 	case IBMVFC_TGT_ACTION_DELETED_RPORT:
532 		break;
533 	default:
534 		tgt->action = action;
535 		rc = 0;
536 		break;
537 	}
538 
539 	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
540 		tgt->add_rport = 0;
541 
542 	return rc;
543 }
544 
545 /**
546  * ibmvfc_set_host_state - Set the state for the host
547  * @vhost:		ibmvfc host struct
548  * @state:		state to set host to
549  *
550  * Returns:
551  *	0 if state changed / non-zero if not changed
552  **/
553 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
554 				  enum ibmvfc_host_state state)
555 {
556 	int rc = 0;
557 
558 	switch (vhost->state) {
559 	case IBMVFC_HOST_OFFLINE:
560 		rc = -EINVAL;
561 		break;
562 	default:
563 		vhost->state = state;
564 		break;
565 	}
566 
567 	return rc;
568 }
569 
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * Host action state machine: each requested action is only applied when
 * the host's current action permits the transition. RESET and REENABLE
 * are always accepted and, once set, cannot be displaced by the actions
 * in the default group below.
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* Target allocation only follows a completed init wait. */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		/* Only a pending logout can move to waiting for it. */
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		/* Only a pending init can move to waiting for it. */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		/* A query may start from idle, post-init, or after a
		 * failed target delete. */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		/* Target init only follows target allocation. */
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		/* Adapter recovery actions always win. */
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		/* These apply unless a reset/reenable is in flight. */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}
629 
630 /**
631  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
632  * @vhost:		ibmvfc host struct
633  *
634  * Return value:
635  *	nothing
636  **/
637 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
638 {
639 	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
640 	    vhost->state == IBMVFC_ACTIVE) {
641 		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
642 			scsi_block_requests(vhost->host);
643 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
644 		}
645 	} else
646 		vhost->reinit = 1;
647 
648 	wake_up(&vhost->work_wait_q);
649 }
650 
651 /**
652  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
653  * @tgt:		ibmvfc target struct
654  **/
655 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
656 {
657 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
658 		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
659 		tgt->init_retries = 0;
660 	}
661 	wake_up(&tgt->vhost->work_wait_q);
662 }
663 
/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 * Blocks further SCSI requests, schedules every known target for
 * removal, moves the host to @state, and wakes the worker thread to
 * carry out the target deletion.
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	/* Stop new commands before tearing the targets down. */
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}
685 
686 /**
687  * ibmvfc_init_host - Start host initialization
688  * @vhost:		ibmvfc host struct
689  *
690  * Return value:
691  *	nothing
692  **/
693 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
694 {
695 	struct ibmvfc_target *tgt;
696 
697 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
698 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
699 			dev_err(vhost->dev,
700 				"Host initialization retries exceeded. Taking adapter offline\n");
701 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
702 			return;
703 		}
704 	}
705 
706 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
707 		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
708 		vhost->async_crq.cur = 0;
709 
710 		list_for_each_entry(tgt, &vhost->targets, queue)
711 			ibmvfc_del_tgt(tgt);
712 		scsi_block_requests(vhost->host);
713 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
714 		vhost->job_step = ibmvfc_npiv_login;
715 		wake_up(&vhost->work_wait_q);
716 	}
717 }
718 
719 /**
720  * ibmvfc_send_crq - Send a CRQ
721  * @vhost:	ibmvfc host struct
722  * @word1:	the first 64 bits of the data
723  * @word2:	the second 64 bits of the data
724  *
725  * Return value:
726  *	0 on success / other on failure
727  **/
728 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
729 {
730 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
731 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
732 }
733 
734 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
735 			       u64 word2, u64 word3, u64 word4)
736 {
737 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
738 
739 	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
740 				  word1, word2, word3, word4);
741 }
742 
/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	/* 0xC001... header identifies a CRQ initialization request
	 * (PAPR CRQ transport convention). */
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}
755 
/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	/* 0xC002... header identifies a CRQ initialization response
	 * (PAPR CRQ transport convention). */
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}
768 
769 /**
770  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
771  * @vhost:	ibmvfc host who owns the event pool
772  * @queue:      ibmvfc queue struct
773  * @size:       pool size
774  *
775  * Returns zero on success.
776  **/
777 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
778 				  struct ibmvfc_queue *queue,
779 				  unsigned int size)
780 {
781 	int i;
782 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
783 
784 	ENTER;
785 	if (!size)
786 		return 0;
787 
788 	pool->size = size;
789 	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
790 	if (!pool->events)
791 		return -ENOMEM;
792 
793 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
794 					      size * sizeof(*pool->iu_storage),
795 					      &pool->iu_token, 0);
796 
797 	if (!pool->iu_storage) {
798 		kfree(pool->events);
799 		return -ENOMEM;
800 	}
801 
802 	INIT_LIST_HEAD(&queue->sent);
803 	INIT_LIST_HEAD(&queue->free);
804 	spin_lock_init(&queue->l_lock);
805 
806 	for (i = 0; i < size; ++i) {
807 		struct ibmvfc_event *evt = &pool->events[i];
808 
809 		atomic_set(&evt->free, 1);
810 		evt->crq.valid = 0x80;
811 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
812 		evt->xfer_iu = pool->iu_storage + i;
813 		evt->vhost = vhost;
814 		evt->queue = queue;
815 		evt->ext_list = NULL;
816 		list_add_tail(&evt->queue_list, &queue->free);
817 	}
818 
819 	LEAVE;
820 	return 0;
821 }
822 
823 /**
824  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
825  * @vhost:	ibmvfc host who owns the event pool
826  * @queue:      ibmvfc queue struct
827  *
828  **/
829 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
830 				   struct ibmvfc_queue *queue)
831 {
832 	int i;
833 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
834 
835 	ENTER;
836 	for (i = 0; i < pool->size; ++i) {
837 		list_del(&pool->events[i].queue_list);
838 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
839 		if (pool->events[i].ext_list)
840 			dma_pool_free(vhost->sg_pool,
841 				      pool->events[i].ext_list,
842 				      pool->events[i].ext_list_token);
843 	}
844 
845 	kfree(pool->events);
846 	dma_free_coherent(vhost->dev,
847 			  pool->size * sizeof(*pool->iu_storage),
848 			  pool->iu_storage, pool->iu_token);
849 	LEAVE;
850 }
851 
852 /**
853  * ibmvfc_free_queue - Deallocate queue
854  * @vhost:	ibmvfc host struct
855  * @queue:	ibmvfc queue struct
856  *
857  * Unmaps dma and deallocates page for messages
858  **/
859 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
860 			      struct ibmvfc_queue *queue)
861 {
862 	struct device *dev = vhost->dev;
863 
864 	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
865 	free_page((unsigned long)queue->msgs.handle);
866 	queue->msgs.handle = NULL;
867 
868 	ibmvfc_free_event_pool(vhost, queue);
869 }
870 
871 /**
872  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
873  * @vhost:	ibmvfc host struct
874  *
875  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
876  * the crq with the hypervisor.
877  **/
878 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
879 {
880 	long rc = 0;
881 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
882 	struct ibmvfc_queue *crq = &vhost->crq;
883 
884 	ibmvfc_dbg(vhost, "Releasing CRQ\n");
885 	free_irq(vdev->irq, vhost);
886 	tasklet_kill(&vhost->tasklet);
887 	do {
888 		if (rc)
889 			msleep(100);
890 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
891 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
892 
893 	vhost->state = IBMVFC_NO_CRQ;
894 	vhost->logged_in = 0;
895 
896 	ibmvfc_free_queue(vhost, crq);
897 }
898 
/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost:	ibmvfc host struct
 *
 * Tears down the sub-CRQs, asks the hypervisor to re-enable the main
 * CRQ (retrying while busy), resets the channel bookkeeping under the
 * host and queue locks, then re-creates the sub-CRQs.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	unsigned long flags;

	ibmvfc_release_sub_crqs(vhost);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);	/* back off while the hypervisor is busy */
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

	/* Lock order: host_lock, then the queue's q_lock. */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;
	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_init_sub_crqs(vhost);

	return rc;
}
935 
/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Tears down the sub-CRQs, closes and re-registers the main CRQ with
 * the hypervisor, and resets the host's CRQ-related state under the
 * host and queue locks.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_queue *crq = &vhost->crq;

	ibmvfc_release_sub_crqs(vhost);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);	/* back off while the hypervisor is busy */
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Lock order: host_lock, then the queue's q_lock. */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	spin_lock(vhost->crq.q_lock);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	vhost->do_enquiry = 1;
	vhost->using_channels = 0;

	/* Clean out the queue */
	memset(crq->msgs.crq, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);

	spin_unlock(vhost->crq.q_lock);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_init_sub_crqs(vhost);

	return rc;
}
987 
988 /**
989  * ibmvfc_valid_event - Determines if event is valid.
990  * @pool:	event_pool that contains the event
991  * @evt:	ibmvfc event to be checked for validity
992  *
993  * Return value:
994  *	1 if event is valid / 0 if event is not valid
995  **/
996 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
997 			      struct ibmvfc_event *evt)
998 {
999 	int index = evt - pool->events;
1000 	if (index < 0 || index >= pool->size)	/* outside of bounds */
1001 		return 0;
1002 	if (evt != pool->events + index)	/* unaligned */
1003 		return 0;
1004 	return 1;
1005 }
1006 
1007 /**
1008  * ibmvfc_free_event - Free the specified event
1009  * @evt:	ibmvfc_event to be freed
1010  *
1011  **/
1012 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1013 {
1014 	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1015 	unsigned long flags;
1016 
1017 	BUG_ON(!ibmvfc_valid_event(pool, evt));
1018 	BUG_ON(atomic_inc_return(&evt->free) != 1);
1019 
1020 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1021 	list_add_tail(&evt->queue_list, &evt->queue->free);
1022 	if (evt->eh_comp)
1023 		complete(evt->eh_comp);
1024 	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1025 }
1026 
1027 /**
1028  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1029  * @evt:	ibmvfc event struct
1030  *
1031  * This function does not setup any error status, that must be done
1032  * before this function gets called.
1033  **/
1034 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1035 {
1036 	struct scsi_cmnd *cmnd = evt->cmnd;
1037 
1038 	if (cmnd) {
1039 		scsi_dma_unmap(cmnd);
1040 		cmnd->scsi_done(cmnd);
1041 	}
1042 
1043 	ibmvfc_free_event(evt);
1044 }
1045 
1046 /**
1047  * ibmvfc_complete_purge - Complete failed command list
1048  * @purge_list:		list head of failed commands
1049  *
1050  * This function runs completions on commands to fail as a result of a
1051  * host reset or platform migration.
1052  **/
1053 static void ibmvfc_complete_purge(struct list_head *purge_list)
1054 {
1055 	struct ibmvfc_event *evt, *pos;
1056 
1057 	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1058 		list_del(&evt->queue_list);
1059 		ibmvfc_trc_end(evt);
1060 		evt->done(evt);
1061 	}
1062 }
1063 
1064 /**
1065  * ibmvfc_fail_request - Fail request with specified error code
1066  * @evt:		ibmvfc event struct
1067  * @error_code:	error code to fail request with
1068  *
1069  * Return value:
1070  *	none
1071  **/
1072 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1073 {
1074 	if (evt->cmnd) {
1075 		evt->cmnd->result = (error_code << 16);
1076 		evt->done = ibmvfc_scsi_eh_done;
1077 	} else
1078 		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1079 
1080 	del_timer(&evt->timer);
1081 }
1082 
1083 /**
1084  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1085  * @vhost:		ibmvfc host struct
1086  * @error_code:	error code to fail requests with
1087  *
1088  * Return value:
1089  *	none
1090  **/
1091 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1092 {
1093 	struct ibmvfc_event *evt, *pos;
1094 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
1095 	unsigned long flags;
1096 	int hwqs = 0;
1097 	int i;
1098 
1099 	if (vhost->using_channels)
1100 		hwqs = vhost->scsi_scrqs.active_queues;
1101 
1102 	ibmvfc_dbg(vhost, "Purging all requests\n");
1103 	spin_lock_irqsave(&vhost->crq.l_lock, flags);
1104 	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1105 		ibmvfc_fail_request(evt, error_code);
1106 	list_splice_init(&vhost->crq.sent, &vhost->purge);
1107 	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1108 
1109 	for (i = 0; i < hwqs; i++) {
1110 		spin_lock_irqsave(queues[i].q_lock, flags);
1111 		spin_lock(&queues[i].l_lock);
1112 		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1113 			ibmvfc_fail_request(evt, error_code);
1114 		list_splice_init(&queues[i].sent, &vhost->purge);
1115 		spin_unlock(&queues[i].l_lock);
1116 		spin_unlock_irqrestore(queues[i].q_lock, flags);
1117 	}
1118 }
1119 
1120 /**
1121  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1122  * @vhost:	struct ibmvfc host to reset
1123  **/
1124 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1125 {
1126 	ibmvfc_purge_requests(vhost, DID_ERROR);
1127 	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1128 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1129 }
1130 
1131 /**
1132  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1133  * @vhost:	struct ibmvfc host to reset
1134  **/
1135 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1136 {
1137 	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1138 	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1139 		scsi_block_requests(vhost->host);
1140 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1141 		vhost->job_step = ibmvfc_npiv_logout;
1142 		wake_up(&vhost->work_wait_q);
1143 	} else
1144 		ibmvfc_hard_reset_host(vhost);
1145 }
1146 
1147 /**
1148  * ibmvfc_reset_host - Reset the connection to the server
1149  * @vhost:	ibmvfc host struct
1150  **/
1151 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1152 {
1153 	unsigned long flags;
1154 
1155 	spin_lock_irqsave(vhost->host->host_lock, flags);
1156 	__ibmvfc_reset_host(vhost);
1157 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1158 }
1159 
1160 /**
1161  * ibmvfc_retry_host_init - Retry host initialization if allowed
1162  * @vhost:	ibmvfc host struct
1163  *
1164  * Returns: 1 if init will be retried / 0 if not
1165  *
1166  **/
1167 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1168 {
1169 	int retry = 0;
1170 
1171 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1172 		vhost->delay_init = 1;
1173 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1174 			dev_err(vhost->dev,
1175 				"Host initialization retries exceeded. Taking adapter offline\n");
1176 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1177 		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1178 			__ibmvfc_reset_host(vhost);
1179 		else {
1180 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1181 			retry = 1;
1182 		}
1183 	}
1184 
1185 	wake_up(&vhost->work_wait_q);
1186 	return retry;
1187 }
1188 
1189 /**
1190  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1191  * @starget:	scsi target struct
1192  *
1193  * Return value:
1194  *	ibmvfc_target struct / NULL if not found
1195  **/
1196 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1197 {
1198 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1199 	struct ibmvfc_host *vhost = shost_priv(shost);
1200 	struct ibmvfc_target *tgt;
1201 
1202 	list_for_each_entry(tgt, &vhost->targets, queue)
1203 		if (tgt->target_id == starget->id) {
1204 			kref_get(&tgt->kref);
1205 			return tgt;
1206 		}
1207 	return NULL;
1208 }
1209 
1210 /**
1211  * ibmvfc_get_target - Find the specified scsi_target
1212  * @starget:	scsi target struct
1213  *
1214  * Return value:
1215  *	ibmvfc_target struct / NULL if not found
1216  **/
1217 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1218 {
1219 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1220 	struct ibmvfc_target *tgt;
1221 	unsigned long flags;
1222 
1223 	spin_lock_irqsave(shost->host_lock, flags);
1224 	tgt = __ibmvfc_get_target(starget);
1225 	spin_unlock_irqrestore(shost->host_lock, flags);
1226 	return tgt;
1227 }
1228 
1229 /**
1230  * ibmvfc_get_host_speed - Get host port speed
1231  * @shost:		scsi host struct
1232  *
1233  * Return value:
1234  * 	none
1235  **/
1236 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1237 {
1238 	struct ibmvfc_host *vhost = shost_priv(shost);
1239 	unsigned long flags;
1240 
1241 	spin_lock_irqsave(shost->host_lock, flags);
1242 	if (vhost->state == IBMVFC_ACTIVE) {
1243 		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1244 		case 1:
1245 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1246 			break;
1247 		case 2:
1248 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1249 			break;
1250 		case 4:
1251 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1252 			break;
1253 		case 8:
1254 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1255 			break;
1256 		case 10:
1257 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1258 			break;
1259 		case 16:
1260 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1261 			break;
1262 		default:
1263 			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1264 				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1265 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1266 			break;
1267 		}
1268 	} else
1269 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1270 	spin_unlock_irqrestore(shost->host_lock, flags);
1271 }
1272 
1273 /**
1274  * ibmvfc_get_host_port_state - Get host port state
1275  * @shost:		scsi host struct
1276  *
1277  * Return value:
1278  * 	none
1279  **/
1280 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1281 {
1282 	struct ibmvfc_host *vhost = shost_priv(shost);
1283 	unsigned long flags;
1284 
1285 	spin_lock_irqsave(shost->host_lock, flags);
1286 	switch (vhost->state) {
1287 	case IBMVFC_INITIALIZING:
1288 	case IBMVFC_ACTIVE:
1289 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1290 		break;
1291 	case IBMVFC_LINK_DOWN:
1292 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1293 		break;
1294 	case IBMVFC_LINK_DEAD:
1295 	case IBMVFC_HOST_OFFLINE:
1296 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1297 		break;
1298 	case IBMVFC_HALTED:
1299 		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1300 		break;
1301 	case IBMVFC_NO_CRQ:
1302 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1303 		break;
1304 	default:
1305 		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1306 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1307 		break;
1308 	}
1309 	spin_unlock_irqrestore(shost->host_lock, flags);
1310 }
1311 
1312 /**
1313  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1314  * @rport:		rport struct
1315  * @timeout:	timeout value
1316  *
1317  * Return value:
1318  * 	none
1319  **/
1320 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1321 {
1322 	if (timeout)
1323 		rport->dev_loss_tmo = timeout;
1324 	else
1325 		rport->dev_loss_tmo = 1;
1326 }
1327 
1328 /**
1329  * ibmvfc_release_tgt - Free memory allocated for a target
1330  * @kref:		kref struct
1331  *
1332  **/
1333 static void ibmvfc_release_tgt(struct kref *kref)
1334 {
1335 	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1336 	kfree(tgt);
1337 }
1338 
1339 /**
1340  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1341  * @starget:	scsi target struct
1342  *
1343  * Return value:
1344  * 	none
1345  **/
1346 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1347 {
1348 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1349 	fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1350 	if (tgt)
1351 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1352 }
1353 
1354 /**
1355  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1356  * @starget:	scsi target struct
1357  *
1358  * Return value:
1359  * 	none
1360  **/
1361 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1362 {
1363 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1364 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1365 	if (tgt)
1366 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1367 }
1368 
1369 /**
1370  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1371  * @starget:	scsi target struct
1372  *
1373  * Return value:
1374  * 	none
1375  **/
1376 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1377 {
1378 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1379 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1380 	if (tgt)
1381 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1382 }
1383 
1384 /**
1385  * ibmvfc_wait_while_resetting - Wait while the host resets
1386  * @vhost:		ibmvfc host struct
1387  *
1388  * Return value:
1389  * 	0 on success / other on failure
1390  **/
1391 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1392 {
1393 	long timeout = wait_event_timeout(vhost->init_wait_q,
1394 					  ((vhost->state == IBMVFC_ACTIVE ||
1395 					    vhost->state == IBMVFC_HOST_OFFLINE ||
1396 					    vhost->state == IBMVFC_LINK_DEAD) &&
1397 					   vhost->action == IBMVFC_HOST_ACTION_NONE),
1398 					  (init_timeout * HZ));
1399 
1400 	return timeout ? 0 : -EIO;
1401 }
1402 
1403 /**
1404  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1405  * @shost:		scsi host struct
1406  *
1407  * Return value:
1408  * 	0 on success / other on failure
1409  **/
1410 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1411 {
1412 	struct ibmvfc_host *vhost = shost_priv(shost);
1413 
1414 	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1415 	ibmvfc_reset_host(vhost);
1416 	return ibmvfc_wait_while_resetting(vhost);
1417 }
1418 
1419 /**
1420  * ibmvfc_gather_partition_info - Gather info about the LPAR
1421  * @vhost:      ibmvfc host struct
1422  *
1423  * Return value:
1424  *	none
1425  **/
1426 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1427 {
1428 	struct device_node *rootdn;
1429 	const char *name;
1430 	const unsigned int *num;
1431 
1432 	rootdn = of_find_node_by_path("/");
1433 	if (!rootdn)
1434 		return;
1435 
1436 	name = of_get_property(rootdn, "ibm,partition-name", NULL);
1437 	if (name)
1438 		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1439 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
1440 	if (num)
1441 		vhost->partition_number = *num;
1442 	of_node_put(rootdn);
1443 }
1444 
1445 /**
1446  * ibmvfc_set_login_info - Setup info for NPIV login
1447  * @vhost:	ibmvfc host struct
1448  *
1449  * Return value:
1450  *	none
1451  **/
1452 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1453 {
1454 	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1455 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
1456 	struct device_node *of_node = vhost->dev->of_node;
1457 	const char *location;
1458 
1459 	memset(login_info, 0, sizeof(*login_info));
1460 
1461 	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1462 	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1463 	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1464 	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1465 	login_info->partition_num = cpu_to_be32(vhost->partition_number);
1466 	login_info->vfc_frame_version = cpu_to_be32(1);
1467 	login_info->fcp_version = cpu_to_be16(3);
1468 	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1469 	if (vhost->client_migrated)
1470 		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1471 
1472 	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
1473 	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1474 
1475 	if (vhost->mq_enabled || vhost->using_channels)
1476 		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
1477 
1478 	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1479 	login_info->async.len = cpu_to_be32(async_crq->size *
1480 					    sizeof(*async_crq->msgs.async));
1481 	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1482 	strncpy(login_info->device_name,
1483 		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1484 
1485 	location = of_get_property(of_node, "ibm,loc-code", NULL);
1486 	location = location ? location : dev_name(vhost->dev);
1487 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1488 }
1489 
1490 /**
1491  * ibmvfc_get_event - Gets the next free event in pool
1492  * @queue:      ibmvfc queue struct
1493  *
1494  * Returns a free event from the pool.
1495  **/
1496 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
1497 {
1498 	struct ibmvfc_event *evt;
1499 	unsigned long flags;
1500 
1501 	spin_lock_irqsave(&queue->l_lock, flags);
1502 	BUG_ON(list_empty(&queue->free));
1503 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1504 	atomic_set(&evt->free, 0);
1505 	list_del(&evt->queue_list);
1506 	spin_unlock_irqrestore(&queue->l_lock, flags);
1507 	return evt;
1508 }
1509 
1510 /**
1511  * ibmvfc_locked_done - Calls evt completion with host_lock held
1512  * @evt:	ibmvfc evt to complete
1513  *
1514  * All non-scsi command completion callbacks have the expectation that the
1515  * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1516  * MAD evt with the host_lock.
1517  **/
1518 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1519 {
1520 	unsigned long flags;
1521 
1522 	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1523 	evt->_done(evt);
1524 	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1525 }
1526 
1527 /**
1528  * ibmvfc_init_event - Initialize fields in an event struct that are always
1529  *				required.
1530  * @evt:	The event
1531  * @done:	Routine to call when the event is responded to
1532  * @format:	SRP or MAD format
1533  **/
1534 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1535 			      void (*done) (struct ibmvfc_event *), u8 format)
1536 {
1537 	evt->cmnd = NULL;
1538 	evt->sync_iu = NULL;
1539 	evt->eh_comp = NULL;
1540 	evt->crq.format = format;
1541 	if (format == IBMVFC_CMD_FORMAT)
1542 		evt->done = done;
1543 	else {
1544 		evt->_done = done;
1545 		evt->done = ibmvfc_locked_done;
1546 	}
1547 	evt->hwq = 0;
1548 }
1549 
1550 /**
1551  * ibmvfc_map_sg_list - Initialize scatterlist
1552  * @scmd:	scsi command struct
1553  * @nseg:	number of scatterlist segments
1554  * @md:	memory descriptor list to initialize
1555  **/
1556 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1557 			       struct srp_direct_buf *md)
1558 {
1559 	int i;
1560 	struct scatterlist *sg;
1561 
1562 	scsi_for_each_sg(scmd, sg, nseg, i) {
1563 		md[i].va = cpu_to_be64(sg_dma_address(sg));
1564 		md[i].len = cpu_to_be32(sg_dma_len(sg));
1565 		md[i].key = 0;
1566 	}
1567 }
1568 
1569 /**
1570  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1571  * @scmd:		struct scsi_cmnd with the scatterlist
1572  * @evt:		ibmvfc event struct
1573  * @vfc_cmd:	vfc_cmd that contains the memory descriptor
1574  * @dev:		device for which to map dma memory
1575  *
1576  * Returns:
1577  *	0 on success / non-zero on failure
1578  **/
1579 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1580 			      struct ibmvfc_event *evt,
1581 			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1582 {
1583 
1584 	int sg_mapped;
1585 	struct srp_direct_buf *data = &vfc_cmd->ioba;
1586 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1587 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1588 
1589 	if (cls3_error)
1590 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1591 
1592 	sg_mapped = scsi_dma_map(scmd);
1593 	if (!sg_mapped) {
1594 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1595 		return 0;
1596 	} else if (unlikely(sg_mapped < 0)) {
1597 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1598 			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1599 		return sg_mapped;
1600 	}
1601 
1602 	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1603 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1604 		iu->add_cdb_len |= IBMVFC_WRDATA;
1605 	} else {
1606 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1607 		iu->add_cdb_len |= IBMVFC_RDDATA;
1608 	}
1609 
1610 	if (sg_mapped == 1) {
1611 		ibmvfc_map_sg_list(scmd, sg_mapped, data);
1612 		return 0;
1613 	}
1614 
1615 	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1616 
1617 	if (!evt->ext_list) {
1618 		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1619 					       &evt->ext_list_token);
1620 
1621 		if (!evt->ext_list) {
1622 			scsi_dma_unmap(scmd);
1623 			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1624 				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1625 			return -ENOMEM;
1626 		}
1627 	}
1628 
1629 	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1630 
1631 	data->va = cpu_to_be64(evt->ext_list_token);
1632 	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1633 	data->key = 0;
1634 	return 0;
1635 }
1636 
1637 /**
1638  * ibmvfc_timeout - Internal command timeout handler
1639  * @t:	struct ibmvfc_event that timed out
1640  *
1641  * Called when an internally generated command times out
1642  **/
1643 static void ibmvfc_timeout(struct timer_list *t)
1644 {
1645 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
1646 	struct ibmvfc_host *vhost = evt->vhost;
1647 	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1648 	ibmvfc_reset_host(vhost);
1649 }
1650 
1651 /**
1652  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1653  * @evt:		event to be sent
1654  * @vhost:		ibmvfc host struct
1655  * @timeout:	timeout in seconds - 0 means do not time command
1656  *
1657  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1658  **/
1659 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1660 			     struct ibmvfc_host *vhost, unsigned long timeout)
1661 {
1662 	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
1663 	unsigned long flags;
1664 	int rc;
1665 
1666 	/* Copy the IU into the transfer area */
1667 	*evt->xfer_iu = evt->iu;
1668 	if (evt->crq.format == IBMVFC_CMD_FORMAT)
1669 		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1670 	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1671 		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1672 	else
1673 		BUG();
1674 
1675 	timer_setup(&evt->timer, ibmvfc_timeout, 0);
1676 
1677 	if (timeout) {
1678 		evt->timer.expires = jiffies + (timeout * HZ);
1679 		add_timer(&evt->timer);
1680 	}
1681 
1682 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1683 	list_add_tail(&evt->queue_list, &evt->queue->sent);
1684 
1685 	mb();
1686 
1687 	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1688 		rc = ibmvfc_send_sub_crq(vhost,
1689 					 evt->queue->vios_cookie,
1690 					 be64_to_cpu(crq_as_u64[0]),
1691 					 be64_to_cpu(crq_as_u64[1]),
1692 					 0, 0);
1693 	else
1694 		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1695 				     be64_to_cpu(crq_as_u64[1]));
1696 
1697 	if (rc) {
1698 		list_del(&evt->queue_list);
1699 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1700 		del_timer(&evt->timer);
1701 
1702 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1703 		 * Firmware will send a CRQ with a transport event (0xFF) to
1704 		 * tell this client what has happened to the transport. This
1705 		 * will be handled in ibmvfc_handle_crq()
1706 		 */
1707 		if (rc == H_CLOSED) {
1708 			if (printk_ratelimit())
1709 				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1710 			if (evt->cmnd)
1711 				scsi_dma_unmap(evt->cmnd);
1712 			ibmvfc_free_event(evt);
1713 			return SCSI_MLQUEUE_HOST_BUSY;
1714 		}
1715 
1716 		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1717 		if (evt->cmnd) {
1718 			evt->cmnd->result = DID_ERROR << 16;
1719 			evt->done = ibmvfc_scsi_eh_done;
1720 		} else
1721 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1722 
1723 		evt->done(evt);
1724 	} else {
1725 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1726 		ibmvfc_trc_start(evt);
1727 	}
1728 
1729 	return 0;
1730 }
1731 
1732 /**
1733  * ibmvfc_log_error - Log an error for the failed command if appropriate
1734  * @evt:	ibmvfc event to log
1735  *
1736  **/
1737 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1738 {
1739 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1740 	struct ibmvfc_host *vhost = evt->vhost;
1741 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1742 	struct scsi_cmnd *cmnd = evt->cmnd;
1743 	const char *err = unknown_error;
1744 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1745 	int logerr = 0;
1746 	int rsp_code = 0;
1747 
1748 	if (index >= 0) {
1749 		logerr = cmd_status[index].log;
1750 		err = cmd_status[index].name;
1751 	}
1752 
1753 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1754 		return;
1755 
1756 	if (rsp->flags & FCP_RSP_LEN_VALID)
1757 		rsp_code = rsp->data.info.rsp_code;
1758 
1759 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1760 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1761 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1762 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1763 }
1764 
1765 /**
1766  * ibmvfc_relogin - Log back into the specified device
1767  * @sdev:	scsi device struct
1768  *
1769  **/
1770 static void ibmvfc_relogin(struct scsi_device *sdev)
1771 {
1772 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1773 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1774 	struct ibmvfc_target *tgt;
1775 	unsigned long flags;
1776 
1777 	spin_lock_irqsave(vhost->host->host_lock, flags);
1778 	list_for_each_entry(tgt, &vhost->targets, queue) {
1779 		if (rport == tgt->rport) {
1780 			ibmvfc_del_tgt(tgt);
1781 			break;
1782 		}
1783 	}
1784 
1785 	ibmvfc_reinit_host(vhost);
1786 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1787 }
1788 
1789 /**
1790  * ibmvfc_scsi_done - Handle responses from commands
1791  * @evt:	ibmvfc event to be handled
1792  *
1793  * Used as a callback when sending scsi cmds.
1794  **/
1795 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1796 {
1797 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1798 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1799 	struct scsi_cmnd *cmnd = evt->cmnd;
1800 	u32 rsp_len = 0;
1801 	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1802 
1803 	if (cmnd) {
1804 		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1805 			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1806 		else if (rsp->flags & FCP_RESID_UNDER)
1807 			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1808 		else
1809 			scsi_set_resid(cmnd, 0);
1810 
1811 		if (vfc_cmd->status) {
1812 			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1813 
1814 			if (rsp->flags & FCP_RSP_LEN_VALID)
1815 				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1816 			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1817 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1818 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1819 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1820 			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1821 			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1822 				ibmvfc_relogin(cmnd->device);
1823 
1824 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1825 				cmnd->result = (DID_ERROR << 16);
1826 
1827 			ibmvfc_log_error(evt);
1828 		}
1829 
1830 		if (!cmnd->result &&
1831 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1832 			cmnd->result = (DID_ERROR << 16);
1833 
1834 		scsi_dma_unmap(cmnd);
1835 		cmnd->scsi_done(cmnd);
1836 	}
1837 
1838 	ibmvfc_free_event(evt);
1839 }
1840 
1841 /**
1842  * ibmvfc_host_chkready - Check if the host can accept commands
1843  * @vhost:	 struct ibmvfc host
1844  *
1845  * Returns:
1846  *	1 if host can accept command / 0 if not
1847  **/
1848 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1849 {
1850 	int result = 0;
1851 
1852 	switch (vhost->state) {
1853 	case IBMVFC_LINK_DEAD:
1854 	case IBMVFC_HOST_OFFLINE:
1855 		result = DID_NO_CONNECT << 16;
1856 		break;
1857 	case IBMVFC_NO_CRQ:
1858 	case IBMVFC_INITIALIZING:
1859 	case IBMVFC_HALTED:
1860 	case IBMVFC_LINK_DOWN:
1861 		result = DID_REQUEUE << 16;
1862 		break;
1863 	case IBMVFC_ACTIVE:
1864 		result = 0;
1865 		break;
1866 	}
1867 
1868 	return result;
1869 }
1870 
/* Zero and fill the event's vfc_cmd with the response buffer location,
 * frame type, lengths, cancel key, and target identity for a command
 * addressed to @sdev. Returns a pointer to the initialized vfc_cmd.
 */
static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	size_t offset;

	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	/* The FCP response lives at a version-dependent offset inside the
	 * command; the v2 layout additionally carries the target WWPN.
	 */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
	} else
		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
	/* Cancel key groups commands per-device for cancel handling */
	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &iu->lun);

	return vfc_cmd;
}
1897 
1898 /**
1899  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1900  * @shost:	scsi host struct
1901  * @cmnd:	struct scsi_cmnd to be executed
1902  *
1903  * Returns:
1904  *	0 on success / other on failure
1905  **/
1906 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1907 {
1908 	struct ibmvfc_host *vhost = shost_priv(shost);
1909 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1910 	struct ibmvfc_cmd *vfc_cmd;
1911 	struct ibmvfc_fcp_cmd_iu *iu;
1912 	struct ibmvfc_event *evt;
1913 	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
1914 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1915 	u16 scsi_channel;
1916 	int rc;
1917 
1918 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1919 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1920 		cmnd->result = rc;
1921 		cmnd->scsi_done(cmnd);
1922 		return 0;
1923 	}
1924 
1925 	cmnd->result = (DID_OK << 16);
1926 	if (vhost->using_channels) {
1927 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1928 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1929 		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
1930 	} else
1931 		evt = ibmvfc_get_event(&vhost->crq);
1932 
1933 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1934 	evt->cmnd = cmnd;
1935 
1936 	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1937 	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1938 
1939 	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1940 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1941 
1942 	if (cmnd->flags & SCMD_TAGGED) {
1943 		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1944 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1945 	}
1946 
1947 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
1948 
1949 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1950 		return ibmvfc_send_event(evt, vhost, 0);
1951 
1952 	ibmvfc_free_event(evt);
1953 	if (rc == -ENOMEM)
1954 		return SCSI_MLQUEUE_HOST_BUSY;
1955 
1956 	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1957 		scmd_printk(KERN_ERR, cmnd,
1958 			    "Failed to map DMA buffer for command. rc=%d\n", rc);
1959 
1960 	cmnd->result = DID_ERROR << 16;
1961 	cmnd->scsi_done(cmnd);
1962 	return 0;
1963 }
1964 
1965 /**
1966  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1967  * @evt:	ibmvfc event struct
1968  *
1969  **/
1970 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1971 {
1972 	/* copy the response back */
1973 	if (evt->sync_iu)
1974 		*evt->sync_iu = *evt->xfer_iu;
1975 
1976 	complete(&evt->comp);
1977 }
1978 
1979 /**
1980  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1981  * @evt:	struct ibmvfc_event
1982  *
1983  **/
1984 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1985 {
1986 	struct ibmvfc_host *vhost = evt->vhost;
1987 
1988 	ibmvfc_free_event(evt);
1989 	vhost->aborting_passthru = 0;
1990 	dev_info(vhost->dev, "Passthru command cancelled\n");
1991 }
1992 
1993 /**
1994  * ibmvfc_bsg_timeout - Handle a BSG timeout
1995  * @job:	struct bsg_job that timed out
1996  *
1997  * Returns:
1998  *	0 on success / other on failure
1999  **/
2000 static int ibmvfc_bsg_timeout(struct bsg_job *job)
2001 {
2002 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2003 	unsigned long port_id = (unsigned long)job->dd_data;
2004 	struct ibmvfc_event *evt;
2005 	struct ibmvfc_tmf *tmf;
2006 	unsigned long flags;
2007 	int rc;
2008 
2009 	ENTER;
2010 	spin_lock_irqsave(vhost->host->host_lock, flags);
2011 	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
2012 		__ibmvfc_reset_host(vhost);
2013 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2014 		return 0;
2015 	}
2016 
2017 	vhost->aborting_passthru = 1;
2018 	evt = ibmvfc_get_event(&vhost->crq);
2019 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2020 
2021 	tmf = &evt->iu.tmf;
2022 	memset(tmf, 0, sizeof(*tmf));
2023 	tmf->common.version = cpu_to_be32(1);
2024 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2025 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2026 	tmf->scsi_id = cpu_to_be64(port_id);
2027 	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2028 	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2029 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2030 
2031 	if (rc != 0) {
2032 		vhost->aborting_passthru = 0;
2033 		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2034 		rc = -EIO;
2035 	} else
2036 		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2037 			 port_id);
2038 
2039 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2040 
2041 	LEAVE;
2042 	return rc;
2043 }
2044 
2045 /**
2046  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2047  * @vhost:		struct ibmvfc_host to send command
2048  * @port_id:	port ID to send command
2049  *
2050  * Returns:
2051  *	0 on success / other on failure
2052  **/
2053 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2054 {
2055 	struct ibmvfc_port_login *plogi;
2056 	struct ibmvfc_target *tgt;
2057 	struct ibmvfc_event *evt;
2058 	union ibmvfc_iu rsp_iu;
2059 	unsigned long flags;
2060 	int rc = 0, issue_login = 1;
2061 
2062 	ENTER;
2063 	spin_lock_irqsave(vhost->host->host_lock, flags);
2064 	list_for_each_entry(tgt, &vhost->targets, queue) {
2065 		if (tgt->scsi_id == port_id) {
2066 			issue_login = 0;
2067 			break;
2068 		}
2069 	}
2070 
2071 	if (!issue_login)
2072 		goto unlock_out;
2073 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2074 		goto unlock_out;
2075 
2076 	evt = ibmvfc_get_event(&vhost->crq);
2077 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2078 	plogi = &evt->iu.plogi;
2079 	memset(plogi, 0, sizeof(*plogi));
2080 	plogi->common.version = cpu_to_be32(1);
2081 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2082 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
2083 	plogi->scsi_id = cpu_to_be64(port_id);
2084 	evt->sync_iu = &rsp_iu;
2085 	init_completion(&evt->comp);
2086 
2087 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2088 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2089 
2090 	if (rc)
2091 		return -EIO;
2092 
2093 	wait_for_completion(&evt->comp);
2094 
2095 	if (rsp_iu.plogi.common.status)
2096 		rc = -EIO;
2097 
2098 	spin_lock_irqsave(vhost->host->host_lock, flags);
2099 	ibmvfc_free_event(evt);
2100 unlock_out:
2101 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2102 	LEAVE;
2103 	return rc;
2104 }
2105 
2106 /**
2107  * ibmvfc_bsg_request - Handle a BSG request
2108  * @job:	struct bsg_job to be executed
2109  *
2110  * Returns:
2111  *	0 on success / other on failure
2112  **/
2113 static int ibmvfc_bsg_request(struct bsg_job *job)
2114 {
2115 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2116 	struct fc_rport *rport = fc_bsg_to_rport(job);
2117 	struct ibmvfc_passthru_mad *mad;
2118 	struct ibmvfc_event *evt;
2119 	union ibmvfc_iu rsp_iu;
2120 	unsigned long flags, port_id = -1;
2121 	struct fc_bsg_request *bsg_request = job->request;
2122 	struct fc_bsg_reply *bsg_reply = job->reply;
2123 	unsigned int code = bsg_request->msgcode;
2124 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
2125 	u32 fc_flags, rsp_len;
2126 
2127 	ENTER;
2128 	bsg_reply->reply_payload_rcv_len = 0;
2129 	if (rport)
2130 		port_id = rport->port_id;
2131 
2132 	switch (code) {
2133 	case FC_BSG_HST_ELS_NOLOGIN:
2134 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2135 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
2136 			bsg_request->rqst_data.h_els.port_id[2];
2137 		fallthrough;
2138 	case FC_BSG_RPT_ELS:
2139 		fc_flags = IBMVFC_FC_ELS;
2140 		break;
2141 	case FC_BSG_HST_CT:
2142 		issue_login = 1;
2143 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2144 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2145 			bsg_request->rqst_data.h_ct.port_id[2];
2146 		fallthrough;
2147 	case FC_BSG_RPT_CT:
2148 		fc_flags = IBMVFC_FC_CT_IU;
2149 		break;
2150 	default:
2151 		return -ENOTSUPP;
2152 	}
2153 
2154 	if (port_id == -1)
2155 		return -EINVAL;
2156 	if (!mutex_trylock(&vhost->passthru_mutex))
2157 		return -EBUSY;
2158 
2159 	job->dd_data = (void *)port_id;
2160 	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2161 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2162 
2163 	if (!req_seg) {
2164 		mutex_unlock(&vhost->passthru_mutex);
2165 		return -ENOMEM;
2166 	}
2167 
2168 	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2169 			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2170 
2171 	if (!rsp_seg) {
2172 		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2173 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2174 		mutex_unlock(&vhost->passthru_mutex);
2175 		return -ENOMEM;
2176 	}
2177 
2178 	if (req_seg > 1 || rsp_seg > 1) {
2179 		rc = -EINVAL;
2180 		goto out;
2181 	}
2182 
2183 	if (issue_login)
2184 		rc = ibmvfc_bsg_plogi(vhost, port_id);
2185 
2186 	spin_lock_irqsave(vhost->host->host_lock, flags);
2187 
2188 	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2189 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2190 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2191 		goto out;
2192 	}
2193 
2194 	evt = ibmvfc_get_event(&vhost->crq);
2195 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2196 	mad = &evt->iu.passthru;
2197 
2198 	memset(mad, 0, sizeof(*mad));
2199 	mad->common.version = cpu_to_be32(1);
2200 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2201 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2202 
2203 	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2204 		offsetof(struct ibmvfc_passthru_mad, iu));
2205 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2206 
2207 	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2208 	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2209 	mad->iu.flags = cpu_to_be32(fc_flags);
2210 	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2211 
2212 	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2213 	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2214 	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2215 	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2216 	mad->iu.scsi_id = cpu_to_be64(port_id);
2217 	mad->iu.tag = cpu_to_be64((u64)evt);
2218 	rsp_len = be32_to_cpu(mad->iu.rsp.len);
2219 
2220 	evt->sync_iu = &rsp_iu;
2221 	init_completion(&evt->comp);
2222 	rc = ibmvfc_send_event(evt, vhost, 0);
2223 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2224 
2225 	if (rc) {
2226 		rc = -EIO;
2227 		goto out;
2228 	}
2229 
2230 	wait_for_completion(&evt->comp);
2231 
2232 	if (rsp_iu.passthru.common.status)
2233 		rc = -EIO;
2234 	else
2235 		bsg_reply->reply_payload_rcv_len = rsp_len;
2236 
2237 	spin_lock_irqsave(vhost->host->host_lock, flags);
2238 	ibmvfc_free_event(evt);
2239 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2240 	bsg_reply->result = rc;
2241 	bsg_job_done(job, bsg_reply->result,
2242 		       bsg_reply->reply_payload_rcv_len);
2243 	rc = 0;
2244 out:
2245 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2246 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2247 	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2248 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2249 	mutex_unlock(&vhost->passthru_mutex);
2250 	LEAVE;
2251 	return rc;
2252 }
2253 
2254 /**
2255  * ibmvfc_reset_device - Reset the device with the specified reset type
2256  * @sdev:	scsi device to reset
2257  * @type:	reset type
2258  * @desc:	reset type description for log messages
2259  *
2260  * Returns:
2261  *	0 on success / other on failure
2262  **/
2263 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2264 {
2265 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2266 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2267 	struct ibmvfc_cmd *tmf;
2268 	struct ibmvfc_event *evt = NULL;
2269 	union ibmvfc_iu rsp_iu;
2270 	struct ibmvfc_fcp_cmd_iu *iu;
2271 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2272 	int rsp_rc = -EBUSY;
2273 	unsigned long flags;
2274 	int rsp_code = 0;
2275 
2276 	spin_lock_irqsave(vhost->host->host_lock, flags);
2277 	if (vhost->state == IBMVFC_ACTIVE) {
2278 		if (vhost->using_channels)
2279 			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2280 		else
2281 			evt = ibmvfc_get_event(&vhost->crq);
2282 
2283 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2284 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2285 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2286 
2287 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2288 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2289 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2290 		iu->tmf_flags = type;
2291 		evt->sync_iu = &rsp_iu;
2292 
2293 		init_completion(&evt->comp);
2294 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2295 	}
2296 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2297 
2298 	if (rsp_rc != 0) {
2299 		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2300 			    desc, rsp_rc);
2301 		return -EIO;
2302 	}
2303 
2304 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2305 	wait_for_completion(&evt->comp);
2306 
2307 	if (rsp_iu.cmd.status)
2308 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2309 
2310 	if (rsp_code) {
2311 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2312 			rsp_code = fc_rsp->data.info.rsp_code;
2313 
2314 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2315 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2316 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2317 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2318 			    fc_rsp->scsi_status);
2319 		rsp_rc = -EIO;
2320 	} else
2321 		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2322 
2323 	spin_lock_irqsave(vhost->host->host_lock, flags);
2324 	ibmvfc_free_event(evt);
2325 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2326 	return rsp_rc;
2327 }
2328 
2329 /**
2330  * ibmvfc_match_rport - Match function for specified remote port
2331  * @evt:	ibmvfc event struct
2332  * @rport:	device to match
2333  *
2334  * Returns:
2335  *	1 if event matches rport / 0 if event does not match rport
2336  **/
2337 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2338 {
2339 	struct fc_rport *cmd_rport;
2340 
2341 	if (evt->cmnd) {
2342 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2343 		if (cmd_rport == rport)
2344 			return 1;
2345 	}
2346 	return 0;
2347 }
2348 
2349 /**
2350  * ibmvfc_match_target - Match function for specified target
2351  * @evt:	ibmvfc event struct
2352  * @device:	device to match (starget)
2353  *
2354  * Returns:
2355  *	1 if event matches starget / 0 if event does not match starget
2356  **/
2357 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2358 {
2359 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2360 		return 1;
2361 	return 0;
2362 }
2363 
2364 /**
2365  * ibmvfc_match_lun - Match function for specified LUN
2366  * @evt:	ibmvfc event struct
2367  * @device:	device to match (sdev)
2368  *
2369  * Returns:
2370  *	1 if event matches sdev / 0 if event does not match sdev
2371  **/
2372 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2373 {
2374 	if (evt->cmnd && evt->cmnd->device == device)
2375 		return 1;
2376 	return 0;
2377 }
2378 
2379 /**
2380  * ibmvfc_event_is_free - Check if event is free or not
2381  * @evt:	ibmvfc event struct
2382  *
2383  * Returns:
2384  *	true / false
2385  **/
2386 static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
2387 {
2388 	struct ibmvfc_event *loop_evt;
2389 
2390 	list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
2391 		if (loop_evt == evt)
2392 			return true;
2393 
2394 	return false;
2395 }
2396 
2397 /**
2398  * ibmvfc_wait_for_ops - Wait for ops to complete
2399  * @vhost:	ibmvfc host struct
2400  * @device:	device to match (starget or sdev)
2401  * @match:	match function
2402  *
2403  * Returns:
2404  *	SUCCESS / FAILED
2405  **/
2406 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2407 			       int (*match) (struct ibmvfc_event *, void *))
2408 {
2409 	struct ibmvfc_event *evt;
2410 	DECLARE_COMPLETION_ONSTACK(comp);
2411 	int wait, i, q_index, q_size;
2412 	unsigned long flags;
2413 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2414 	struct ibmvfc_queue *queues;
2415 
2416 	ENTER;
2417 	if (vhost->mq_enabled && vhost->using_channels) {
2418 		queues = vhost->scsi_scrqs.scrqs;
2419 		q_size = vhost->scsi_scrqs.active_queues;
2420 	} else {
2421 		queues = &vhost->crq;
2422 		q_size = 1;
2423 	}
2424 
2425 	do {
2426 		wait = 0;
2427 		spin_lock_irqsave(vhost->host->host_lock, flags);
2428 		for (q_index = 0; q_index < q_size; q_index++) {
2429 			spin_lock(&queues[q_index].l_lock);
2430 			for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2431 				evt = &queues[q_index].evt_pool.events[i];
2432 				if (!ibmvfc_event_is_free(evt)) {
2433 					if (match(evt, device)) {
2434 						evt->eh_comp = &comp;
2435 						wait++;
2436 					}
2437 				}
2438 			}
2439 			spin_unlock(&queues[q_index].l_lock);
2440 		}
2441 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2442 
2443 		if (wait) {
2444 			timeout = wait_for_completion_timeout(&comp, timeout);
2445 
2446 			if (!timeout) {
2447 				wait = 0;
2448 				spin_lock_irqsave(vhost->host->host_lock, flags);
2449 				for (q_index = 0; q_index < q_size; q_index++) {
2450 					spin_lock(&queues[q_index].l_lock);
2451 					for (i = 0; i < queues[q_index].evt_pool.size; i++) {
2452 						evt = &queues[q_index].evt_pool.events[i];
2453 						if (!ibmvfc_event_is_free(evt)) {
2454 							if (match(evt, device)) {
2455 								evt->eh_comp = NULL;
2456 								wait++;
2457 							}
2458 						}
2459 					}
2460 					spin_unlock(&queues[q_index].l_lock);
2461 				}
2462 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2463 				if (wait)
2464 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2465 				LEAVE;
2466 				return wait ? FAILED : SUCCESS;
2467 			}
2468 		}
2469 	} while (wait);
2470 
2471 	LEAVE;
2472 	return SUCCESS;
2473 }
2474 
/**
 * ibmvfc_init_tmf - Initialize a TMF cancel MAD event
 * @queue:	ibmvfc queue to allocate the event from
 * @sdev:	scsi device the TMF targets
 * @type:	IBMVFC_TMF_* flags for the request
 *
 * Allocates an event from @queue and fills in a TMF MAD aimed at @sdev,
 * ready for ibmvfc_send_event(). The event's completion is initialized
 * so the caller can wait on it synchronously.
 *
 * Returns:
 *	pointer to the initialized (not yet sent) event
 **/
static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
					    struct scsi_device *sdev,
					    int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct scsi_target *starget = scsi_target(sdev);
	struct fc_rport *rport = starget_to_rport(starget);
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;

	evt = ibmvfc_get_event(queue);
	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);

	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	/* MAD version 2 adds the target WWPN field */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(rport->port_name);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &tmf->lun);
	/* Drop the suppress-ABTS flag if the firmware can't honor it */
	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
	if (vhost->state == IBMVFC_ACTIVE)
		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
	else
		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);

	init_completion(&evt->comp);

	return evt;
}
2513 
/**
 * ibmvfc_cancel_all_mq - Cancel outstanding commands on all sub-CRQ channels
 * @sdev:	scsi device to cancel commands for
 * @type:	type of error recovery being performed
 *
 * Scans every active sub-CRQ channel for an event belonging to @sdev and,
 * for each channel that has one (while logged in), sends a TMF cancel MAD.
 * All cancel responses are then waited for and checked.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
{
	struct ibmvfc_host *vhost = shost_priv(sdev->host);
	struct ibmvfc_event *evt, *found_evt, *temp;
	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
	unsigned long flags;
	int num_hwq, i;
	int fail = 0;
	LIST_HEAD(cancelq);
	u16 status;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	num_hwq = vhost->scsi_scrqs.active_queues;
	for (i = 0; i < num_hwq; i++) {
		spin_lock(queues[i].q_lock);
		spin_lock(&queues[i].l_lock);
		/* Does this channel have anything in flight for sdev? */
		found_evt = NULL;
		list_for_each_entry(evt, &queues[i].sent, queue_list) {
			if (evt->cmnd && evt->cmnd->device == sdev) {
				found_evt = evt;
				break;
			}
		}
		spin_unlock(&queues[i].l_lock);

		if (found_evt && vhost->logged_in) {
			/* Queue a per-channel cancel; collect it for the wait below */
			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
			evt->sync_iu = &queues[i].cancel_rsp;
			ibmvfc_send_event(evt, vhost, default_timeout);
			list_add_tail(&evt->cancel, &cancelq);
		}

		spin_unlock(queues[i].q_lock);
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	if (list_empty(&cancelq)) {
		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
		return 0;
	}

	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");

	/* Wait for every issued cancel and inspect its MAD status */
	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
		wait_for_completion(&evt->comp);
		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
		list_del(&evt->cancel);
		ibmvfc_free_event(evt);

		if (status != IBMVFC_MAD_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
			switch (status) {
			case IBMVFC_MAD_DRIVER_FAILED:
			case IBMVFC_MAD_CRQ_ERROR:
			/* Host adapter most likely going through reset, return success to
			 * the caller will wait for the command being cancelled to get returned
			 */
				break;
			default:
				fail = 1;
				break;
			}
		}
	}

	if (fail)
		return -EIO;

	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
	LEAVE;
	return 0;
}
2588 
2589 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2590 {
2591 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2592 	struct ibmvfc_event *evt, *found_evt;
2593 	union ibmvfc_iu rsp;
2594 	int rsp_rc = -EBUSY;
2595 	unsigned long flags;
2596 	u16 status;
2597 
2598 	ENTER;
2599 	found_evt = NULL;
2600 	spin_lock_irqsave(vhost->host->host_lock, flags);
2601 	spin_lock(&vhost->crq.l_lock);
2602 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2603 		if (evt->cmnd && evt->cmnd->device == sdev) {
2604 			found_evt = evt;
2605 			break;
2606 		}
2607 	}
2608 	spin_unlock(&vhost->crq.l_lock);
2609 
2610 	if (!found_evt) {
2611 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2612 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2613 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2614 		return 0;
2615 	}
2616 
2617 	if (vhost->logged_in) {
2618 		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2619 		evt->sync_iu = &rsp;
2620 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2621 	}
2622 
2623 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2624 
2625 	if (rsp_rc != 0) {
2626 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2627 		/* If failure is received, the host adapter is most likely going
2628 		 through reset, return success so the caller will wait for the command
2629 		 being cancelled to get returned */
2630 		return 0;
2631 	}
2632 
2633 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2634 
2635 	wait_for_completion(&evt->comp);
2636 	status = be16_to_cpu(rsp.mad_common.status);
2637 	spin_lock_irqsave(vhost->host->host_lock, flags);
2638 	ibmvfc_free_event(evt);
2639 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2640 
2641 	if (status != IBMVFC_MAD_SUCCESS) {
2642 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2643 		switch (status) {
2644 		case IBMVFC_MAD_DRIVER_FAILED:
2645 		case IBMVFC_MAD_CRQ_ERROR:
2646 			/* Host adapter most likely going through reset, return success to
2647 			 the caller will wait for the command being cancelled to get returned */
2648 			return 0;
2649 		default:
2650 			return -EIO;
2651 		};
2652 	}
2653 
2654 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2655 	return 0;
2656 }
2657 
2658 /**
2659  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2660  * @sdev:	scsi device to cancel commands
2661  * @type:	type of error recovery being performed
2662  *
2663  * This sends a cancel to the VIOS for the specified device. This does
2664  * NOT send any abort to the actual device. That must be done separately.
2665  *
2666  * Returns:
2667  *	0 on success / other on failure
2668  **/
2669 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2670 {
2671 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2672 
2673 	if (vhost->mq_enabled && vhost->using_channels)
2674 		return ibmvfc_cancel_all_mq(sdev, type);
2675 	else
2676 		return ibmvfc_cancel_all_sq(sdev, type);
2677 }
2678 
2679 /**
2680  * ibmvfc_match_key - Match function for specified cancel key
2681  * @evt:	ibmvfc event struct
2682  * @key:	cancel key to match
2683  *
2684  * Returns:
2685  *	1 if event matches key / 0 if event does not match key
2686  **/
2687 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2688 {
2689 	unsigned long cancel_key = (unsigned long)key;
2690 
2691 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2692 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2693 		return 1;
2694 	return 0;
2695 }
2696 
2697 /**
2698  * ibmvfc_match_evt - Match function for specified event
2699  * @evt:	ibmvfc event struct
2700  * @match:	event to match
2701  *
2702  * Returns:
2703  *	1 if event matches key / 0 if event does not match key
2704  **/
2705 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2706 {
2707 	if (evt == match)
2708 		return 1;
2709 	return 0;
2710 }
2711 
2712 /**
2713  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2714  * @sdev:	scsi device to abort commands
2715  *
2716  * This sends an Abort Task Set to the VIOS for the specified device. This does
2717  * NOT send any cancel to the VIOS. That must be done separately.
2718  *
2719  * Returns:
2720  *	0 on success / other on failure
2721  **/
2722 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2723 {
2724 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2725 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2726 	struct ibmvfc_cmd *tmf;
2727 	struct ibmvfc_event *evt, *found_evt;
2728 	union ibmvfc_iu rsp_iu;
2729 	struct ibmvfc_fcp_cmd_iu *iu;
2730 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2731 	int rc, rsp_rc = -EBUSY;
2732 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2733 	int rsp_code = 0;
2734 
2735 	found_evt = NULL;
2736 	spin_lock_irqsave(vhost->host->host_lock, flags);
2737 	spin_lock(&vhost->crq.l_lock);
2738 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2739 		if (evt->cmnd && evt->cmnd->device == sdev) {
2740 			found_evt = evt;
2741 			break;
2742 		}
2743 	}
2744 	spin_unlock(&vhost->crq.l_lock);
2745 
2746 	if (!found_evt) {
2747 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2748 			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2749 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2750 		return 0;
2751 	}
2752 
2753 	if (vhost->state == IBMVFC_ACTIVE) {
2754 		evt = ibmvfc_get_event(&vhost->crq);
2755 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2756 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2757 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2758 
2759 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2760 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2761 		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2762 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2763 		evt->sync_iu = &rsp_iu;
2764 
2765 		tmf->correlation = cpu_to_be64((u64)evt);
2766 
2767 		init_completion(&evt->comp);
2768 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2769 	}
2770 
2771 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2772 
2773 	if (rsp_rc != 0) {
2774 		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2775 		return -EIO;
2776 	}
2777 
2778 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2779 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
2780 
2781 	if (!timeout) {
2782 		rc = ibmvfc_cancel_all(sdev, 0);
2783 		if (!rc) {
2784 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2785 			if (rc == SUCCESS)
2786 				rc = 0;
2787 		}
2788 
2789 		if (rc) {
2790 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2791 			ibmvfc_reset_host(vhost);
2792 			rsp_rc = -EIO;
2793 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2794 
2795 			if (rc == SUCCESS)
2796 				rsp_rc = 0;
2797 
2798 			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2799 			if (rc != SUCCESS) {
2800 				spin_lock_irqsave(vhost->host->host_lock, flags);
2801 				ibmvfc_hard_reset_host(vhost);
2802 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2803 				rsp_rc = 0;
2804 			}
2805 
2806 			goto out;
2807 		}
2808 	}
2809 
2810 	if (rsp_iu.cmd.status)
2811 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2812 
2813 	if (rsp_code) {
2814 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2815 			rsp_code = fc_rsp->data.info.rsp_code;
2816 
2817 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2818 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2819 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2820 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2821 			    fc_rsp->scsi_status);
2822 		rsp_rc = -EIO;
2823 	} else
2824 		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2825 
2826 out:
2827 	spin_lock_irqsave(vhost->host->host_lock, flags);
2828 	ibmvfc_free_event(evt);
2829 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2830 	return rsp_rc;
2831 }
2832 
2833 /**
2834  * ibmvfc_eh_abort_handler - Abort a command
2835  * @cmd:	scsi command to abort
2836  *
2837  * Returns:
2838  *	SUCCESS / FAST_IO_FAIL / FAILED
2839  **/
2840 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2841 {
2842 	struct scsi_device *sdev = cmd->device;
2843 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2844 	int cancel_rc, block_rc;
2845 	int rc = FAILED;
2846 
2847 	ENTER;
2848 	block_rc = fc_block_scsi_eh(cmd);
2849 	ibmvfc_wait_while_resetting(vhost);
2850 	if (block_rc != FAST_IO_FAIL) {
2851 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2852 		ibmvfc_abort_task_set(sdev);
2853 	} else
2854 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2855 
2856 	if (!cancel_rc)
2857 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2858 
2859 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2860 		rc = FAST_IO_FAIL;
2861 
2862 	LEAVE;
2863 	return rc;
2864 }
2865 
2866 /**
2867  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2868  * @cmd:	scsi command struct
2869  *
2870  * Returns:
2871  *	SUCCESS / FAST_IO_FAIL / FAILED
2872  **/
2873 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2874 {
2875 	struct scsi_device *sdev = cmd->device;
2876 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2877 	int cancel_rc, block_rc, reset_rc = 0;
2878 	int rc = FAILED;
2879 
2880 	ENTER;
2881 	block_rc = fc_block_scsi_eh(cmd);
2882 	ibmvfc_wait_while_resetting(vhost);
2883 	if (block_rc != FAST_IO_FAIL) {
2884 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2885 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2886 	} else
2887 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2888 
2889 	if (!cancel_rc && !reset_rc)
2890 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2891 
2892 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2893 		rc = FAST_IO_FAIL;
2894 
2895 	LEAVE;
2896 	return rc;
2897 }
2898 
2899 /**
2900  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2901  * @sdev:	scsi device struct
2902  * @data:	return code
2903  *
2904  **/
2905 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2906 {
2907 	unsigned long *rc = data;
2908 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2909 }
2910 
2911 /**
2912  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2913  * @sdev:	scsi device struct
2914  * @data:	return code
2915  *
2916  **/
2917 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2918 {
2919 	unsigned long *rc = data;
2920 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2921 }
2922 
2923 /**
2924  * ibmvfc_eh_target_reset_handler - Reset the target
2925  * @cmd:	scsi command struct
2926  *
2927  * Returns:
2928  *	SUCCESS / FAST_IO_FAIL / FAILED
2929  **/
2930 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2931 {
2932 	struct scsi_device *sdev = cmd->device;
2933 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2934 	struct scsi_target *starget = scsi_target(sdev);
2935 	int block_rc;
2936 	int reset_rc = 0;
2937 	int rc = FAILED;
2938 	unsigned long cancel_rc = 0;
2939 
2940 	ENTER;
2941 	block_rc = fc_block_scsi_eh(cmd);
2942 	ibmvfc_wait_while_resetting(vhost);
2943 	if (block_rc != FAST_IO_FAIL) {
2944 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2945 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2946 	} else
2947 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2948 
2949 	if (!cancel_rc && !reset_rc)
2950 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2951 
2952 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2953 		rc = FAST_IO_FAIL;
2954 
2955 	LEAVE;
2956 	return rc;
2957 }
2958 
2959 /**
2960  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2961  * @cmd:	struct scsi_cmnd having problems
2962  *
2963  **/
2964 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2965 {
2966 	int rc;
2967 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2968 
2969 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2970 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2971 
2972 	return rc ? FAILED : SUCCESS;
2973 }
2974 
2975 /**
2976  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2977  * @rport:		rport struct
2978  *
2979  * Return value:
2980  * 	none
2981  **/
2982 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2983 {
2984 	struct Scsi_Host *shost = rport_to_shost(rport);
2985 	struct ibmvfc_host *vhost = shost_priv(shost);
2986 	struct fc_rport *dev_rport;
2987 	struct scsi_device *sdev;
2988 	struct ibmvfc_target *tgt;
2989 	unsigned long rc, flags;
2990 	unsigned int found;
2991 
2992 	ENTER;
2993 	shost_for_each_device(sdev, shost) {
2994 		dev_rport = starget_to_rport(scsi_target(sdev));
2995 		if (dev_rport != rport)
2996 			continue;
2997 		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2998 	}
2999 
3000 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
3001 
3002 	if (rc == FAILED)
3003 		ibmvfc_issue_fc_host_lip(shost);
3004 
3005 	spin_lock_irqsave(shost->host_lock, flags);
3006 	found = 0;
3007 	list_for_each_entry(tgt, &vhost->targets, queue) {
3008 		if (tgt->scsi_id == rport->port_id) {
3009 			found++;
3010 			break;
3011 		}
3012 	}
3013 
3014 	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
3015 		/*
3016 		 * If we get here, that means we previously attempted to send
3017 		 * an implicit logout to the target but it failed, most likely
3018 		 * due to I/O being pending, so we need to send it again
3019 		 */
3020 		ibmvfc_del_tgt(tgt);
3021 		ibmvfc_reinit_host(vhost);
3022 	}
3023 
3024 	spin_unlock_irqrestore(shost->host_lock, flags);
3025 	LEAVE;
3026 }
3027 
/*
 * Async event descriptors: event name, event code, and the log level
 * at which the event is reported (looked up by ibmvfc_get_ae_desc()).
 */
static const struct ibmvfc_async_desc ae_desc [] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
3043 
/* Fallback descriptor for event codes not present in ae_desc[] */
static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
3047 
3048 /**
3049  * ibmvfc_get_ae_desc - Get text description for async event
3050  * @ae:	async event
3051  *
3052  **/
3053 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
3054 {
3055 	int i;
3056 
3057 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3058 		if (ae_desc[i].ae == ae)
3059 			return &ae_desc[i];
3060 
3061 	return &unknown_ae;
3062 }
3063 
/*
 * Map of link state codes to human readable suffixes appended to
 * async event log messages (see ibmvfc_get_link_state()).
 */
static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc [] = {
	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
};
3073 
3074 /**
3075  * ibmvfc_get_link_state - Get text description for link state
3076  * @state:	link state
3077  *
3078  **/
3079 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3080 {
3081 	int i;
3082 
3083 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3084 		if (link_desc[i].state == state)
3085 			return link_desc[i].desc;
3086 
3087 	return "";
3088 }
3089 
3090 /**
3091  * ibmvfc_handle_async - Handle an async event from the adapter
3092  * @crq:	crq to process
3093  * @vhost:	ibmvfc host struct
3094  *
3095  **/
3096 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3097 				struct ibmvfc_host *vhost)
3098 {
3099 	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3100 	struct ibmvfc_target *tgt;
3101 
3102 	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3103 		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3104 		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3105 		   ibmvfc_get_link_state(crq->link_state));
3106 
3107 	switch (be64_to_cpu(crq->event)) {
3108 	case IBMVFC_AE_RESUME:
3109 		switch (crq->link_state) {
3110 		case IBMVFC_AE_LS_LINK_DOWN:
3111 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3112 			break;
3113 		case IBMVFC_AE_LS_LINK_DEAD:
3114 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3115 			break;
3116 		case IBMVFC_AE_LS_LINK_UP:
3117 		case IBMVFC_AE_LS_LINK_BOUNCED:
3118 		default:
3119 			vhost->events_to_log |= IBMVFC_AE_LINKUP;
3120 			vhost->delay_init = 1;
3121 			__ibmvfc_reset_host(vhost);
3122 			break;
3123 		}
3124 
3125 		break;
3126 	case IBMVFC_AE_LINK_UP:
3127 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
3128 		vhost->delay_init = 1;
3129 		__ibmvfc_reset_host(vhost);
3130 		break;
3131 	case IBMVFC_AE_SCN_FABRIC:
3132 	case IBMVFC_AE_SCN_DOMAIN:
3133 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3134 		if (vhost->state < IBMVFC_HALTED) {
3135 			vhost->delay_init = 1;
3136 			__ibmvfc_reset_host(vhost);
3137 		}
3138 		break;
3139 	case IBMVFC_AE_SCN_NPORT:
3140 	case IBMVFC_AE_SCN_GROUP:
3141 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3142 		ibmvfc_reinit_host(vhost);
3143 		break;
3144 	case IBMVFC_AE_ELS_LOGO:
3145 	case IBMVFC_AE_ELS_PRLO:
3146 	case IBMVFC_AE_ELS_PLOGI:
3147 		list_for_each_entry(tgt, &vhost->targets, queue) {
3148 			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3149 				break;
3150 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3151 				continue;
3152 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3153 				continue;
3154 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3155 				continue;
3156 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3157 				tgt->logo_rcvd = 1;
3158 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3159 				ibmvfc_del_tgt(tgt);
3160 				ibmvfc_reinit_host(vhost);
3161 			}
3162 		}
3163 		break;
3164 	case IBMVFC_AE_LINK_DOWN:
3165 	case IBMVFC_AE_ADAPTER_FAILED:
3166 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3167 		break;
3168 	case IBMVFC_AE_LINK_DEAD:
3169 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3170 		break;
3171 	case IBMVFC_AE_HALT:
3172 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
3173 		break;
3174 	default:
3175 		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
3176 		break;
3177 	}
3178 }
3179 
3180 /**
3181  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3182  * @crq:	Command/Response queue
3183  * @vhost:	ibmvfc host struct
3184  * @evt_doneq:	Event done queue
3185  *
3186 **/
3187 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3188 			      struct list_head *evt_doneq)
3189 {
3190 	long rc;
3191 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3192 
3193 	switch (crq->valid) {
3194 	case IBMVFC_CRQ_INIT_RSP:
3195 		switch (crq->format) {
3196 		case IBMVFC_CRQ_INIT:
3197 			dev_info(vhost->dev, "Partner initialized\n");
3198 			/* Send back a response */
3199 			rc = ibmvfc_send_crq_init_complete(vhost);
3200 			if (rc == 0)
3201 				ibmvfc_init_host(vhost);
3202 			else
3203 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3204 			break;
3205 		case IBMVFC_CRQ_INIT_COMPLETE:
3206 			dev_info(vhost->dev, "Partner initialization complete\n");
3207 			ibmvfc_init_host(vhost);
3208 			break;
3209 		default:
3210 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3211 		}
3212 		return;
3213 	case IBMVFC_CRQ_XPORT_EVENT:
3214 		vhost->state = IBMVFC_NO_CRQ;
3215 		vhost->logged_in = 0;
3216 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3217 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3218 			/* We need to re-setup the interpartition connection */
3219 			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3220 			vhost->client_migrated = 1;
3221 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
3222 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3223 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3224 		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3225 			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3226 			ibmvfc_purge_requests(vhost, DID_ERROR);
3227 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3228 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3229 		} else {
3230 			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3231 		}
3232 		return;
3233 	case IBMVFC_CRQ_CMD_RSP:
3234 		break;
3235 	default:
3236 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3237 		return;
3238 	}
3239 
3240 	if (crq->format == IBMVFC_ASYNC_EVENT)
3241 		return;
3242 
3243 	/* The only kind of payload CRQs we should get are responses to
3244 	 * things we send. Make sure this response is to something we
3245 	 * actually sent
3246 	 */
3247 	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3248 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3249 			crq->ioba);
3250 		return;
3251 	}
3252 
3253 	if (unlikely(atomic_read(&evt->free))) {
3254 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3255 			crq->ioba);
3256 		return;
3257 	}
3258 
3259 	spin_lock(&evt->queue->l_lock);
3260 	list_move_tail(&evt->queue_list, evt_doneq);
3261 	spin_unlock(&evt->queue->l_lock);
3262 }
3263 
3264 /**
3265  * ibmvfc_scan_finished - Check if the device scan is done.
3266  * @shost:	scsi host struct
3267  * @time:	current elapsed time
3268  *
3269  * Returns:
3270  *	0 if scan is not done / 1 if scan is done
3271  **/
3272 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3273 {
3274 	unsigned long flags;
3275 	struct ibmvfc_host *vhost = shost_priv(shost);
3276 	int done = 0;
3277 
3278 	spin_lock_irqsave(shost->host_lock, flags);
3279 	if (time >= (init_timeout * HZ)) {
3280 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3281 			 "continuing initialization\n", init_timeout);
3282 		done = 1;
3283 	}
3284 
3285 	if (vhost->scan_complete)
3286 		done = 1;
3287 	spin_unlock_irqrestore(shost->host_lock, flags);
3288 	return done;
3289 }
3290 
3291 /**
3292  * ibmvfc_slave_alloc - Setup the device's task set value
3293  * @sdev:	struct scsi_device device to configure
3294  *
3295  * Set the device's task set value so that error handling works as
3296  * expected.
3297  *
3298  * Returns:
3299  *	0 on success / -ENXIO if device does not exist
3300  **/
3301 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3302 {
3303 	struct Scsi_Host *shost = sdev->host;
3304 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3305 	struct ibmvfc_host *vhost = shost_priv(shost);
3306 	unsigned long flags = 0;
3307 
3308 	if (!rport || fc_remote_port_chkready(rport))
3309 		return -ENXIO;
3310 
3311 	spin_lock_irqsave(shost->host_lock, flags);
3312 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3313 	spin_unlock_irqrestore(shost->host_lock, flags);
3314 	return 0;
3315 }
3316 
3317 /**
3318  * ibmvfc_target_alloc - Setup the target's task set value
3319  * @starget:	struct scsi_target
3320  *
3321  * Set the target's task set value so that error handling works as
3322  * expected.
3323  *
3324  * Returns:
3325  *	0 on success / -ENXIO if device does not exist
3326  **/
3327 static int ibmvfc_target_alloc(struct scsi_target *starget)
3328 {
3329 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3330 	struct ibmvfc_host *vhost = shost_priv(shost);
3331 	unsigned long flags = 0;
3332 
3333 	spin_lock_irqsave(shost->host_lock, flags);
3334 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3335 	spin_unlock_irqrestore(shost->host_lock, flags);
3336 	return 0;
3337 }
3338 
3339 /**
3340  * ibmvfc_slave_configure - Configure the device
3341  * @sdev:	struct scsi_device device to configure
3342  *
3343  * Enable allow_restart for a device if it is a disk. Adjust the
3344  * queue_depth here also.
3345  *
3346  * Returns:
3347  *	0
3348  **/
3349 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3350 {
3351 	struct Scsi_Host *shost = sdev->host;
3352 	unsigned long flags = 0;
3353 
3354 	spin_lock_irqsave(shost->host_lock, flags);
3355 	if (sdev->type == TYPE_DISK) {
3356 		sdev->allow_restart = 1;
3357 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3358 	}
3359 	spin_unlock_irqrestore(shost->host_lock, flags);
3360 	return 0;
3361 }
3362 
3363 /**
3364  * ibmvfc_change_queue_depth - Change the device's queue depth
3365  * @sdev:	scsi device struct
3366  * @qdepth:	depth to set
3367  *
3368  * Return value:
3369  * 	actual depth set
3370  **/
3371 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3372 {
3373 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3374 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3375 
3376 	return scsi_change_queue_depth(sdev, qdepth);
3377 }
3378 
3379 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3380 						 struct device_attribute *attr, char *buf)
3381 {
3382 	struct Scsi_Host *shost = class_to_shost(dev);
3383 	struct ibmvfc_host *vhost = shost_priv(shost);
3384 
3385 	return snprintf(buf, PAGE_SIZE, "%s\n",
3386 			vhost->login_buf->resp.partition_name);
3387 }
3388 
3389 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3390 					    struct device_attribute *attr, char *buf)
3391 {
3392 	struct Scsi_Host *shost = class_to_shost(dev);
3393 	struct ibmvfc_host *vhost = shost_priv(shost);
3394 
3395 	return snprintf(buf, PAGE_SIZE, "%s\n",
3396 			vhost->login_buf->resp.device_name);
3397 }
3398 
3399 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3400 					 struct device_attribute *attr, char *buf)
3401 {
3402 	struct Scsi_Host *shost = class_to_shost(dev);
3403 	struct ibmvfc_host *vhost = shost_priv(shost);
3404 
3405 	return snprintf(buf, PAGE_SIZE, "%s\n",
3406 			vhost->login_buf->resp.port_loc_code);
3407 }
3408 
3409 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3410 					 struct device_attribute *attr, char *buf)
3411 {
3412 	struct Scsi_Host *shost = class_to_shost(dev);
3413 	struct ibmvfc_host *vhost = shost_priv(shost);
3414 
3415 	return snprintf(buf, PAGE_SIZE, "%s\n",
3416 			vhost->login_buf->resp.drc_name);
3417 }
3418 
3419 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3420 					     struct device_attribute *attr, char *buf)
3421 {
3422 	struct Scsi_Host *shost = class_to_shost(dev);
3423 	struct ibmvfc_host *vhost = shost_priv(shost);
3424 	return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3425 }
3426 
3427 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3428 					     struct device_attribute *attr, char *buf)
3429 {
3430 	struct Scsi_Host *shost = class_to_shost(dev);
3431 	struct ibmvfc_host *vhost = shost_priv(shost);
3432 	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3433 }
3434 
3435 /**
3436  * ibmvfc_show_log_level - Show the adapter's error logging level
3437  * @dev:	class device struct
3438  * @attr:	unused
3439  * @buf:	buffer
3440  *
3441  * Return value:
3442  * 	number of bytes printed to buffer
3443  **/
3444 static ssize_t ibmvfc_show_log_level(struct device *dev,
3445 				     struct device_attribute *attr, char *buf)
3446 {
3447 	struct Scsi_Host *shost = class_to_shost(dev);
3448 	struct ibmvfc_host *vhost = shost_priv(shost);
3449 	unsigned long flags = 0;
3450 	int len;
3451 
3452 	spin_lock_irqsave(shost->host_lock, flags);
3453 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3454 	spin_unlock_irqrestore(shost->host_lock, flags);
3455 	return len;
3456 }
3457 
3458 /**
3459  * ibmvfc_store_log_level - Change the adapter's error logging level
3460  * @dev:	class device struct
3461  * @attr:	unused
3462  * @buf:	buffer
3463  * @count:      buffer size
3464  *
3465  * Return value:
3466  * 	number of bytes printed to buffer
3467  **/
3468 static ssize_t ibmvfc_store_log_level(struct device *dev,
3469 				      struct device_attribute *attr,
3470 				      const char *buf, size_t count)
3471 {
3472 	struct Scsi_Host *shost = class_to_shost(dev);
3473 	struct ibmvfc_host *vhost = shost_priv(shost);
3474 	unsigned long flags = 0;
3475 
3476 	spin_lock_irqsave(shost->host_lock, flags);
3477 	vhost->log_level = simple_strtoul(buf, NULL, 10);
3478 	spin_unlock_irqrestore(shost->host_lock, flags);
3479 	return strlen(buf);
3480 }
3481 
3482 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3483 					 struct device_attribute *attr, char *buf)
3484 {
3485 	struct Scsi_Host *shost = class_to_shost(dev);
3486 	struct ibmvfc_host *vhost = shost_priv(shost);
3487 	unsigned long flags = 0;
3488 	int len;
3489 
3490 	spin_lock_irqsave(shost->host_lock, flags);
3491 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
3492 	spin_unlock_irqrestore(shost->host_lock, flags);
3493 	return len;
3494 }
3495 
3496 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3497 					 struct device_attribute *attr,
3498 					 const char *buf, size_t count)
3499 {
3500 	struct Scsi_Host *shost = class_to_shost(dev);
3501 	struct ibmvfc_host *vhost = shost_priv(shost);
3502 	unsigned long flags = 0;
3503 	unsigned int channels;
3504 
3505 	spin_lock_irqsave(shost->host_lock, flags);
3506 	channels = simple_strtoul(buf, NULL, 10);
3507 	vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
3508 	ibmvfc_hard_reset_host(vhost);
3509 	spin_unlock_irqrestore(shost->host_lock, flags);
3510 	return strlen(buf);
3511 }
3512 
/* Host sysfs attributes; registered via driver_template.shost_attrs */
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
		   ibmvfc_show_log_level, ibmvfc_store_log_level);
static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
3523 
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_read_trace - Dump the adapter trace
 * @filp:		open sysfs file
 * @kobj:		kobject struct
 * @bin_attr:	bin_attribute struct
 * @buf:		buffer
 * @off:		offset
 * @count:		buffer size
 *
 * Return value:
 *	number of bytes printed to buffer
 **/
static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t off, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ibmvfc_host *vhost = shost_priv(shost);
	unsigned long flags = 0;
	int size = IBMVFC_TRACE_SIZE;
	char *src = (char *)vhost->trace;

	/* Clamp the read to the bounds of the trace buffer */
	if (off > size)
		return 0;
	if (off + count > size) {
		size -= off;
		count = size;
	}

	/* Copy under host_lock so the trace isn't updated mid-read */
	spin_lock_irqsave(shost->host_lock, flags);
	memcpy(buf, &src[off], count);
	spin_unlock_irqrestore(shost->host_lock, flags);
	return count;
}

/* sysfs binary attribute ("trace") exposing the adapter trace buffer */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,
	.read = ibmvfc_read_trace,
};
#endif
3570 
/* NULL-terminated list of host attributes hooked into driver_template */
static struct device_attribute *ibmvfc_attrs[] = {
	&dev_attr_partition_name,
	&dev_attr_device_name,
	&dev_attr_port_loc_code,
	&dev_attr_drc_name,
	&dev_attr_npiv_version,
	&dev_attr_capabilities,
	&dev_attr_log_level,
	&dev_attr_nr_scsi_channels,
	NULL
};
3582 
/* SCSI midlayer host template for the virtual FC adapter */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.shost_attrs = ibmvfc_attrs,
	.track_queue_depth = 1,
	.host_tagset = 1,	/* share one tag space across all hw queues */
};
3607 
3608 /**
3609  * ibmvfc_next_async_crq - Returns the next entry in async queue
3610  * @vhost:	ibmvfc host struct
3611  *
3612  * Returns:
3613  *	Pointer to next entry in queue / NULL if empty
3614  **/
3615 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3616 {
3617 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
3618 	struct ibmvfc_async_crq *crq;
3619 
3620 	crq = &async_crq->msgs.async[async_crq->cur];
3621 	if (crq->valid & 0x80) {
3622 		if (++async_crq->cur == async_crq->size)
3623 			async_crq->cur = 0;
3624 		rmb();
3625 	} else
3626 		crq = NULL;
3627 
3628 	return crq;
3629 }
3630 
3631 /**
3632  * ibmvfc_next_crq - Returns the next entry in message queue
3633  * @vhost:	ibmvfc host struct
3634  *
3635  * Returns:
3636  *	Pointer to next entry in queue / NULL if empty
3637  **/
3638 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3639 {
3640 	struct ibmvfc_queue *queue = &vhost->crq;
3641 	struct ibmvfc_crq *crq;
3642 
3643 	crq = &queue->msgs.crq[queue->cur];
3644 	if (crq->valid & 0x80) {
3645 		if (++queue->cur == queue->size)
3646 			queue->cur = 0;
3647 		rmb();
3648 	} else
3649 		crq = NULL;
3650 
3651 	return crq;
3652 }
3653 
3654 /**
3655  * ibmvfc_interrupt - Interrupt handler
3656  * @irq:		number of irq to handle, not used
3657  * @dev_instance: ibmvfc_host that received interrupt
3658  *
3659  * Returns:
3660  *	IRQ_HANDLED
3661  **/
3662 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3663 {
3664 	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3665 	unsigned long flags;
3666 
3667 	spin_lock_irqsave(vhost->host->host_lock, flags);
3668 	vio_disable_interrupts(to_vio_dev(vhost->dev));
3669 	tasklet_schedule(&vhost->tasklet);
3670 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3671 	return IRQ_HANDLED;
3672 }
3673 
3674 /**
3675  * ibmvfc_tasklet - Interrupt handler tasklet
3676  * @data:		ibmvfc host struct
3677  *
3678  * Returns:
3679  *	Nothing
3680  **/
3681 static void ibmvfc_tasklet(void *data)
3682 {
3683 	struct ibmvfc_host *vhost = data;
3684 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
3685 	struct ibmvfc_crq *crq;
3686 	struct ibmvfc_async_crq *async;
3687 	struct ibmvfc_event *evt, *temp;
3688 	unsigned long flags;
3689 	int done = 0;
3690 	LIST_HEAD(evt_doneq);
3691 
3692 	spin_lock_irqsave(vhost->host->host_lock, flags);
3693 	spin_lock(vhost->crq.q_lock);
3694 	while (!done) {
3695 		/* Pull all the valid messages off the async CRQ */
3696 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3697 			ibmvfc_handle_async(async, vhost);
3698 			async->valid = 0;
3699 			wmb();
3700 		}
3701 
3702 		/* Pull all the valid messages off the CRQ */
3703 		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3704 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3705 			crq->valid = 0;
3706 			wmb();
3707 		}
3708 
3709 		vio_enable_interrupts(vdev);
3710 		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3711 			vio_disable_interrupts(vdev);
3712 			ibmvfc_handle_async(async, vhost);
3713 			async->valid = 0;
3714 			wmb();
3715 		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3716 			vio_disable_interrupts(vdev);
3717 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3718 			crq->valid = 0;
3719 			wmb();
3720 		} else
3721 			done = 1;
3722 	}
3723 
3724 	spin_unlock(vhost->crq.q_lock);
3725 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3726 
3727 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3728 		del_timer(&evt->timer);
3729 		list_del(&evt->queue_list);
3730 		ibmvfc_trc_end(evt);
3731 		evt->done(evt);
3732 	}
3733 }
3734 
3735 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3736 {
3737 	struct device *dev = scrq->vhost->dev;
3738 	struct vio_dev *vdev = to_vio_dev(dev);
3739 	unsigned long rc;
3740 	int irq_action = H_ENABLE_VIO_INTERRUPT;
3741 
3742 	if (!enable)
3743 		irq_action = H_DISABLE_VIO_INTERRUPT;
3744 
3745 	rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3746 				scrq->hw_irq, 0, 0);
3747 
3748 	if (rc)
3749 		dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3750 			enable ? "enable" : "disable", scrq->hwq_id, rc);
3751 
3752 	return rc;
3753 }
3754 
3755 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3756 			       struct list_head *evt_doneq)
3757 {
3758 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3759 
3760 	switch (crq->valid) {
3761 	case IBMVFC_CRQ_CMD_RSP:
3762 		break;
3763 	case IBMVFC_CRQ_XPORT_EVENT:
3764 		return;
3765 	default:
3766 		dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
3767 		return;
3768 	}
3769 
3770 	/* The only kind of payload CRQs we should get are responses to
3771 	 * things we send. Make sure this response is to something we
3772 	 * actually sent
3773 	 */
3774 	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3775 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3776 			crq->ioba);
3777 		return;
3778 	}
3779 
3780 	if (unlikely(atomic_read(&evt->free))) {
3781 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3782 			crq->ioba);
3783 		return;
3784 	}
3785 
3786 	spin_lock(&evt->queue->l_lock);
3787 	list_move_tail(&evt->queue_list, evt_doneq);
3788 	spin_unlock(&evt->queue->l_lock);
3789 }
3790 
/*
 * ibmvfc_next_scrq - Return the next valid entry in the sub-CRQ ring,
 * or NULL if the ring is empty.
 */
static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;

	crq = &scrq->msgs.scrq[scrq->cur].crq;
	if (crq->valid & 0x80) {
		/* Valid entry: consume it, wrapping the ring cursor */
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
		/* Read barrier: don't read the payload before the valid bit */
		rmb();
	} else
		crq = NULL;

	return crq;
}
3805 
/*
 * ibmvfc_drain_sub_crq - Drain all valid entries off a sub-CRQ,
 * re-enabling its irq and re-checking to close the race against new
 * arrivals, then complete the collected events outside the lock.
 */
static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
{
	struct ibmvfc_crq *crq;
	struct ibmvfc_event *evt, *temp;
	unsigned long flags;
	int done = 0;
	LIST_HEAD(evt_doneq);

	spin_lock_irqsave(scrq->q_lock, flags);
	while (!done) {
		/* Pull all the valid messages off the sub-CRQ */
		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		}

		/* Re-enable the irq, then re-check for a late arrival */
		ibmvfc_toggle_scrq_irq(scrq, 1);
		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
			ibmvfc_toggle_scrq_irq(scrq, 0);
			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
			crq->valid = 0;
			wmb();
		} else
			done = 1;
	}
	spin_unlock_irqrestore(scrq->q_lock, flags);

	/* Complete the collected events outside the queue lock */
	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
		del_timer(&evt->timer);
		list_del(&evt->queue_list);
		ibmvfc_trc_end(evt);
		evt->done(evt);
	}
}
3840 
3841 static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
3842 {
3843 	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3844 
3845 	ibmvfc_toggle_scrq_irq(scrq, 0);
3846 	ibmvfc_drain_sub_crq(scrq);
3847 
3848 	return IRQ_HANDLED;
3849 }
3850 
3851 /**
3852  * ibmvfc_init_tgt - Set the next init job step for the target
3853  * @tgt:		ibmvfc target struct
3854  * @job_step:	job step to perform
3855  *
3856  **/
3857 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3858 			    void (*job_step) (struct ibmvfc_target *))
3859 {
3860 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3861 		tgt->job_step = job_step;
3862 	wake_up(&tgt->vhost->work_wait_q);
3863 }
3864 
3865 /**
3866  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3867  * @tgt:		ibmvfc target struct
3868  * @job_step:	initialization job step
3869  *
3870  * Returns: 1 if step will be retried / 0 if not
3871  *
3872  **/
3873 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3874 				  void (*job_step) (struct ibmvfc_target *))
3875 {
3876 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3877 		ibmvfc_del_tgt(tgt);
3878 		wake_up(&tgt->vhost->work_wait_q);
3879 		return 0;
3880 	} else
3881 		ibmvfc_init_tgt(tgt, job_step);
3882 	return 1;
3883 }
3884 
/* Defined in FC-LS */
static const struct {
	int code;	/* response code from the PRLI flags (see ibmvfc_get_prli_rsp()) */
	int retry;	/* non-zero: the PRLI should be retried */
	int logged_in;	/* non-zero: the response indicates a login */
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3901 
3902 /**
3903  * ibmvfc_get_prli_rsp - Find PRLI response index
3904  * @flags:	PRLI response flags
3905  *
3906  **/
3907 static int ibmvfc_get_prli_rsp(u16 flags)
3908 {
3909 	int i;
3910 	int code = (flags & 0x0f00) >> 8;
3911 
3912 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3913 		if (prli_rsp[i].code == code)
3914 			return i;
3915 
3916 	return 0;
3917 }
3918 
/**
 * ibmvfc_tgt_prli_done - Completion handler for Process Login
 * @evt:	ibmvfc event struct
 *
 * Decodes the PRLI MAD response. On success, the FC-LS response code
 * decides whether the target is logged in (record its FCP roles and
 * schedule the rport add), should retry PRLI, or should be deleted.
 * On failure, either retries PLOGI/PRLI or deletes the target. Always
 * drops the event's target reference, frees the event and wakes the
 * host work thread.
 **/
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
	u32 status = be16_to_cpu(rsp->common.status);
	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;

	/* This discovery slot is free again; another target may be queried */
	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		/* NOTE(review): flags/service_parms are logged here in wire
		 * (big endian) byte order; debug output only */
		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
			parms->type, parms->flags, parms->service_parms);

		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
			if (prli_rsp[index].logged_in) {
				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
					/* Image pair established: record the
					 * FCP roles and let the work thread
					 * add the rport */
					tgt->need_login = 0;
					tgt->ids.roles = 0;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
					tgt->add_rport = 1;
				} else
					ibmvfc_del_tgt(tgt);
			} else if (prli_rsp[index].retry)
				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
			else
				ibmvfc_del_tgt(tgt);
		} else
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* A PLOGI_REQUIRED error or a received LOGO means the port
		 * login was lost: restart from PLOGI rather than retry PRLI */
		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (tgt->logo_rcvd)
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
		else
			ibmvfc_del_tgt(tgt);

		/* level is bumped by one when the init step was retried */
		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
3987 
/**
 * ibmvfc_tgt_send_prli - Send a process login
 * @tgt:	ibmvfc target struct
 *
 * Builds and sends a PRLI MAD for @tgt, completing in
 * ibmvfc_tgt_prli_done(). Silently returns if the discovery thread
 * limit has been reached; the work thread will try again later.
 **/
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
{
	struct ibmvfc_process_login *prli;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference held for the event; dropped in the completion handler */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	prli = &evt->iu.prli;
	memset(prli, 0, sizeof(*prli));
	/* When the VIOS handles VF WWPNs, use version 2 of the MAD and
	 * include the target's WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		prli->common.version = cpu_to_be32(2);
		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		prli->common.version = cpu_to_be32(1);
	}
	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
	prli->common.length = cpu_to_be16(sizeof(*prli));
	prli->scsi_id = cpu_to_be64(tgt->scsi_id);

	/* FCP initiator, establish image pair, XFER_RDY disabled for reads */
	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);

	if (cls3_error)
		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo the thread count, action and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent process login\n");
}
4035 
/**
 * ibmvfc_tgt_plogi_done - Completion handler for Port Login
 * @evt:	ibmvfc event struct
 *
 * On success, records the target's node/port names and service
 * parameters and moves on to PRLI as the next init step. If a
 * different port name now answers at this SCSI ID, a full host
 * reinit is requested instead. Always drops the event's target
 * reference, frees the event and wakes the work thread.
 **/
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Port Login succeeded\n");
		if (tgt->ids.port_name &&
		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
			/* A different WWPN answers at this SCSI ID now:
			 * rediscover everything rather than reuse this tgt */
			vhost->reinit = 1;
			tgt_dbg(tgt, "Port re-init required\n");
			break;
		}
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		/* Port login done; process login is the next init step */
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4093 
/**
 * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
 * @tgt:	ibmvfc target struct
 *
 * Builds and sends a Port Login MAD for @tgt, completing in
 * ibmvfc_tgt_plogi_done(). Silently returns if the discovery thread
 * limit has been reached; the work thread will try again later.
 **/
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
{
	struct ibmvfc_port_login *plogi;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference held for the event; dropped in the completion handler */
	kref_get(&tgt->kref);
	/* Starting a fresh login: forget any previously received LOGO */
	tgt->logo_rcvd = 0;
	evt = ibmvfc_get_event(&vhost->crq);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	plogi = &evt->iu.plogi;
	memset(plogi, 0, sizeof(*plogi));
	/* When the VIOS handles VF WWPNs, use version 2 of the MAD and
	 * include the target's WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		plogi->common.version = cpu_to_be32(2);
		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		plogi->common.version = cpu_to_be32(1);
	}
	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
	plogi->common.length = cpu_to_be16(sizeof(*plogi));
	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo the thread count, action and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent port login\n");
}
4134 
/**
 * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Except on a driver failure (host resetting), proceed to PLOGI as
 * the target's next init step regardless of the logout result. Drops
 * the event's target reference and wakes the work thread.
 **/
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
	u32 status = be16_to_cpu(rsp->common.status);

	vhost->discovery_threads--;
	/* status was captured above; the event is no longer needed */
	ibmvfc_free_event(evt);
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Implicit Logout succeeded\n");
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Don't schedule another init step; just release the ref */
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		wake_up(&vhost->work_wait_q);
		return;
	case IBMVFC_MAD_FAILED:
	default:
		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
		break;
	}

	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
4169 
/**
 * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
 * @tgt:		ibmvfc target struct
 * @done:		Routine to call when the event is responded to
 *
 * Takes a reference on @tgt (which the @done handler must drop) and
 * fills in an Implicit Logout MAD for the target's current SCSI ID.
 *
 * Returns:
 *	Allocated and initialized ibmvfc_event struct
 **/
static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
								 void (*done) (struct ibmvfc_event *))
{
	struct ibmvfc_implicit_logout *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Reference held for the event; the @done handler drops it */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	mad = &evt->iu.implicit_logout;
	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
	mad->common.length = cpu_to_be16(sizeof(*mad));
	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
	return evt;
}
4197 
/**
 * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
 * @tgt:		ibmvfc target struct
 *
 * Sends an Implicit Logout MAD, completing in
 * ibmvfc_tgt_implicit_logout_done(). Silently returns if the
 * discovery thread limit has been reached; the work thread will try
 * again later.
 **/
static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	vhost->discovery_threads++;
	/* The helper takes the tgt reference the done handler drops */
	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
						   ibmvfc_tgt_implicit_logout_done);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo the thread count, action and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Implicit Logout\n");
}
4223 
/**
 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
 * @evt:	ibmvfc event struct
 *
 * Completion of an implicit logout issued while deleting an rport.
 * Picks the target's next delete action based on whether the logout
 * succeeded, then drops the event's target reference and wakes the
 * work thread.
 **/
static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	/* Only the MAD header (common to all formats) is read below */
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);

	vhost->discovery_threads--;
	ibmvfc_free_event(evt);

	/*
	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
	 * driver in which case we need to free up all the targets. If we are
	 * not unloading, we will still go through a hard reset to get out of
	 * offline state, so there is no need to track the old targets in that
	 * case.
	 */
	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
	else
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);

	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
4255 
/**
 * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
 * @tgt:		ibmvfc target struct
 *
 * Used on the rport-delete path: logs the target out before deleting
 * it. If we are not logged into the fabric at all, the rport can be
 * deleted immediately. Silently returns (leaving the target in its
 * current state) if the discovery thread limit has been reached.
 **/
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	if (!vhost->logged_in) {
		/* No fabric login, so there is nothing to log out of */
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		return;
	}

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	vhost->discovery_threads++;
	/* The helper takes the tgt reference the done handler drops */
	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
						   ibmvfc_tgt_implicit_logout_and_del_done);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: fall back to deleting the rport directly */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Implicit Logout\n");
}
4286 
/**
 * ibmvfc_tgt_move_login_done - Completion handler for Move Login
 * @evt:	ibmvfc event struct
 *
 * On success, the login has been moved to the target's new SCSI ID:
 * adopt the new ID plus the returned names/service parameters and
 * continue with PRLI. Failures retry the move login. Always drops the
 * event's target reference, frees the event and wakes the work thread.
 **/
static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
		/* The move is complete; new_scsi_id becomes the current ID */
		tgt->scsi_id = tgt->new_scsi_id;
		tgt->ids.port_id = tgt->scsi_id;
		memcpy(&tgt->service_parms, &rsp->service_parms,
		       sizeof(tgt->service_parms));
		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
		       sizeof(tgt->service_parms_change));
		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* level is bumped by one when the move login was retried */
		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);

		tgt_log(tgt, level,
			"Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
			tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
			status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4335 
4336 
/**
 * ibmvfc_tgt_move_login - Initiate a move login for specified target
 * @tgt:		ibmvfc target struct
 *
 * Asks the VIOS to move an existing login from tgt->scsi_id to
 * tgt->new_scsi_id (same WWPN seen at a new address), completing in
 * ibmvfc_tgt_move_login_done(). Silently returns if the discovery
 * thread limit has been reached.
 **/
static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_move_login *move;
	struct ibmvfc_event *evt;

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference held for the event; dropped in the completion handler */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	move = &evt->iu.move_login;
	memset(move, 0, sizeof(*move));
	move->common.version = cpu_to_be32(1);
	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
	move->common.length = cpu_to_be16(sizeof(*move));

	move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
	move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
	move->wwpn = cpu_to_be64(tgt->wwpn);
	move->node_name = cpu_to_be64(tgt->ids.node_name);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		vhost->discovery_threads--;
		/* Send failed: give up on this target and delete the rport */
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
}
4375 
4376 /**
4377  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4378  * @mad:	ibmvfc passthru mad struct
4379  * @tgt:	ibmvfc target struct
4380  *
4381  * Returns:
4382  *	1 if PLOGI needed / 0 if PLOGI not needed
4383  **/
4384 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4385 				    struct ibmvfc_target *tgt)
4386 {
4387 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4388 		return 1;
4389 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4390 		return 1;
4391 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4392 		return 1;
4393 	return 0;
4394 }
4395 
/**
 * ibmvfc_tgt_adisc_done - Completion handler for ADISC
 * @evt:	ibmvfc event struct
 *
 * On success, checks whether the ADISC payload still matches the
 * recorded target identity; a mismatch deletes the target so it gets
 * rediscovered. Also stops the cancel timer armed by
 * ibmvfc_tgt_adisc(). Drops the event's target reference, frees the
 * event and wakes the work thread.
 **/
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
	u32 status = be16_to_cpu(mad->common.status);
	u8 fc_reason, fc_explain;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	/* The ADISC answered; the cancel timer is no longer needed */
	del_timer(&tgt->timer);

	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "ADISC succeeded\n");
		if (ibmvfc_adisc_needs_plogi(mad, tgt))
			ibmvfc_del_tgt(tgt);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_FAILED:
	default:
		ibmvfc_del_tgt(tgt);
		/* Reason/explanation codes are packed into response word 1 */
		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
			 ibmvfc_get_fc_type(fc_reason), fc_reason,
			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4438 
/**
 * ibmvfc_init_passthru - Initialize an event struct for FC passthru
 * @evt:		ibmvfc event struct
 *
 * Fills in the passthru MAD header and points its DMA descriptors at
 * the command/response areas embedded in the event's own IU buffer,
 * computed as offsets from the event's mapped I/O address.
 **/
static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
{
	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;

	memset(mad, 0, sizeof(*mad));
	mad->common.version = cpu_to_be32(1);
	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
	/* MAD length excludes the trailing fc_iu/iu areas, which are
	 * referenced indirectly through the descriptors below */
	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
	/* Descriptor for the passthru IU itself */
	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, iu));
	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
	/* Descriptor for the FC command payload ... */
	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
		offsetof(struct ibmvfc_passthru_fc_iu, payload));
	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
	/* ... and for the FC response area */
	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
		offsetof(struct ibmvfc_passthru_fc_iu, response));
	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
}
4466 
/**
 * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
 * @evt:		ibmvfc event struct
 *
 * Just cleanup this event struct. Everything else is handled by
 * the ADISC completion handler. If the ADISC never actually comes
 * back, we still have the timer running on the ADISC event struct
 * which will fire and cause the CRQ to get reset.
 *
 **/
static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_target *tgt = evt->tgt;

	tgt_dbg(tgt, "ADISC cancel complete\n");
	/* Releases the abort slot taken in ibmvfc_adisc_timeout() */
	vhost->abort_threads--;
	ibmvfc_free_event(evt);
	kref_put(&tgt->kref, ibmvfc_release_tgt);
	wake_up(&vhost->work_wait_q);
}
4488 
/**
 * ibmvfc_adisc_timeout - Handle an ADISC timeout
 * @t:		timer embedded in the ibmvfc target struct
 *
 * If an ADISC times out, send a cancel. If the cancel times
 * out, reset the CRQ. When the ADISC comes back as cancelled,
 * log back into the target. Runs entirely under host_lock.
 **/
static void ibmvfc_adisc_timeout(struct timer_list *t)
{
	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;
	struct ibmvfc_tmf *tmf;
	unsigned long flags;
	int rc;

	tgt_dbg(tgt, "ADISC timeout\n");
	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* Bail if too many aborts are already in flight, or if the ADISC
	 * has completed / discovery has moved on in the meantime */
	if (vhost->abort_threads >= disc_threads ||
	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
	    vhost->state != IBMVFC_INITIALIZING ||
	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
		spin_unlock_irqrestore(vhost->host->host_lock, flags);
		return;
	}

	vhost->abort_threads++;
	/* Reference held for the cancel event; dropped in its done handler */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);

	evt->tgt = tgt;
	tmf = &evt->iu.tmf;
	memset(tmf, 0, sizeof(*tmf));
	/* When the VIOS handles VF WWPNs, use version 2 of the MAD and
	 * include the target's WWPN */
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		tmf->common.version = cpu_to_be32(2);
		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
	} else {
		tmf->common.version = cpu_to_be32(1);
	}
	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
	tmf->common.length = cpu_to_be16(sizeof(*tmf));
	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
	/* Cancel by the target's cancel key, which covers the ADISC sent
	 * with that key in ibmvfc_tgt_adisc() */
	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);

	rc = ibmvfc_send_event(evt, vhost, default_timeout);

	if (rc) {
		/* Couldn't even send the cancel; reset the host to recover */
		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
		vhost->abort_threads--;
		kref_put(&tgt->kref, ibmvfc_release_tgt);
		__ibmvfc_reset_host(vhost);
	} else
		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
4546 
/**
 * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
 * @tgt:		ibmvfc target struct
 *
 * When sending an ADISC we end up with two timers running. The
 * first timer is the timer in the ibmvfc target struct. If this
 * fires, we send a cancel to the target. The second timer is the
 * timer on the ibmvfc event for the ADISC, which is longer. If that
 * fires, it means the ADISC timed out and our attempt to cancel it
 * also failed, so we need to reset the CRQ.
 **/
static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
{
	struct ibmvfc_passthru_mad *mad;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference held for the event; dropped in the completion handler */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	vhost->discovery_threads++;
	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;

	ibmvfc_init_passthru(evt);
	mad = &evt->iu.passthru;
	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);

	/* Build the ADISC ELS payload with our own port/node names and
	 * N_Port ID from the login response buffer */
	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
	       sizeof(vhost->login_buf->resp.port_name));
	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
	       sizeof(vhost->login_buf->resp.node_name));
	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);

	/* Arm the (shorter) cancel timer; see ibmvfc_adisc_timeout() */
	if (timer_pending(&tgt->timer))
		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
	else {
		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
		add_timer(&tgt->timer);
	}

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
		/* Send failed: undo thread count, timer, action and ref */
		vhost->discovery_threads--;
		del_timer(&tgt->timer);
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent ADISC\n");
}
4602 
/**
 * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
 * @evt:	ibmvfc event struct
 *
 * On success, verifies the target's WWPN still maps to the SCSI ID we
 * have recorded; if so, ADISC is the next init step, otherwise the
 * target is deleted to be rediscovered. Drops the event's target
 * reference, frees the event and wakes the work thread.
 **/
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_target *tgt = evt->tgt;
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
	u32 status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	vhost->discovery_threads--;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
	switch (status) {
	case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Query Target succeeded\n");
		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
			ibmvfc_del_tgt(tgt);
		else
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
		break;
	case IBMVFC_MAD_DRIVER_FAILED:
		break;
	case IBMVFC_MAD_CRQ_ERROR:
		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		break;
	case IBMVFC_MAD_FAILED:
	default:
		/* A "port name not registered" rejection means the target has
		 * left the fabric: delete it rather than retry */
		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
			ibmvfc_del_tgt(tgt);
		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
		else
			ibmvfc_del_tgt(tgt);

		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
			status);
		break;
	}

	kref_put(&tgt->kref, ibmvfc_release_tgt);
	ibmvfc_free_event(evt);
	wake_up(&vhost->work_wait_q);
}
4655 
/**
 * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
 * @tgt:	ibmvfc target struct
 *
 * Asks for the SCSI ID currently associated with the target's WWPN,
 * completing in ibmvfc_tgt_query_target_done(). Silently returns if
 * the discovery thread limit has been reached.
 **/
static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
{
	struct ibmvfc_query_tgt *query_tgt;
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_event *evt;

	/* Limit the number of outstanding discovery commands */
	if (vhost->discovery_threads >= disc_threads)
		return;

	/* Reference held for the event; dropped in the completion handler */
	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(&vhost->crq);
	vhost->discovery_threads++;
	evt->tgt = tgt;
	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
	query_tgt = &evt->iu.query_tgt;
	memset(query_tgt, 0, sizeof(*query_tgt));
	query_tgt->common.version = cpu_to_be32(1);
	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);

	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		/* Send failed: undo the thread count, action and reference */
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Query Target\n");
}
4690 
/**
 * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
 * @vhost:		ibmvfc host struct
 * @target:		Holds the SCSI ID to allocate a target for, and the WWPN
 *
 * Allocates a new target unless one already exists for this SCSI ID
 * or WWPN. A WWPN match with no SCSI ID match means the port moved to
 * a new address, which is handled with a move login when permitted.
 *
 * Returns:
 *	0 on success / other on failure
 **/
static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
			       struct ibmvfc_discover_targets_entry *target)
{
	struct ibmvfc_target *stgt = NULL;	/* existing tgt matching the SCSI ID */
	struct ibmvfc_target *wtgt = NULL;	/* existing tgt matching the WWPN */
	struct ibmvfc_target *tgt;
	unsigned long flags;
	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
	u64 wwpn = be64_to_cpu(target->wwpn);

	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->wwpn == wwpn) {
			wtgt = tgt;
			break;
		}
	}

	list_for_each_entry(tgt, &vhost->targets, queue) {
		if (tgt->scsi_id == scsi_id) {
			stgt = tgt;
			break;
		}
	}

	if (wtgt && !stgt) {
		/*
		 * A WWPN target has moved and we still are tracking the old
		 * SCSI ID.  The only way we should be able to get here is if
		 * we attempted to send an implicit logout for the old SCSI ID
		 * and it failed for some reason, such as there being I/O
		 * pending to the target. In this case, we will have already
		 * deleted the rport from the FC transport so we do a move
		 * login, which works even with I/O pending, however, if
		 * there is still I/O pending, it will stay outstanding, so
		 * we only do this if fast fail is disabled for the rport,
		 * otherwise we let terminate_rport_io clean up the port
		 * before we login at the new location.
		 */
		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			if (wtgt->move_login) {
				/*
				 * Do a move login here. The old target is no longer
				 * known to the transport layer We don't use the
				 * normal ibmvfc_set_tgt_action to set this, as we
				 * don't normally want to allow this state change.
				 */
				wtgt->new_scsi_id = scsi_id;
				wtgt->action = IBMVFC_TGT_ACTION_INIT;
				wtgt->init_retries = 0;
				ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
			}
			goto unlock_out;
		} else {
			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
				wtgt->action, wtgt->rport);
		}
	} else if (stgt) {
		/* NOTE(review): tgt == stgt here (the search loop above broke
		 * on this entry); using stgt would read more clearly */
		if (tgt->need_login)
			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
		goto unlock_out;
	}
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* No existing target: allocate and initialize a new one. The lock
	 * is dropped around the allocation, which may sleep */
	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
	memset(tgt, 0, sizeof(*tgt));
	tgt->scsi_id = scsi_id;
	tgt->wwpn = wwpn;
	tgt->vhost = vhost;
	tgt->need_login = 1;
	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
	kref_init(&tgt->kref);
	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
	spin_lock_irqsave(vhost->host->host_lock, flags);
	tgt->cancel_key = vhost->task_set++;
	list_add_tail(&tgt->queue, &vhost->targets);

unlock_out:
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return 0;
}
4781 
4782 /**
4783  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4784  * @vhost:		ibmvfc host struct
4785  *
4786  * Returns:
4787  *	0 on success / other on failure
4788  **/
4789 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4790 {
4791 	int i, rc;
4792 
4793 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4794 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4795 
4796 	return rc;
4797 }
4798 
4799 /**
4800  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4801  * @evt:	ibmvfc event struct
4802  *
4803  **/
4804 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4805 {
4806 	struct ibmvfc_host *vhost = evt->vhost;
4807 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4808 	u32 mad_status = be16_to_cpu(rsp->common.status);
4809 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4810 
4811 	switch (mad_status) {
4812 	case IBMVFC_MAD_SUCCESS:
4813 		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4814 		vhost->num_targets = be32_to_cpu(rsp->num_written);
4815 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4816 		break;
4817 	case IBMVFC_MAD_FAILED:
4818 		level += ibmvfc_retry_host_init(vhost);
4819 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4820 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4821 			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4822 		break;
4823 	case IBMVFC_MAD_DRIVER_FAILED:
4824 		break;
4825 	default:
4826 		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4827 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4828 		break;
4829 	}
4830 
4831 	ibmvfc_free_event(evt);
4832 	wake_up(&vhost->work_wait_q);
4833 }
4834 
4835 /**
4836  * ibmvfc_discover_targets - Send Discover Targets MAD
4837  * @vhost:	ibmvfc host struct
4838  *
4839  **/
4840 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4841 {
4842 	struct ibmvfc_discover_targets *mad;
4843 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4844 
4845 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4846 	mad = &evt->iu.discover_targets;
4847 	memset(mad, 0, sizeof(*mad));
4848 	mad->common.version = cpu_to_be32(1);
4849 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4850 	mad->common.length = cpu_to_be16(sizeof(*mad));
4851 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4852 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4853 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4854 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4855 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4856 
4857 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4858 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4859 	else
4860 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4861 }
4862 
/**
 * ibmvfc_channel_setup_done - Completion handler for Channel Setup MAD
 * @evt:	ibmvfc event struct
 *
 * Records the set of sub-CRQ channels the VIOS accepted (copying each
 * VIOS-side cookie into the matching local scrq) and decides whether the
 * host will run in channelized mode. On MAD failure a host init retry is
 * scheduled; on an unrecognized status the link is taken down.
 */
static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;
	int flags, active_queues, i;

	/*
	 * The response data lives in vhost->channel_setup_buf (DMA buffer),
	 * not in the event, so the event can be released up front.
	 */
	ibmvfc_free_event(evt);

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
		flags = be32_to_cpu(setup->flags);
		vhost->do_enquiry = 0;
		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
		scrqs->active_queues = active_queues;

		if (flags & IBMVFC_CHANNELS_CANCELED) {
			ibmvfc_dbg(vhost, "Channels Canceled\n");
			vhost->using_channels = 0;
		} else {
			if (active_queues)
				vhost->using_channels = 1;
			/* Save the VIOS-assigned handle for each active sub-CRQ */
			for (i = 0; i < active_queues; i++)
				scrqs->scrqs[i].vios_cookie =
					be64_to_cpu(setup->channel_handles[i]);

			ibmvfc_dbg(vhost, "Using %u channels\n",
				   vhost->scsi_scrqs.active_queues);
		}
		break;
	case IBMVFC_MAD_FAILED:
		/* Quieter log level if a retry was scheduled */
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Channel Setup failed\n");
		fallthrough;
	case IBMVFC_MAD_DRIVER_FAILED:
		/* Retry (or teardown) path: don't advance the host state here */
		return;
	default:
		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
			mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		return;
	}

	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
	wake_up(&vhost->work_wait_q);
}
4912 
4913 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
4914 {
4915 	struct ibmvfc_channel_setup_mad *mad;
4916 	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
4917 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4918 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4919 	unsigned int num_channels =
4920 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
4921 	int i;
4922 
4923 	memset(setup_buf, 0, sizeof(*setup_buf));
4924 	if (num_channels == 0)
4925 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
4926 	else {
4927 		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
4928 		for (i = 0; i < num_channels; i++)
4929 			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
4930 	}
4931 
4932 	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
4933 	mad = &evt->iu.channel_setup;
4934 	memset(mad, 0, sizeof(*mad));
4935 	mad->common.version = cpu_to_be32(1);
4936 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
4937 	mad->common.length = cpu_to_be16(sizeof(*mad));
4938 	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
4939 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
4940 
4941 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4942 
4943 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4944 		ibmvfc_dbg(vhost, "Sent channel setup\n");
4945 	else
4946 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
4947 }
4948 
/**
 * ibmvfc_channel_enquiry_done - Completion handler for Channel Enquiry MAD
 * @evt:	ibmvfc event struct
 *
 * On success, records the number of sub-CRQ channels the VIOS supports and
 * proceeds to Channel Setup. All failure paths free the event and return
 * without advancing (a host init retry may have been scheduled).
 */
static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
	u32 mad_status = be16_to_cpu(rsp->common.status);
	int level = IBMVFC_DEFAULT_LOG_LEVEL;

	switch (mad_status) {
	case IBMVFC_MAD_SUCCESS:
		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
		/* rsp points into the event, so read it before freeing */
		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
		ibmvfc_free_event(evt);
		break;
	case IBMVFC_MAD_FAILED:
		/* Quieter log level if a retry was scheduled */
		level += ibmvfc_retry_host_init(vhost);
		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
		fallthrough;
	case IBMVFC_MAD_DRIVER_FAILED:
		ibmvfc_free_event(evt);
		return;
	default:
		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
			mad_status);
		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
		ibmvfc_free_event(evt);
		return;
	}

	ibmvfc_channel_setup(vhost);
}
4979 
4980 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
4981 {
4982 	struct ibmvfc_channel_enquiry *mad;
4983 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4984 
4985 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
4986 	mad = &evt->iu.channel_enquiry;
4987 	memset(mad, 0, sizeof(*mad));
4988 	mad->common.version = cpu_to_be32(1);
4989 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
4990 	mad->common.length = cpu_to_be16(sizeof(*mad));
4991 
4992 	if (mig_channels_only)
4993 		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
4994 	if (mig_no_less_channels)
4995 		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
4996 
4997 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4998 
4999 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5000 		ibmvfc_dbg(vhost, "Send channel enquiry\n");
5001 	else
5002 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5003 }
5004 
5005 /**
5006  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
5007  * @evt:	ibmvfc event struct
5008  *
5009  **/
5010 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
5011 {
5012 	struct ibmvfc_host *vhost = evt->vhost;
5013 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
5014 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
5015 	unsigned int npiv_max_sectors;
5016 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
5017 
5018 	switch (mad_status) {
5019 	case IBMVFC_MAD_SUCCESS:
5020 		ibmvfc_free_event(evt);
5021 		break;
5022 	case IBMVFC_MAD_FAILED:
5023 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
5024 			level += ibmvfc_retry_host_init(vhost);
5025 		else
5026 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5027 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
5028 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
5029 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
5030 		ibmvfc_free_event(evt);
5031 		return;
5032 	case IBMVFC_MAD_CRQ_ERROR:
5033 		ibmvfc_retry_host_init(vhost);
5034 		fallthrough;
5035 	case IBMVFC_MAD_DRIVER_FAILED:
5036 		ibmvfc_free_event(evt);
5037 		return;
5038 	default:
5039 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
5040 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5041 		ibmvfc_free_event(evt);
5042 		return;
5043 	}
5044 
5045 	vhost->client_migrated = 0;
5046 
5047 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
5048 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
5049 			rsp->flags);
5050 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5051 		wake_up(&vhost->work_wait_q);
5052 		return;
5053 	}
5054 
5055 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
5056 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
5057 			rsp->max_cmds);
5058 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5059 		wake_up(&vhost->work_wait_q);
5060 		return;
5061 	}
5062 
5063 	vhost->logged_in = 1;
5064 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5065 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5066 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5067 		 rsp->drc_name, npiv_max_sectors);
5068 
5069 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5070 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5071 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5072 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5073 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5074 	fc_host_supported_classes(vhost->host) = 0;
5075 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5076 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5077 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5078 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5079 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5080 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5081 	fc_host_maxframe_size(vhost->host) =
5082 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5083 
5084 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5085 	vhost->host->max_sectors = npiv_max_sectors;
5086 
5087 	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5088 		ibmvfc_channel_enquiry(vhost);
5089 	} else {
5090 		vhost->do_enquiry = 0;
5091 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5092 		wake_up(&vhost->work_wait_q);
5093 	}
5094 }
5095 
5096 /**
5097  * ibmvfc_npiv_login - Sends NPIV login
5098  * @vhost:	ibmvfc host struct
5099  *
5100  **/
5101 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5102 {
5103 	struct ibmvfc_npiv_login_mad *mad;
5104 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5105 
5106 	ibmvfc_gather_partition_info(vhost);
5107 	ibmvfc_set_login_info(vhost);
5108 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5109 
5110 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5111 	mad = &evt->iu.npiv_login;
5112 	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5113 	mad->common.version = cpu_to_be32(1);
5114 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5115 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5116 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5117 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5118 
5119 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5120 
5121 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5122 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
5123 	else
5124 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5125 }
5126 
5127 /**
5128  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5129  * @evt:		ibmvfc event struct
5130  *
5131  **/
5132 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5133 {
5134 	struct ibmvfc_host *vhost = evt->vhost;
5135 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5136 
5137 	ibmvfc_free_event(evt);
5138 
5139 	switch (mad_status) {
5140 	case IBMVFC_MAD_SUCCESS:
5141 		if (list_empty(&vhost->crq.sent) &&
5142 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5143 			ibmvfc_init_host(vhost);
5144 			return;
5145 		}
5146 		break;
5147 	case IBMVFC_MAD_FAILED:
5148 	case IBMVFC_MAD_NOT_SUPPORTED:
5149 	case IBMVFC_MAD_CRQ_ERROR:
5150 	case IBMVFC_MAD_DRIVER_FAILED:
5151 	default:
5152 		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5153 		break;
5154 	}
5155 
5156 	ibmvfc_hard_reset_host(vhost);
5157 }
5158 
5159 /**
5160  * ibmvfc_npiv_logout - Issue an NPIV Logout
5161  * @vhost:		ibmvfc host struct
5162  *
5163  **/
5164 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5165 {
5166 	struct ibmvfc_npiv_logout_mad *mad;
5167 	struct ibmvfc_event *evt;
5168 
5169 	evt = ibmvfc_get_event(&vhost->crq);
5170 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5171 
5172 	mad = &evt->iu.npiv_logout;
5173 	memset(mad, 0, sizeof(*mad));
5174 	mad->common.version = cpu_to_be32(1);
5175 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5176 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5177 
5178 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5179 
5180 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5181 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5182 	else
5183 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5184 }
5185 
5186 /**
5187  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5188  * @vhost:		ibmvfc host struct
5189  *
5190  * Returns:
5191  *	1 if work to do / 0 if not
5192  **/
5193 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5194 {
5195 	struct ibmvfc_target *tgt;
5196 
5197 	list_for_each_entry(tgt, &vhost->targets, queue) {
5198 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5199 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5200 			return 1;
5201 	}
5202 
5203 	return 0;
5204 }
5205 
5206 /**
5207  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5208  * @vhost:		ibmvfc host struct
5209  *
5210  * Returns:
5211  *	1 if work to do / 0 if not
5212  **/
5213 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5214 {
5215 	struct ibmvfc_target *tgt;
5216 
5217 	list_for_each_entry(tgt, &vhost->targets, queue) {
5218 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5219 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5220 			return 1;
5221 	}
5222 	return 0;
5223 }
5224 
5225 /**
5226  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5227  * @vhost:		ibmvfc host struct
5228  *
5229  * Returns:
5230  *	1 if work to do / 0 if not
5231  **/
5232 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5233 {
5234 	struct ibmvfc_target *tgt;
5235 
5236 	if (kthread_should_stop())
5237 		return 1;
5238 	switch (vhost->action) {
5239 	case IBMVFC_HOST_ACTION_NONE:
5240 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5241 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5242 		return 0;
5243 	case IBMVFC_HOST_ACTION_TGT_INIT:
5244 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5245 		if (vhost->discovery_threads == disc_threads)
5246 			return 0;
5247 		list_for_each_entry(tgt, &vhost->targets, queue)
5248 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5249 				return 1;
5250 		list_for_each_entry(tgt, &vhost->targets, queue)
5251 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5252 				return 0;
5253 		return 1;
5254 	case IBMVFC_HOST_ACTION_TGT_DEL:
5255 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5256 		if (vhost->discovery_threads == disc_threads)
5257 			return 0;
5258 		list_for_each_entry(tgt, &vhost->targets, queue)
5259 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5260 				return 1;
5261 		list_for_each_entry(tgt, &vhost->targets, queue)
5262 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5263 				return 0;
5264 		return 1;
5265 	case IBMVFC_HOST_ACTION_LOGO:
5266 	case IBMVFC_HOST_ACTION_INIT:
5267 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5268 	case IBMVFC_HOST_ACTION_QUERY:
5269 	case IBMVFC_HOST_ACTION_RESET:
5270 	case IBMVFC_HOST_ACTION_REENABLE:
5271 	default:
5272 		break;
5273 	}
5274 
5275 	return 1;
5276 }
5277 
5278 /**
5279  * ibmvfc_work_to_do - Is there task level work to do?
5280  * @vhost:		ibmvfc host struct
5281  *
5282  * Returns:
5283  *	1 if work to do / 0 if not
5284  **/
5285 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5286 {
5287 	unsigned long flags;
5288 	int rc;
5289 
5290 	spin_lock_irqsave(vhost->host->host_lock, flags);
5291 	rc = __ibmvfc_work_to_do(vhost);
5292 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5293 	return rc;
5294 }
5295 
5296 /**
5297  * ibmvfc_log_ae - Log async events if necessary
5298  * @vhost:		ibmvfc host struct
5299  * @events:		events to log
5300  *
5301  **/
5302 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5303 {
5304 	if (events & IBMVFC_AE_RSCN)
5305 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5306 	if ((events & IBMVFC_AE_LINKDOWN) &&
5307 	    vhost->state >= IBMVFC_HALTED)
5308 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5309 	if ((events & IBMVFC_AE_LINKUP) &&
5310 	    vhost->state == IBMVFC_INITIALIZING)
5311 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5312 }
5313 
5314 /**
5315  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5316  * @tgt:		ibmvfc target struct
5317  *
5318  **/
5319 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5320 {
5321 	struct ibmvfc_host *vhost = tgt->vhost;
5322 	struct fc_rport *rport;
5323 	unsigned long flags;
5324 
5325 	tgt_dbg(tgt, "Adding rport\n");
5326 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5327 	spin_lock_irqsave(vhost->host->host_lock, flags);
5328 
5329 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5330 		tgt_dbg(tgt, "Deleting rport\n");
5331 		list_del(&tgt->queue);
5332 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5333 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5334 		fc_remote_port_delete(rport);
5335 		del_timer_sync(&tgt->timer);
5336 		kref_put(&tgt->kref, ibmvfc_release_tgt);
5337 		return;
5338 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5339 		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5340 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5341 		tgt->rport = NULL;
5342 		tgt->init_retries = 0;
5343 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5344 		fc_remote_port_delete(rport);
5345 		return;
5346 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5347 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5348 		return;
5349 	}
5350 
5351 	if (rport) {
5352 		tgt_dbg(tgt, "rport add succeeded\n");
5353 		tgt->rport = rport;
5354 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5355 		rport->supported_classes = 0;
5356 		tgt->target_id = rport->scsi_target_id;
5357 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5358 			rport->supported_classes |= FC_COS_CLASS1;
5359 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5360 			rport->supported_classes |= FC_COS_CLASS2;
5361 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5362 			rport->supported_classes |= FC_COS_CLASS3;
5363 		if (rport->rqst_q)
5364 			blk_queue_max_segments(rport->rqst_q, 1);
5365 	} else
5366 		tgt_dbg(tgt, "rport add failed\n");
5367 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5368 }
5369 
5370 /**
5371  * ibmvfc_do_work - Do task level work
5372  * @vhost:		ibmvfc host struct
5373  *
5374  **/
5375 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5376 {
5377 	struct ibmvfc_target *tgt;
5378 	unsigned long flags;
5379 	struct fc_rport *rport;
5380 	LIST_HEAD(purge);
5381 	int rc;
5382 
5383 	ibmvfc_log_ae(vhost, vhost->events_to_log);
5384 	spin_lock_irqsave(vhost->host->host_lock, flags);
5385 	vhost->events_to_log = 0;
5386 	switch (vhost->action) {
5387 	case IBMVFC_HOST_ACTION_NONE:
5388 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5389 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5390 		break;
5391 	case IBMVFC_HOST_ACTION_RESET:
5392 		list_splice_init(&vhost->purge, &purge);
5393 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5394 		ibmvfc_complete_purge(&purge);
5395 		rc = ibmvfc_reset_crq(vhost);
5396 
5397 		spin_lock_irqsave(vhost->host->host_lock, flags);
5398 		if (!rc || rc == H_CLOSED)
5399 			vio_enable_interrupts(to_vio_dev(vhost->dev));
5400 		if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
5401 			/*
5402 			 * The only action we could have changed to would have
5403 			 * been reenable, in which case, we skip the rest of
5404 			 * this path and wait until we've done the re-enable
5405 			 * before sending the crq init.
5406 			 */
5407 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5408 
5409 			if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5410 			    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5411 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5412 				dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5413 			}
5414 		}
5415 		break;
5416 	case IBMVFC_HOST_ACTION_REENABLE:
5417 		list_splice_init(&vhost->purge, &purge);
5418 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5419 		ibmvfc_complete_purge(&purge);
5420 		rc = ibmvfc_reenable_crq_queue(vhost);
5421 
5422 		spin_lock_irqsave(vhost->host->host_lock, flags);
5423 		if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
5424 			/*
5425 			 * The only action we could have changed to would have
5426 			 * been reset, in which case, we skip the rest of this
5427 			 * path and wait until we've done the reset before
5428 			 * sending the crq init.
5429 			 */
5430 			vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5431 			if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5432 				ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5433 				dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5434 			}
5435 		}
5436 		break;
5437 	case IBMVFC_HOST_ACTION_LOGO:
5438 		vhost->job_step(vhost);
5439 		break;
5440 	case IBMVFC_HOST_ACTION_INIT:
5441 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5442 		if (vhost->delay_init) {
5443 			vhost->delay_init = 0;
5444 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5445 			ssleep(15);
5446 			return;
5447 		} else
5448 			vhost->job_step(vhost);
5449 		break;
5450 	case IBMVFC_HOST_ACTION_QUERY:
5451 		list_for_each_entry(tgt, &vhost->targets, queue)
5452 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5453 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5454 		break;
5455 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5456 		list_for_each_entry(tgt, &vhost->targets, queue) {
5457 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5458 				tgt->job_step(tgt);
5459 				break;
5460 			}
5461 		}
5462 
5463 		if (!ibmvfc_dev_init_to_do(vhost))
5464 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5465 		break;
5466 	case IBMVFC_HOST_ACTION_TGT_DEL:
5467 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5468 		list_for_each_entry(tgt, &vhost->targets, queue) {
5469 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5470 				tgt->job_step(tgt);
5471 				break;
5472 			}
5473 		}
5474 
5475 		if (ibmvfc_dev_logo_to_do(vhost)) {
5476 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5477 			return;
5478 		}
5479 
5480 		list_for_each_entry(tgt, &vhost->targets, queue) {
5481 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5482 				tgt_dbg(tgt, "Deleting rport\n");
5483 				rport = tgt->rport;
5484 				tgt->rport = NULL;
5485 				list_del(&tgt->queue);
5486 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5487 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5488 				if (rport)
5489 					fc_remote_port_delete(rport);
5490 				del_timer_sync(&tgt->timer);
5491 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5492 				return;
5493 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5494 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5495 				rport = tgt->rport;
5496 				tgt->rport = NULL;
5497 				tgt->init_retries = 0;
5498 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5499 
5500 				/*
5501 				 * If fast fail is enabled, we wait for it to fire and then clean up
5502 				 * the old port, since we expect the fast fail timer to clean up the
5503 				 * outstanding I/O faster than waiting for normal command timeouts.
5504 				 * However, if fast fail is disabled, any I/O outstanding to the
5505 				 * rport LUNs will stay outstanding indefinitely, since the EH handlers
5506 				 * won't get invoked for I/O's timing out. If this is a NPIV failover
5507 				 * scenario, the better alternative is to use the move login.
5508 				 */
5509 				if (rport && rport->fast_io_fail_tmo == -1)
5510 					tgt->move_login = 1;
5511 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5512 				if (rport)
5513 					fc_remote_port_delete(rport);
5514 				return;
5515 			}
5516 		}
5517 
5518 		if (vhost->state == IBMVFC_INITIALIZING) {
5519 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5520 				if (vhost->reinit) {
5521 					vhost->reinit = 0;
5522 					scsi_block_requests(vhost->host);
5523 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5524 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5525 				} else {
5526 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5527 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5528 					wake_up(&vhost->init_wait_q);
5529 					schedule_work(&vhost->rport_add_work_q);
5530 					vhost->init_retries = 0;
5531 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5532 					scsi_unblock_requests(vhost->host);
5533 				}
5534 
5535 				return;
5536 			} else {
5537 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5538 				vhost->job_step = ibmvfc_discover_targets;
5539 			}
5540 		} else {
5541 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5542 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5543 			scsi_unblock_requests(vhost->host);
5544 			wake_up(&vhost->init_wait_q);
5545 			return;
5546 		}
5547 		break;
5548 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5549 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5550 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5551 		ibmvfc_alloc_targets(vhost);
5552 		spin_lock_irqsave(vhost->host->host_lock, flags);
5553 		break;
5554 	case IBMVFC_HOST_ACTION_TGT_INIT:
5555 		list_for_each_entry(tgt, &vhost->targets, queue) {
5556 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5557 				tgt->job_step(tgt);
5558 				break;
5559 			}
5560 		}
5561 
5562 		if (!ibmvfc_dev_init_to_do(vhost))
5563 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5564 		break;
5565 	default:
5566 		break;
5567 	}
5568 
5569 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5570 }
5571 
5572 /**
5573  * ibmvfc_work - Do task level work
5574  * @data:		ibmvfc host struct
5575  *
5576  * Returns:
5577  *	zero
5578  **/
5579 static int ibmvfc_work(void *data)
5580 {
5581 	struct ibmvfc_host *vhost = data;
5582 	int rc;
5583 
5584 	set_user_nice(current, MIN_NICE);
5585 
5586 	while (1) {
5587 		rc = wait_event_interruptible(vhost->work_wait_q,
5588 					      ibmvfc_work_to_do(vhost));
5589 
5590 		BUG_ON(rc);
5591 
5592 		if (kthread_should_stop())
5593 			break;
5594 
5595 		ibmvfc_do_work(vhost);
5596 	}
5597 
5598 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5599 	return 0;
5600 }
5601 
5602 /**
5603  * ibmvfc_alloc_queue - Allocate queue
5604  * @vhost:	ibmvfc host struct
5605  * @queue:	ibmvfc queue to allocate
5606  * @fmt:	queue format to allocate
5607  *
5608  * Returns:
5609  *	0 on success / non-zero on failure
5610  **/
5611 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5612 			      struct ibmvfc_queue *queue,
5613 			      enum ibmvfc_msg_fmt fmt)
5614 {
5615 	struct device *dev = vhost->dev;
5616 	size_t fmt_size;
5617 	unsigned int pool_size = 0;
5618 
5619 	ENTER;
5620 	spin_lock_init(&queue->_lock);
5621 	queue->q_lock = &queue->_lock;
5622 
5623 	switch (fmt) {
5624 	case IBMVFC_CRQ_FMT:
5625 		fmt_size = sizeof(*queue->msgs.crq);
5626 		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
5627 		break;
5628 	case IBMVFC_ASYNC_FMT:
5629 		fmt_size = sizeof(*queue->msgs.async);
5630 		break;
5631 	case IBMVFC_SUB_CRQ_FMT:
5632 		fmt_size = sizeof(*queue->msgs.scrq);
5633 		/* We need one extra event for Cancel Commands */
5634 		pool_size = max_requests + 1;
5635 		break;
5636 	default:
5637 		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5638 		return -EINVAL;
5639 	}
5640 
5641 	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
5642 		dev_err(dev, "Couldn't initialize event pool.\n");
5643 		return -ENOMEM;
5644 	}
5645 
5646 	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5647 	if (!queue->msgs.handle)
5648 		return -ENOMEM;
5649 
5650 	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5651 					  DMA_BIDIRECTIONAL);
5652 
5653 	if (dma_mapping_error(dev, queue->msg_token)) {
5654 		free_page((unsigned long)queue->msgs.handle);
5655 		queue->msgs.handle = NULL;
5656 		return -ENOMEM;
5657 	}
5658 
5659 	queue->cur = 0;
5660 	queue->fmt = fmt;
5661 	queue->size = PAGE_SIZE / fmt_size;
5662 	return 0;
5663 }
5664 
5665 /**
5666  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5667  * @vhost:	ibmvfc host struct
5668  *
5669  * Allocates a page for messages, maps it for dma, and registers
5670  * the crq with the hypervisor.
5671  *
5672  * Return value:
5673  *	zero on success / other on failure
5674  **/
5675 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5676 {
5677 	int rc, retrc = -ENOMEM;
5678 	struct device *dev = vhost->dev;
5679 	struct vio_dev *vdev = to_vio_dev(dev);
5680 	struct ibmvfc_queue *crq = &vhost->crq;
5681 
5682 	ENTER;
5683 	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5684 		return -ENOMEM;
5685 
5686 	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5687 					crq->msg_token, PAGE_SIZE);
5688 
5689 	if (rc == H_RESOURCE)
5690 		/* maybe kexecing and resource is busy. try a reset */
5691 		retrc = rc = ibmvfc_reset_crq(vhost);
5692 
5693 	if (rc == H_CLOSED)
5694 		dev_warn(dev, "Partner adapter not ready\n");
5695 	else if (rc) {
5696 		dev_warn(dev, "Error %d opening adapter\n", rc);
5697 		goto reg_crq_failed;
5698 	}
5699 
5700 	retrc = 0;
5701 
5702 	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5703 
5704 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5705 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5706 		goto req_irq_failed;
5707 	}
5708 
5709 	if ((rc = vio_enable_interrupts(vdev))) {
5710 		dev_err(dev, "Error %d enabling interrupts\n", rc);
5711 		goto req_irq_failed;
5712 	}
5713 
5714 	LEAVE;
5715 	return retrc;
5716 
5717 req_irq_failed:
5718 	tasklet_kill(&vhost->tasklet);
5719 	do {
5720 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5721 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5722 reg_crq_failed:
5723 	ibmvfc_free_queue(vhost, crq);
5724 	return retrc;
5725 }
5726 
5727 static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
5728 				  int index)
5729 {
5730 	struct device *dev = vhost->dev;
5731 	struct vio_dev *vdev = to_vio_dev(dev);
5732 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5733 	int rc = -ENOMEM;
5734 
5735 	ENTER;
5736 
5737 	if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
5738 		return -ENOMEM;
5739 
5740 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5741 			   &scrq->cookie, &scrq->hw_irq);
5742 
5743 	/* H_CLOSED indicates successful register, but no CRQ partner */
5744 	if (rc && rc != H_CLOSED) {
5745 		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5746 		if (rc == H_PARAMETER)
5747 			dev_warn_once(dev, "Firmware may not support MQ\n");
5748 		goto reg_failed;
5749 	}
5750 
5751 	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5752 
5753 	if (!scrq->irq) {
5754 		rc = -EINVAL;
5755 		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5756 		goto irq_failed;
5757 	}
5758 
5759 	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5760 		 vdev->unit_address, index);
5761 	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
5762 
5763 	if (rc) {
5764 		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5765 		irq_dispose_mapping(scrq->irq);
5766 		goto irq_failed;
5767 	}
5768 
5769 	scrq->hwq_id = index;
5770 	scrq->vhost = vhost;
5771 
5772 	LEAVE;
5773 	return 0;
5774 
5775 irq_failed:
5776 	do {
5777 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5778 	} while (rtas_busy_delay(rc));
5779 reg_failed:
5780 	ibmvfc_free_queue(vhost, scrq);
5781 	LEAVE;
5782 	return rc;
5783 }
5784 
/**
 * ibmvfc_deregister_scsi_channel - Tear down one SCSI sub-CRQ channel
 * @vhost:	ibmvfc host struct
 * @index:	index of the channel in vhost->scsi_scrqs.scrqs
 *
 * Releases the channel's irq, frees the sub-CRQ with the hypervisor
 * (retrying while the hcall reports busy), and frees the queue memory.
 **/
static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
{
	struct device *dev = vhost->dev;
	struct vio_dev *vdev = to_vio_dev(dev);
	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
	long rc;

	ENTER;

	free_irq(scrq->irq, scrq);
	irq_dispose_mapping(scrq->irq);
	scrq->irq = 0;

	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
					scrq->cookie);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);

	ibmvfc_free_queue(vhost, scrq);
	LEAVE;
}
5809 
5810 static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
5811 {
5812 	int i, j;
5813 
5814 	ENTER;
5815 	if (!vhost->mq_enabled)
5816 		return;
5817 
5818 	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
5819 					  sizeof(*vhost->scsi_scrqs.scrqs),
5820 					  GFP_KERNEL);
5821 	if (!vhost->scsi_scrqs.scrqs) {
5822 		vhost->do_enquiry = 0;
5823 		return;
5824 	}
5825 
5826 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5827 		if (ibmvfc_register_scsi_channel(vhost, i)) {
5828 			for (j = i; j > 0; j--)
5829 				ibmvfc_deregister_scsi_channel(vhost, j - 1);
5830 			kfree(vhost->scsi_scrqs.scrqs);
5831 			vhost->scsi_scrqs.scrqs = NULL;
5832 			vhost->scsi_scrqs.active_queues = 0;
5833 			vhost->do_enquiry = 0;
5834 			break;
5835 		}
5836 	}
5837 
5838 	LEAVE;
5839 }
5840 
5841 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
5842 {
5843 	int i;
5844 
5845 	ENTER;
5846 	if (!vhost->scsi_scrqs.scrqs)
5847 		return;
5848 
5849 	for (i = 0; i < nr_scsi_hw_queues; i++)
5850 		ibmvfc_deregister_scsi_channel(vhost, i);
5851 
5852 	kfree(vhost->scsi_scrqs.scrqs);
5853 	vhost->scsi_scrqs.scrqs = NULL;
5854 	vhost->scsi_scrqs.active_queues = 0;
5855 	LEAVE;
5856 }
5857 
5858 /**
5859  * ibmvfc_free_mem - Free memory for vhost
5860  * @vhost:	ibmvfc host struct
5861  *
5862  * Return value:
5863  * 	none
5864  **/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *async_q = &vhost->async_crq;

	ENTER;
	/* Release everything set up by ibmvfc_alloc_mem */
	mempool_destroy(vhost->tgt_pool);
	kfree(vhost->trace);
	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
	dma_free_coherent(vhost->dev, sizeof(*vhost->channel_setup_buf),
			  vhost->channel_setup_buf, vhost->channel_setup_dma);
	dma_pool_destroy(vhost->sg_pool);
	/* Async event queue last; its page was the first allocation */
	ibmvfc_free_queue(vhost, async_q);
	LEAVE;
}
5882 
5883 /**
5884  * ibmvfc_alloc_mem - Allocate memory for vhost
5885  * @vhost:	ibmvfc host struct
5886  *
5887  * Return value:
5888  * 	0 on success / non-zero on failure
5889  **/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
	struct ibmvfc_queue *async_q = &vhost->async_crq;
	struct device *dev = vhost->dev;

	ENTER;
	/* Async event queue: one DMA-mapped page of async entries */
	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
		dev_err(dev, "Couldn't allocate/map async queue.\n");
		goto nomem;
	}

	/* DMA pool for per-command SRP scatter/gather descriptor lists */
	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
					 SG_ALL * sizeof(struct srp_direct_buf),
					 sizeof(struct srp_direct_buf), 0);

	if (!vhost->sg_pool) {
		dev_err(dev, "Failed to allocate sg pool\n");
		goto unmap_async_crq;
	}

	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
					      &vhost->login_buf_dma, GFP_KERNEL);

	if (!vhost->login_buf) {
		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
		goto free_sg_pool;
	}

	/* Discovery buffer is sized by the max_targets module parameter */
	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
					     &vhost->disc_buf_dma, GFP_KERNEL);

	if (!vhost->disc_buf) {
		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
		goto free_login_buffer;
	}

	/* Trace ring for debugging; index -1 means no entries yet */
	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
	atomic_set(&vhost->trace_index, -1);

	if (!vhost->trace)
		goto free_disc_buffer;

	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
						      sizeof(struct ibmvfc_target));

	if (!vhost->tgt_pool) {
		dev_err(dev, "Couldn't allocate target memory pool\n");
		goto free_trace;
	}

	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
						      &vhost->channel_setup_dma,
						      GFP_KERNEL);

	if (!vhost->channel_setup_buf) {
		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
		goto free_tgt_pool;
	}

	LEAVE;
	return 0;

	/* Unwind in reverse order of allocation */
free_tgt_pool:
	mempool_destroy(vhost->tgt_pool);
free_trace:
	kfree(vhost->trace);
free_disc_buffer:
	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
			  vhost->disc_buf_dma);
free_login_buffer:
	dma_free_coherent(dev, sizeof(*vhost->login_buf),
			  vhost->login_buf, vhost->login_buf_dma);
free_sg_pool:
	dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
	ibmvfc_free_queue(vhost, async_q);
nomem:
	LEAVE;
	return -ENOMEM;
}
5972 
5973 /**
5974  * ibmvfc_rport_add_thread - Worker thread for rport adds
5975  * @work:	work struct
5976  *
5977  **/
static void ibmvfc_rport_add_thread(struct work_struct *work)
{
	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
						 rport_add_work_q);
	struct ibmvfc_target *tgt;
	struct fc_rport *rport;
	unsigned long flags;
	int did_work;

	ENTER;
	spin_lock_irqsave(vhost->host->host_lock, flags);
	do {
		did_work = 0;
		if (vhost->state != IBMVFC_ACTIVE)
			break;

		list_for_each_entry(tgt, &vhost->targets, queue) {
			if (tgt->add_rport) {
				did_work = 1;
				tgt->add_rport = 0;
				/* Pin the target across the unlocked FC
				 * transport calls below */
				kref_get(&tgt->kref);
				rport = tgt->rport;
				if (!rport) {
					/* No rport yet -- create one */
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					ibmvfc_tgt_add_rport(tgt);
				} else if (get_device(&rport->dev)) {
					/* Existing rport -- just refresh roles */
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
					tgt_dbg(tgt, "Setting rport roles\n");
					fc_remote_port_rolechg(rport, tgt->ids.roles);
					put_device(&rport->dev);
				} else {
					spin_unlock_irqrestore(vhost->host->host_lock, flags);
				}

				kref_put(&tgt->kref, ibmvfc_release_tgt);
				/* The target list may have changed while the
				 * lock was dropped; restart from the head */
				spin_lock_irqsave(vhost->host->host_lock, flags);
				break;
			}
		}
	} while(did_work);

	if (vhost->state == IBMVFC_ACTIVE)
		vhost->scan_complete = 1;
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	LEAVE;
}
6024 
6025 /**
6026  * ibmvfc_probe - Adapter hot plug add entry point
6027  * @vdev:	vio device struct
6028  * @id:	vio device id struct
6029  *
6030  * Return value:
6031  * 	0 on success / non-zero on failure
6032  **/
static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvfc_host *vhost;
	struct Scsi_Host *shost;
	struct device *dev = &vdev->dev;
	int rc = -ENOMEM;
	unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;

	ENTER;
	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
	if (!shost) {
		dev_err(dev, "Couldn't allocate host data\n");
		goto out;
	}

	/* Host limits come from module parameters and driver constants */
	shost->transportt = ibmvfc_transport_template;
	shost->can_queue = max_requests;
	shost->max_lun = max_lun;
	shost->max_id = max_targets;
	shost->max_sectors = IBMVFC_MAX_SECTORS;
	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
	shost->unique_id = shost->host_no;
	/* With MQ disabled, fall back to a single hardware queue */
	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;

	vhost = shost_priv(shost);
	INIT_LIST_HEAD(&vhost->targets);
	INIT_LIST_HEAD(&vhost->purge);
	sprintf(vhost->name, IBMVFC_NAME);
	vhost->host = shost;
	vhost->dev = dev;
	vhost->partition_number = -1;
	vhost->log_level = log_level;
	vhost->task_set = 1;

	vhost->mq_enabled = mq_enabled;
	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
	vhost->using_channels = 0;
	vhost->do_enquiry = 1;

	strcpy(vhost->partition_name, "UNKNOWN");
	init_waitqueue_head(&vhost->work_wait_q);
	init_waitqueue_head(&vhost->init_wait_q);
	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
	mutex_init(&vhost->passthru_mutex);

	if ((rc = ibmvfc_alloc_mem(vhost)))
		goto free_scsi_host;

	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
					 shost->host_no);

	if (IS_ERR(vhost->work_thread)) {
		dev_err(dev, "Couldn't create kernel thread: %ld\n",
			PTR_ERR(vhost->work_thread));
		rc = PTR_ERR(vhost->work_thread);
		goto free_host_mem;
	}

	if ((rc = ibmvfc_init_crq(vhost))) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}

	if ((rc = scsi_add_host(shost, dev)))
		goto release_crq;

	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;

	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
					   &ibmvfc_trace_attr))) {
		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
		goto remove_shost;
	}

	/* Sub-CRQ setup failure is non-fatal: ibmvfc_init_sub_crqs tears
	 * itself down and clears do_enquiry on failure */
	ibmvfc_init_sub_crqs(vhost);

	if (shost_to_fc_host(shost)->rqst_q)
		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
	dev_set_drvdata(dev, vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_add_tail(&vhost->queue, &ibmvfc_head);
	spin_unlock(&ibmvfc_driver_lock);

	/* Kick off the initial handshake and scan for devices */
	ibmvfc_send_crq_init(vhost);
	scsi_scan_host(shost);
	return 0;

remove_shost:
	scsi_remove_host(shost);
release_crq:
	ibmvfc_release_crq_queue(vhost);
kill_kthread:
	kthread_stop(vhost->work_thread);
free_host_mem:
	ibmvfc_free_mem(vhost);
free_scsi_host:
	scsi_host_put(shost);
out:
	LEAVE;
	return rc;
}
6134 
6135 /**
6136  * ibmvfc_remove - Adapter hot plug remove entry point
6137  * @vdev:	vio device struct
6138  *
6139  * Return value:
6140  * 	0
6141  **/
static void ibmvfc_remove(struct vio_dev *vdev)
{
	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
	LIST_HEAD(purge);
	unsigned long flags;

	ENTER;
	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);

	/* Take the link down under the host lock so no new work starts */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	ibmvfc_wait_while_resetting(vhost);
	kthread_stop(vhost->work_thread);
	fc_remove_host(vhost->host);
	scsi_remove_host(vhost->host);

	/* Fail outstanding requests under the lock, complete them outside */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_purge_requests(vhost, DID_ERROR);
	list_splice_init(&vhost->purge, &purge);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	ibmvfc_complete_purge(&purge);
	/* Sub-CRQs before the main CRQ */
	ibmvfc_release_sub_crqs(vhost);
	ibmvfc_release_crq_queue(vhost);

	ibmvfc_free_mem(vhost);
	spin_lock(&ibmvfc_driver_lock);
	list_del(&vhost->queue);
	spin_unlock(&ibmvfc_driver_lock);
	scsi_host_put(vhost->host);
	LEAVE;
}
6175 
6176 /**
6177  * ibmvfc_resume - Resume from suspend
6178  * @dev:	device struct
6179  *
6180  * We may have lost an interrupt across suspend/resume, so kick the
6181  * interrupt handler
6182  *
6183  */
static int ibmvfc_resume(struct device *dev)
{
	unsigned long flags;
	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
	struct vio_dev *vdev = to_vio_dev(dev);

	spin_lock_irqsave(vhost->host->host_lock, flags);
	/* An interrupt may have been lost across suspend/resume, so
	 * schedule the tasklet to process any pending CRQ entries */
	vio_disable_interrupts(vdev);
	tasklet_schedule(&vhost->tasklet);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);
	return 0;
}
6196 
6197 /**
6198  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6199  * @vdev:	vio device struct
6200  *
6201  * Return value:
6202  *	Number of bytes the driver will need to DMA map at the same time in
6203  *	order to perform well.
6204  */
6205 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6206 {
6207 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
6208 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6209 }
6210 
/* VIO device IDs this driver binds to; terminated by the empty entry */
static const struct vio_device_id ibmvfc_device_table[] = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6216 
/* Power management callbacks; only a resume handler is provided */
static const struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};
6220 
/* VIO bus driver registration info */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.name = IBMVFC_NAME,
	.pm = &ibmvfc_pm_ops,
};
6229 
/* Attributes and callbacks exported through the FC transport class */
static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};
6266 
6267 /**
6268  * ibmvfc_module_init - Initialize the ibmvfc module
6269  *
6270  * Return value:
6271  * 	0 on success / other on failure
6272  **/
6273 static int __init ibmvfc_module_init(void)
6274 {
6275 	int rc;
6276 
6277 	if (!firmware_has_feature(FW_FEATURE_VIO))
6278 		return -ENODEV;
6279 
6280 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6281 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6282 
6283 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6284 	if (!ibmvfc_transport_template)
6285 		return -ENOMEM;
6286 
6287 	rc = vio_register_driver(&ibmvfc_driver);
6288 	if (rc)
6289 		fc_release_transport(ibmvfc_transport_template);
6290 	return rc;
6291 }
6292 
6293 /**
6294  * ibmvfc_module_exit - Teardown the ibmvfc module
6295  *
6296  * Return value:
6297  * 	nothing
6298  **/
static void __exit ibmvfc_module_exit(void)
{
	/* Unregister from the VIO bus, then drop the FC transport template */
	vio_unregister_driver(&ibmvfc_driver);
	fc_release_transport(ibmvfc_transport_template);
}
6304 
/* Module entry and exit points */
module_init(ibmvfc_module_init);
module_exit(ibmvfc_module_exit);
6307