xref: /openbmc/linux/drivers/scsi/ibmvscsi/ibmvfc.c (revision 8938c48f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9 
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/kthread.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/pm.h>
20 #include <linux/stringify.h>
21 #include <linux/bsg-lib.h>
22 #include <asm/firmware.h>
23 #include <asm/irq.h>
24 #include <asm/vio.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_cmnd.h>
27 #include <scsi/scsi_host.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include "ibmvfc.h"
33 
34 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
35 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
36 static u64 max_lun = IBMVFC_MAX_LUN;
37 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
38 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
39 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
40 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
41 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
42 static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
43 static unsigned int mq_enabled = IBMVFC_MQ;
44 static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
45 static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
46 static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
47 static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
48 
49 static LIST_HEAD(ibmvfc_head);
50 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
51 static struct scsi_transport_template *ibmvfc_transport_template;
52 
53 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
54 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
55 MODULE_LICENSE("GPL");
56 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
57 
58 module_param_named(mq, mq_enabled, uint, S_IRUGO);
59 MODULE_PARM_DESC(mq, "Enable multiqueue support. "
60 		 "[Default=" __stringify(IBMVFC_MQ) "]");
61 module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
62 MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
63 		 "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
64 module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
65 MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
66 		 "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
67 module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
68 MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
69 		 "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
70 module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
71 MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
72 		 "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
73 
74 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
75 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
76 		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
77 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
78 MODULE_PARM_DESC(default_timeout,
79 		 "Default timeout in seconds for initialization and EH commands. "
80 		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
81 module_param_named(max_requests, max_requests, uint, S_IRUGO);
82 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
83 		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
84 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
85 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
86 		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
87 module_param_named(max_targets, max_targets, uint, S_IRUGO);
88 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
89 		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
90 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
91 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
92 		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
93 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
94 MODULE_PARM_DESC(debug, "Enable driver debug information. "
95 		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
96 module_param_named(log_level, log_level, uint, 0);
97 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
98 		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
99 module_param_named(cls3_error, cls3_error, uint, 0);
100 MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
101 		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
102 
103 static const struct {
104 	u16 status;
105 	u16 error;
106 	u8 result;
107 	u8 retry;
108 	int log;
109 	char *name;
110 } cmd_status [] = {
111 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
112 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
113 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
114 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
115 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
116 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
117 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
118 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
119 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
120 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
121 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
122 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
123 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
124 	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
125 
126 	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
127 	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
128 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
129 	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
130 	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
131 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
132 	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
133 	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
134 	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
135 	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },
136 
137 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
138 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
139 	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
140 	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
141 	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
142 	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
143 	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
144 	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
145 	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
146 	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
147 	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
148 
149 	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
150 	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
151 };
152 
153 static void ibmvfc_npiv_login(struct ibmvfc_host *);
154 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
155 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
156 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
157 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
158 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
159 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
160 
161 static const char *unknown_error = "unknown error";
162 
163 static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
164 			  unsigned long length, unsigned long *cookie,
165 			  unsigned long *irq)
166 {
167 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
168 	long rc;
169 
170 	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
171 	*cookie = retbuf[0];
172 	*irq = retbuf[1];
173 
174 	return rc;
175 }
176 
177 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
178 {
179 	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
180 
181 	return (host_caps & cap_flags) ? 1 : 0;
182 }
183 
184 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
185 						   struct ibmvfc_cmd *vfc_cmd)
186 {
187 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
188 		return &vfc_cmd->v2.iu;
189 	else
190 		return &vfc_cmd->v1.iu;
191 }
192 
193 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
194 						 struct ibmvfc_cmd *vfc_cmd)
195 {
196 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
197 		return &vfc_cmd->v2.rsp;
198 	else
199 		return &vfc_cmd->v1.rsp;
200 }
201 
202 #ifdef CONFIG_SCSI_IBMVFC_TRACE
203 /**
204  * ibmvfc_trc_start - Log a start trace entry
205  * @evt:		ibmvfc event struct
206  *
207  **/
208 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
209 {
210 	struct ibmvfc_host *vhost = evt->vhost;
211 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
212 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
213 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
214 	struct ibmvfc_trace_entry *entry;
215 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
216 
217 	entry = &vhost->trace[index];
218 	entry->evt = evt;
219 	entry->time = jiffies;
220 	entry->fmt = evt->crq.format;
221 	entry->type = IBMVFC_TRC_START;
222 
223 	switch (entry->fmt) {
224 	case IBMVFC_CMD_FORMAT:
225 		entry->op_code = iu->cdb[0];
226 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
227 		entry->lun = scsilun_to_int(&iu->lun);
228 		entry->tmf_flags = iu->tmf_flags;
229 		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
230 		break;
231 	case IBMVFC_MAD_FORMAT:
232 		entry->op_code = be32_to_cpu(mad->opcode);
233 		break;
234 	default:
235 		break;
236 	}
237 }
238 
239 /**
240  * ibmvfc_trc_end - Log an end trace entry
241  * @evt:		ibmvfc event struct
242  *
243  **/
244 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
245 {
246 	struct ibmvfc_host *vhost = evt->vhost;
247 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
248 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
249 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
250 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
251 	struct ibmvfc_trace_entry *entry;
252 	int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
253 
254 	entry = &vhost->trace[index];
255 	entry->evt = evt;
256 	entry->time = jiffies;
257 	entry->fmt = evt->crq.format;
258 	entry->type = IBMVFC_TRC_END;
259 
260 	switch (entry->fmt) {
261 	case IBMVFC_CMD_FORMAT:
262 		entry->op_code = iu->cdb[0];
263 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
264 		entry->lun = scsilun_to_int(&iu->lun);
265 		entry->tmf_flags = iu->tmf_flags;
266 		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
267 		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
268 		entry->u.end.fcp_rsp_flags = rsp->flags;
269 		entry->u.end.rsp_code = rsp->data.info.rsp_code;
270 		entry->u.end.scsi_status = rsp->scsi_status;
271 		break;
272 	case IBMVFC_MAD_FORMAT:
273 		entry->op_code = be32_to_cpu(mad->opcode);
274 		entry->u.end.status = be16_to_cpu(mad->status);
275 		break;
276 	default:
277 		break;
278 
279 	}
280 }
281 
282 #else
283 #define ibmvfc_trc_start(evt) do { } while (0)
284 #define ibmvfc_trc_end(evt) do { } while (0)
285 #endif
286 
287 /**
288  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
289  * @status:		status / error class
290  * @error:		error
291  *
292  * Return value:
293  *	index into cmd_status / -EINVAL on failure
294  **/
295 static int ibmvfc_get_err_index(u16 status, u16 error)
296 {
297 	int i;
298 
299 	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
300 		if ((cmd_status[i].status & status) == cmd_status[i].status &&
301 		    cmd_status[i].error == error)
302 			return i;
303 
304 	return -EINVAL;
305 }
306 
307 /**
308  * ibmvfc_get_cmd_error - Find the error description for the fcp response
309  * @status:		status / error class
310  * @error:		error
311  *
312  * Return value:
313  *	error description string
314  **/
315 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
316 {
317 	int rc = ibmvfc_get_err_index(status, error);
318 	if (rc >= 0)
319 		return cmd_status[rc].name;
320 	return unknown_error;
321 }
322 
323 /**
324  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
325  * @vfc_cmd:	ibmvfc command struct
326  *
327  * Return value:
328  *	SCSI result value to return for completed command
329  **/
330 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
331 {
332 	int err;
333 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
334 	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
335 
336 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
337 	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
338 	     rsp->data.info.rsp_code))
339 		return DID_ERROR << 16;
340 
341 	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
342 	if (err >= 0)
343 		return rsp->scsi_status | (cmd_status[err].result << 16);
344 	return rsp->scsi_status | (DID_ERROR << 16);
345 }
346 
347 /**
348  * ibmvfc_retry_cmd - Determine if error status is retryable
349  * @status:		status / error class
350  * @error:		error
351  *
352  * Return value:
353  *	1 if error should be retried / 0 if it should not
354  **/
355 static int ibmvfc_retry_cmd(u16 status, u16 error)
356 {
357 	int rc = ibmvfc_get_err_index(status, error);
358 
359 	if (rc >= 0)
360 		return cmd_status[rc].retry;
361 	return 1;
362 }
363 
364 static const char *unknown_fc_explain = "unknown fc explain";
365 
366 static const struct {
367 	u16 fc_explain;
368 	char *name;
369 } ls_explain [] = {
370 	{ 0x00, "no additional explanation" },
371 	{ 0x01, "service parameter error - options" },
372 	{ 0x03, "service parameter error - initiator control" },
373 	{ 0x05, "service parameter error - recipient control" },
374 	{ 0x07, "service parameter error - received data field size" },
375 	{ 0x09, "service parameter error - concurrent seq" },
376 	{ 0x0B, "service parameter error - credit" },
377 	{ 0x0D, "invalid N_Port/F_Port_Name" },
378 	{ 0x0E, "invalid node/Fabric Name" },
379 	{ 0x0F, "invalid common service parameters" },
380 	{ 0x11, "invalid association header" },
381 	{ 0x13, "association header required" },
382 	{ 0x15, "invalid originator S_ID" },
383 	{ 0x17, "invalid OX_ID-RX-ID combination" },
384 	{ 0x19, "command (request) already in progress" },
385 	{ 0x1E, "N_Port Login requested" },
386 	{ 0x1F, "Invalid N_Port_ID" },
387 };
388 
389 static const struct {
390 	u16 fc_explain;
391 	char *name;
392 } gs_explain [] = {
393 	{ 0x00, "no additional explanation" },
394 	{ 0x01, "port identifier not registered" },
395 	{ 0x02, "port name not registered" },
396 	{ 0x03, "node name not registered" },
397 	{ 0x04, "class of service not registered" },
398 	{ 0x06, "initial process associator not registered" },
399 	{ 0x07, "FC-4 TYPEs not registered" },
400 	{ 0x08, "symbolic port name not registered" },
401 	{ 0x09, "symbolic node name not registered" },
402 	{ 0x0A, "port type not registered" },
403 	{ 0xF0, "authorization exception" },
404 	{ 0xF1, "authentication exception" },
405 	{ 0xF2, "data base full" },
406 	{ 0xF3, "data base empty" },
407 	{ 0xF4, "processing request" },
408 	{ 0xF5, "unable to verify connection" },
409 	{ 0xF6, "devices not in a common zone" },
410 };
411 
412 /**
413  * ibmvfc_get_ls_explain - Return the FC Explain description text
414  * @status:	FC Explain status
415  *
416  * Returns:
417  *	error string
418  **/
419 static const char *ibmvfc_get_ls_explain(u16 status)
420 {
421 	int i;
422 
423 	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
424 		if (ls_explain[i].fc_explain == status)
425 			return ls_explain[i].name;
426 
427 	return unknown_fc_explain;
428 }
429 
430 /**
431  * ibmvfc_get_gs_explain - Return the FC Explain description text
432  * @status:	FC Explain status
433  *
434  * Returns:
435  *	error string
436  **/
437 static const char *ibmvfc_get_gs_explain(u16 status)
438 {
439 	int i;
440 
441 	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
442 		if (gs_explain[i].fc_explain == status)
443 			return gs_explain[i].name;
444 
445 	return unknown_fc_explain;
446 }
447 
448 static const struct {
449 	enum ibmvfc_fc_type fc_type;
450 	char *name;
451 } fc_type [] = {
452 	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
453 	{ IBMVFC_PORT_REJECT, "port reject" },
454 	{ IBMVFC_LS_REJECT, "ELS reject" },
455 	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
456 	{ IBMVFC_PORT_BUSY, "port busy" },
457 	{ IBMVFC_BASIC_REJECT, "basic reject" },
458 };
459 
460 static const char *unknown_fc_type = "unknown fc type";
461 
462 /**
463  * ibmvfc_get_fc_type - Return the FC Type description text
464  * @status:	FC Type error status
465  *
466  * Returns:
467  *	error string
468  **/
469 static const char *ibmvfc_get_fc_type(u16 status)
470 {
471 	int i;
472 
473 	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
474 		if (fc_type[i].fc_type == status)
475 			return fc_type[i].name;
476 
477 	return unknown_fc_type;
478 }
479 
480 /**
481  * ibmvfc_set_tgt_action - Set the next init action for the target
482  * @tgt:		ibmvfc target struct
483  * @action:		action to perform
484  *
485  * Returns:
486  *	0 if action changed / non-zero if not changed
487  **/
488 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
489 				  enum ibmvfc_target_action action)
490 {
491 	int rc = -EINVAL;
492 
493 	switch (tgt->action) {
494 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
495 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
496 		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
497 			tgt->action = action;
498 			rc = 0;
499 		}
500 		break;
501 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
502 		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
503 		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
504 			tgt->action = action;
505 			rc = 0;
506 		}
507 		break;
508 	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
509 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
510 			tgt->action = action;
511 			rc = 0;
512 		}
513 		break;
514 	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
515 		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
516 			tgt->action = action;
517 			rc = 0;
518 		}
519 		break;
520 	case IBMVFC_TGT_ACTION_DEL_RPORT:
521 		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
522 			tgt->action = action;
523 			rc = 0;
524 		}
525 		break;
526 	case IBMVFC_TGT_ACTION_DELETED_RPORT:
527 		break;
528 	default:
529 		tgt->action = action;
530 		rc = 0;
531 		break;
532 	}
533 
534 	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
535 		tgt->add_rport = 0;
536 
537 	return rc;
538 }
539 
540 /**
541  * ibmvfc_set_host_state - Set the state for the host
542  * @vhost:		ibmvfc host struct
543  * @state:		state to set host to
544  *
545  * Returns:
546  *	0 if state changed / non-zero if not changed
547  **/
548 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
549 				  enum ibmvfc_host_state state)
550 {
551 	int rc = 0;
552 
553 	switch (vhost->state) {
554 	case IBMVFC_HOST_OFFLINE:
555 		rc = -EINVAL;
556 		break;
557 	default:
558 		vhost->state = state;
559 		break;
560 	}
561 
562 	return rc;
563 }
564 
565 /**
566  * ibmvfc_set_host_action - Set the next init action for the host
567  * @vhost:		ibmvfc host struct
568  * @action:		action to perform
569  *
570  **/
571 static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
572 				   enum ibmvfc_host_action action)
573 {
574 	switch (action) {
575 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
576 		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
577 			vhost->action = action;
578 		break;
579 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
580 		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
581 			vhost->action = action;
582 		break;
583 	case IBMVFC_HOST_ACTION_INIT_WAIT:
584 		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
585 			vhost->action = action;
586 		break;
587 	case IBMVFC_HOST_ACTION_QUERY:
588 		switch (vhost->action) {
589 		case IBMVFC_HOST_ACTION_INIT_WAIT:
590 		case IBMVFC_HOST_ACTION_NONE:
591 		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
592 			vhost->action = action;
593 			break;
594 		default:
595 			break;
596 		}
597 		break;
598 	case IBMVFC_HOST_ACTION_TGT_INIT:
599 		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
600 			vhost->action = action;
601 		break;
602 	case IBMVFC_HOST_ACTION_INIT:
603 	case IBMVFC_HOST_ACTION_TGT_DEL:
604 		switch (vhost->action) {
605 		case IBMVFC_HOST_ACTION_RESET:
606 		case IBMVFC_HOST_ACTION_REENABLE:
607 			break;
608 		default:
609 			vhost->action = action;
610 			break;
611 		}
612 		break;
613 	case IBMVFC_HOST_ACTION_LOGO:
614 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
615 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
616 	case IBMVFC_HOST_ACTION_NONE:
617 	case IBMVFC_HOST_ACTION_RESET:
618 	case IBMVFC_HOST_ACTION_REENABLE:
619 	default:
620 		vhost->action = action;
621 		break;
622 	}
623 }
624 
625 /**
626  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
627  * @vhost:		ibmvfc host struct
628  *
629  * Return value:
630  *	nothing
631  **/
632 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
633 {
634 	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
635 	    vhost->state == IBMVFC_ACTIVE) {
636 		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
637 			scsi_block_requests(vhost->host);
638 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
639 		}
640 	} else
641 		vhost->reinit = 1;
642 
643 	wake_up(&vhost->work_wait_q);
644 }
645 
646 /**
647  * ibmvfc_del_tgt - Schedule cleanup and removal of the target
648  * @tgt:		ibmvfc target struct
649  * @job_step:	job step to perform
650  *
651  **/
652 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
653 {
654 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
655 		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
656 	wake_up(&tgt->vhost->work_wait_q);
657 }
658 
659 /**
660  * ibmvfc_link_down - Handle a link down event from the adapter
661  * @vhost:	ibmvfc host struct
662  * @state:	ibmvfc host state to enter
663  *
664  **/
665 static void ibmvfc_link_down(struct ibmvfc_host *vhost,
666 			     enum ibmvfc_host_state state)
667 {
668 	struct ibmvfc_target *tgt;
669 
670 	ENTER;
671 	scsi_block_requests(vhost->host);
672 	list_for_each_entry(tgt, &vhost->targets, queue)
673 		ibmvfc_del_tgt(tgt);
674 	ibmvfc_set_host_state(vhost, state);
675 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
676 	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
677 	wake_up(&vhost->work_wait_q);
678 	LEAVE;
679 }
680 
681 /**
682  * ibmvfc_init_host - Start host initialization
683  * @vhost:		ibmvfc host struct
684  *
685  * Return value:
686  *	nothing
687  **/
688 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
689 {
690 	struct ibmvfc_target *tgt;
691 
692 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
693 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
694 			dev_err(vhost->dev,
695 				"Host initialization retries exceeded. Taking adapter offline\n");
696 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
697 			return;
698 		}
699 	}
700 
701 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
702 		memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
703 		vhost->async_crq.cur = 0;
704 
705 		list_for_each_entry(tgt, &vhost->targets, queue)
706 			ibmvfc_del_tgt(tgt);
707 		scsi_block_requests(vhost->host);
708 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
709 		vhost->job_step = ibmvfc_npiv_login;
710 		wake_up(&vhost->work_wait_q);
711 	}
712 }
713 
714 /**
715  * ibmvfc_send_crq - Send a CRQ
716  * @vhost:	ibmvfc host struct
717  * @word1:	the first 64 bits of the data
718  * @word2:	the second 64 bits of the data
719  *
720  * Return value:
721  *	0 on success / other on failure
722  **/
723 static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
724 {
725 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
726 	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
727 }
728 
729 static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
730 			       u64 word2, u64 word3, u64 word4)
731 {
732 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
733 
734 	return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
735 				  word1, word2, word3, word4);
736 }
737 
738 /**
739  * ibmvfc_send_crq_init - Send a CRQ init message
740  * @vhost:	ibmvfc host struct
741  *
742  * Return value:
743  *	0 on success / other on failure
744  **/
745 static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
746 {
747 	ibmvfc_dbg(vhost, "Sending CRQ init\n");
748 	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
749 }
750 
751 /**
752  * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
753  * @vhost:	ibmvfc host struct
754  *
755  * Return value:
756  *	0 on success / other on failure
757  **/
758 static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
759 {
760 	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
761 	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
762 }
763 
764 /**
765  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
766  * @vhost:	ibmvfc host who owns the event pool
767  *
768  * Returns zero on success.
769  **/
770 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
771 				  struct ibmvfc_queue *queue,
772 				  unsigned int size)
773 {
774 	int i;
775 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
776 
777 	ENTER;
778 	if (!size)
779 		return 0;
780 
781 	pool->size = size;
782 	pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
783 	if (!pool->events)
784 		return -ENOMEM;
785 
786 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
787 					      size * sizeof(*pool->iu_storage),
788 					      &pool->iu_token, 0);
789 
790 	if (!pool->iu_storage) {
791 		kfree(pool->events);
792 		return -ENOMEM;
793 	}
794 
795 	INIT_LIST_HEAD(&queue->sent);
796 	INIT_LIST_HEAD(&queue->free);
797 	spin_lock_init(&queue->l_lock);
798 
799 	for (i = 0; i < size; ++i) {
800 		struct ibmvfc_event *evt = &pool->events[i];
801 
802 		atomic_set(&evt->free, 1);
803 		evt->crq.valid = 0x80;
804 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
805 		evt->xfer_iu = pool->iu_storage + i;
806 		evt->vhost = vhost;
807 		evt->queue = queue;
808 		evt->ext_list = NULL;
809 		list_add_tail(&evt->queue_list, &queue->free);
810 	}
811 
812 	LEAVE;
813 	return 0;
814 }
815 
816 /**
817  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
818  * @vhost:	ibmvfc host who owns the event pool
819  *
820  **/
821 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
822 				   struct ibmvfc_queue *queue)
823 {
824 	int i;
825 	struct ibmvfc_event_pool *pool = &queue->evt_pool;
826 
827 	ENTER;
828 	for (i = 0; i < pool->size; ++i) {
829 		list_del(&pool->events[i].queue_list);
830 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
831 		if (pool->events[i].ext_list)
832 			dma_pool_free(vhost->sg_pool,
833 				      pool->events[i].ext_list,
834 				      pool->events[i].ext_list_token);
835 	}
836 
837 	kfree(pool->events);
838 	dma_free_coherent(vhost->dev,
839 			  pool->size * sizeof(*pool->iu_storage),
840 			  pool->iu_storage, pool->iu_token);
841 	LEAVE;
842 }
843 
844 /**
845  * ibmvfc_free_queue - Deallocate queue
846  * @vhost:	ibmvfc host struct
847  * @queue:	ibmvfc queue struct
848  *
849  * Unmaps dma and deallocates page for messages
850  **/
851 static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
852 			      struct ibmvfc_queue *queue)
853 {
854 	struct device *dev = vhost->dev;
855 
856 	dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
857 	free_page((unsigned long)queue->msgs.handle);
858 	queue->msgs.handle = NULL;
859 
860 	ibmvfc_free_event_pool(vhost, queue);
861 }
862 
863 /**
864  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
865  * @vhost:	ibmvfc host struct
866  *
867  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
868  * the crq with the hypervisor.
869  **/
870 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
871 {
872 	long rc = 0;
873 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
874 	struct ibmvfc_queue *crq = &vhost->crq;
875 
876 	ibmvfc_dbg(vhost, "Releasing CRQ\n");
877 	free_irq(vdev->irq, vhost);
878 	tasklet_kill(&vhost->tasklet);
879 	do {
880 		if (rc)
881 			msleep(100);
882 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
883 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
884 
885 	vhost->state = IBMVFC_NO_CRQ;
886 	vhost->logged_in = 0;
887 
888 	ibmvfc_free_queue(vhost, crq);
889 }
890 
891 /**
892  * ibmvfc_reenable_crq_queue - reenables the CRQ
893  * @vhost:	ibmvfc host struct
894  *
895  * Return value:
896  *	0 on success / other on failure
897  **/
898 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
899 {
900 	int rc = 0;
901 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
902 
903 	/* Re-enable the CRQ */
904 	do {
905 		if (rc)
906 			msleep(100);
907 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
908 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
909 
910 	if (rc)
911 		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
912 
913 	return rc;
914 }
915 
916 /**
917  * ibmvfc_reset_crq - resets a crq after a failure
918  * @vhost:	ibmvfc host struct
919  *
920  * Return value:
921  *	0 on success / other on failure
922  **/
923 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
924 {
925 	int rc = 0;
926 	unsigned long flags;
927 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
928 	struct ibmvfc_queue *crq = &vhost->crq;
929 	struct ibmvfc_queue *scrq;
930 	int i;
931 
932 	/* Close the CRQ */
933 	do {
934 		if (rc)
935 			msleep(100);
936 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
937 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
938 
939 	spin_lock_irqsave(vhost->host->host_lock, flags);
940 	spin_lock(vhost->crq.q_lock);
941 	vhost->state = IBMVFC_NO_CRQ;
942 	vhost->logged_in = 0;
943 	vhost->do_enquiry = 1;
944 	vhost->using_channels = 0;
945 
946 	/* Clean out the queue */
947 	memset(crq->msgs.crq, 0, PAGE_SIZE);
948 	crq->cur = 0;
949 
950 	if (vhost->scsi_scrqs.scrqs) {
951 		for (i = 0; i < nr_scsi_hw_queues; i++) {
952 			scrq = &vhost->scsi_scrqs.scrqs[i];
953 			spin_lock(scrq->q_lock);
954 			memset(scrq->msgs.scrq, 0, PAGE_SIZE);
955 			scrq->cur = 0;
956 			spin_unlock(scrq->q_lock);
957 		}
958 	}
959 
960 	/* And re-open it again */
961 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
962 				crq->msg_token, PAGE_SIZE);
963 
964 	if (rc == H_CLOSED)
965 		/* Adapter is good, but other end is not ready */
966 		dev_warn(vhost->dev, "Partner adapter not ready\n");
967 	else if (rc != 0)
968 		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
969 	spin_unlock(vhost->crq.q_lock);
970 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
971 
972 	return rc;
973 }
974 
975 /**
976  * ibmvfc_valid_event - Determines if event is valid.
977  * @pool:	event_pool that contains the event
978  * @evt:	ibmvfc event to be checked for validity
979  *
980  * Return value:
981  *	1 if event is valid / 0 if event is not valid
982  **/
983 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
984 			      struct ibmvfc_event *evt)
985 {
986 	int index = evt - pool->events;
987 	if (index < 0 || index >= pool->size)	/* outside of bounds */
988 		return 0;
989 	if (evt != pool->events + index)	/* unaligned */
990 		return 0;
991 	return 1;
992 }
993 
994 /**
995  * ibmvfc_free_event - Free the specified event
996  * @evt:	ibmvfc_event to be freed
997  *
998  **/
999 static void ibmvfc_free_event(struct ibmvfc_event *evt)
1000 {
1001 	struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
1002 	unsigned long flags;
1003 
1004 	BUG_ON(!ibmvfc_valid_event(pool, evt));
1005 	BUG_ON(atomic_inc_return(&evt->free) != 1);
1006 
1007 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1008 	list_add_tail(&evt->queue_list, &evt->queue->free);
1009 	if (evt->eh_comp)
1010 		complete(evt->eh_comp);
1011 	spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1012 }
1013 
1014 /**
1015  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
1016  * @evt:	ibmvfc event struct
1017  *
1018  * This function does not setup any error status, that must be done
1019  * before this function gets called.
1020  **/
1021 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
1022 {
1023 	struct scsi_cmnd *cmnd = evt->cmnd;
1024 
1025 	if (cmnd) {
1026 		scsi_dma_unmap(cmnd);
1027 		cmnd->scsi_done(cmnd);
1028 	}
1029 
1030 	ibmvfc_free_event(evt);
1031 }
1032 
1033 /**
1034  * ibmvfc_complete_purge - Complete failed command list
1035  * @purge_list:		list head of failed commands
1036  *
1037  * This function runs completions on commands to fail as a result of a
1038  * host reset or platform migration.
1039  **/
1040 static void ibmvfc_complete_purge(struct list_head *purge_list)
1041 {
1042 	struct ibmvfc_event *evt, *pos;
1043 
1044 	list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
1045 		list_del(&evt->queue_list);
1046 		ibmvfc_trc_end(evt);
1047 		evt->done(evt);
1048 	}
1049 }
1050 
1051 /**
1052  * ibmvfc_fail_request - Fail request with specified error code
1053  * @evt:		ibmvfc event struct
1054  * @error_code:	error code to fail request with
1055  *
1056  * Return value:
1057  *	none
1058  **/
1059 static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
1060 {
1061 	if (evt->cmnd) {
1062 		evt->cmnd->result = (error_code << 16);
1063 		evt->done = ibmvfc_scsi_eh_done;
1064 	} else
1065 		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
1066 
1067 	del_timer(&evt->timer);
1068 }
1069 
1070 /**
1071  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
1072  * @vhost:		ibmvfc host struct
1073  * @error_code:	error code to fail requests with
1074  *
1075  * Return value:
1076  *	none
1077  **/
1078 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
1079 {
1080 	struct ibmvfc_event *evt, *pos;
1081 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
1082 	unsigned long flags;
1083 	int hwqs = 0;
1084 	int i;
1085 
1086 	if (vhost->using_channels)
1087 		hwqs = vhost->scsi_scrqs.active_queues;
1088 
1089 	ibmvfc_dbg(vhost, "Purging all requests\n");
1090 	spin_lock_irqsave(&vhost->crq.l_lock, flags);
1091 	list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
1092 		ibmvfc_fail_request(evt, error_code);
1093 	list_splice_init(&vhost->crq.sent, &vhost->purge);
1094 	spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
1095 
1096 	for (i = 0; i < hwqs; i++) {
1097 		spin_lock_irqsave(queues[i].q_lock, flags);
1098 		spin_lock(&queues[i].l_lock);
1099 		list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
1100 			ibmvfc_fail_request(evt, error_code);
1101 		list_splice_init(&queues[i].sent, &vhost->purge);
1102 		spin_unlock(&queues[i].l_lock);
1103 		spin_unlock_irqrestore(queues[i].q_lock, flags);
1104 	}
1105 }
1106 
1107 /**
1108  * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
1109  * @vhost:	struct ibmvfc host to reset
1110  **/
1111 static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
1112 {
1113 	ibmvfc_purge_requests(vhost, DID_ERROR);
1114 	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
1115 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
1116 }
1117 
1118 /**
1119  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
1120  * @vhost:	struct ibmvfc host to reset
1121  **/
1122 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
1123 {
1124 	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
1125 	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
1126 		scsi_block_requests(vhost->host);
1127 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
1128 		vhost->job_step = ibmvfc_npiv_logout;
1129 		wake_up(&vhost->work_wait_q);
1130 	} else
1131 		ibmvfc_hard_reset_host(vhost);
1132 }
1133 
1134 /**
1135  * ibmvfc_reset_host - Reset the connection to the server
1136  * @vhost:	ibmvfc host struct
1137  **/
1138 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
1139 {
1140 	unsigned long flags;
1141 
1142 	spin_lock_irqsave(vhost->host->host_lock, flags);
1143 	__ibmvfc_reset_host(vhost);
1144 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1145 }
1146 
1147 /**
1148  * ibmvfc_retry_host_init - Retry host initialization if allowed
1149  * @vhost:	ibmvfc host struct
1150  *
1151  * Returns: 1 if init will be retried / 0 if not
1152  *
1153  **/
1154 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
1155 {
1156 	int retry = 0;
1157 
1158 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
1159 		vhost->delay_init = 1;
1160 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
1161 			dev_err(vhost->dev,
1162 				"Host initialization retries exceeded. Taking adapter offline\n");
1163 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
1164 		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
1165 			__ibmvfc_reset_host(vhost);
1166 		else {
1167 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
1168 			retry = 1;
1169 		}
1170 	}
1171 
1172 	wake_up(&vhost->work_wait_q);
1173 	return retry;
1174 }
1175 
1176 /**
1177  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
1178  * @starget:	scsi target struct
1179  *
1180  * Return value:
1181  *	ibmvfc_target struct / NULL if not found
1182  **/
1183 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
1184 {
1185 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1186 	struct ibmvfc_host *vhost = shost_priv(shost);
1187 	struct ibmvfc_target *tgt;
1188 
1189 	list_for_each_entry(tgt, &vhost->targets, queue)
1190 		if (tgt->target_id == starget->id) {
1191 			kref_get(&tgt->kref);
1192 			return tgt;
1193 		}
1194 	return NULL;
1195 }
1196 
1197 /**
1198  * ibmvfc_get_target - Find the specified scsi_target
1199  * @starget:	scsi target struct
1200  *
1201  * Return value:
1202  *	ibmvfc_target struct / NULL if not found
1203  **/
1204 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1205 {
1206 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1207 	struct ibmvfc_target *tgt;
1208 	unsigned long flags;
1209 
1210 	spin_lock_irqsave(shost->host_lock, flags);
1211 	tgt = __ibmvfc_get_target(starget);
1212 	spin_unlock_irqrestore(shost->host_lock, flags);
1213 	return tgt;
1214 }
1215 
1216 /**
1217  * ibmvfc_get_host_speed - Get host port speed
1218  * @shost:		scsi host struct
1219  *
1220  * Return value:
1221  * 	none
1222  **/
1223 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1224 {
1225 	struct ibmvfc_host *vhost = shost_priv(shost);
1226 	unsigned long flags;
1227 
1228 	spin_lock_irqsave(shost->host_lock, flags);
1229 	if (vhost->state == IBMVFC_ACTIVE) {
1230 		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1231 		case 1:
1232 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1233 			break;
1234 		case 2:
1235 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1236 			break;
1237 		case 4:
1238 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1239 			break;
1240 		case 8:
1241 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1242 			break;
1243 		case 10:
1244 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1245 			break;
1246 		case 16:
1247 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1248 			break;
1249 		default:
1250 			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1251 				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1252 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1253 			break;
1254 		}
1255 	} else
1256 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1257 	spin_unlock_irqrestore(shost->host_lock, flags);
1258 }
1259 
1260 /**
1261  * ibmvfc_get_host_port_state - Get host port state
1262  * @shost:		scsi host struct
1263  *
1264  * Return value:
1265  * 	none
1266  **/
1267 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1268 {
1269 	struct ibmvfc_host *vhost = shost_priv(shost);
1270 	unsigned long flags;
1271 
1272 	spin_lock_irqsave(shost->host_lock, flags);
1273 	switch (vhost->state) {
1274 	case IBMVFC_INITIALIZING:
1275 	case IBMVFC_ACTIVE:
1276 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1277 		break;
1278 	case IBMVFC_LINK_DOWN:
1279 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1280 		break;
1281 	case IBMVFC_LINK_DEAD:
1282 	case IBMVFC_HOST_OFFLINE:
1283 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1284 		break;
1285 	case IBMVFC_HALTED:
1286 		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1287 		break;
1288 	case IBMVFC_NO_CRQ:
1289 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1290 		break;
1291 	default:
1292 		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1293 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1294 		break;
1295 	}
1296 	spin_unlock_irqrestore(shost->host_lock, flags);
1297 }
1298 
1299 /**
1300  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1301  * @rport:		rport struct
1302  * @timeout:	timeout value
1303  *
1304  * Return value:
1305  * 	none
1306  **/
1307 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1308 {
1309 	if (timeout)
1310 		rport->dev_loss_tmo = timeout;
1311 	else
1312 		rport->dev_loss_tmo = 1;
1313 }
1314 
1315 /**
1316  * ibmvfc_release_tgt - Free memory allocated for a target
1317  * @kref:		kref struct
1318  *
1319  **/
1320 static void ibmvfc_release_tgt(struct kref *kref)
1321 {
1322 	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1323 	kfree(tgt);
1324 }
1325 
1326 /**
1327  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1328  * @starget:	scsi target struct
1329  *
1330  * Return value:
1331  * 	none
1332  **/
1333 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1334 {
1335 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1336 	fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1337 	if (tgt)
1338 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1339 }
1340 
1341 /**
1342  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1343  * @starget:	scsi target struct
1344  *
1345  * Return value:
1346  * 	none
1347  **/
1348 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1349 {
1350 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1351 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1352 	if (tgt)
1353 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1354 }
1355 
1356 /**
1357  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1358  * @starget:	scsi target struct
1359  *
1360  * Return value:
1361  * 	none
1362  **/
1363 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1364 {
1365 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1366 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1367 	if (tgt)
1368 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1369 }
1370 
1371 /**
1372  * ibmvfc_wait_while_resetting - Wait while the host resets
1373  * @vhost:		ibmvfc host struct
1374  *
1375  * Return value:
1376  * 	0 on success / other on failure
1377  **/
1378 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1379 {
1380 	long timeout = wait_event_timeout(vhost->init_wait_q,
1381 					  ((vhost->state == IBMVFC_ACTIVE ||
1382 					    vhost->state == IBMVFC_HOST_OFFLINE ||
1383 					    vhost->state == IBMVFC_LINK_DEAD) &&
1384 					   vhost->action == IBMVFC_HOST_ACTION_NONE),
1385 					  (init_timeout * HZ));
1386 
1387 	return timeout ? 0 : -EIO;
1388 }
1389 
1390 /**
1391  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1392  * @shost:		scsi host struct
1393  *
1394  * Return value:
1395  * 	0 on success / other on failure
1396  **/
1397 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1398 {
1399 	struct ibmvfc_host *vhost = shost_priv(shost);
1400 
1401 	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1402 	ibmvfc_reset_host(vhost);
1403 	return ibmvfc_wait_while_resetting(vhost);
1404 }
1405 
1406 /**
1407  * ibmvfc_gather_partition_info - Gather info about the LPAR
1408  *
1409  * Return value:
1410  *	none
1411  **/
1412 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1413 {
1414 	struct device_node *rootdn;
1415 	const char *name;
1416 	const unsigned int *num;
1417 
1418 	rootdn = of_find_node_by_path("/");
1419 	if (!rootdn)
1420 		return;
1421 
1422 	name = of_get_property(rootdn, "ibm,partition-name", NULL);
1423 	if (name)
1424 		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1425 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
1426 	if (num)
1427 		vhost->partition_number = *num;
1428 	of_node_put(rootdn);
1429 }
1430 
1431 /**
1432  * ibmvfc_set_login_info - Setup info for NPIV login
1433  * @vhost:	ibmvfc host struct
1434  *
1435  * Return value:
1436  *	none
1437  **/
1438 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1439 {
1440 	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1441 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
1442 	struct device_node *of_node = vhost->dev->of_node;
1443 	const char *location;
1444 
1445 	memset(login_info, 0, sizeof(*login_info));
1446 
1447 	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1448 	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1449 	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1450 	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1451 	login_info->partition_num = cpu_to_be32(vhost->partition_number);
1452 	login_info->vfc_frame_version = cpu_to_be32(1);
1453 	login_info->fcp_version = cpu_to_be16(3);
1454 	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1455 	if (vhost->client_migrated)
1456 		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1457 
1458 	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
1459 	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1460 
1461 	if (vhost->mq_enabled || vhost->using_channels)
1462 		login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
1463 
1464 	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1465 	login_info->async.len = cpu_to_be32(async_crq->size *
1466 					    sizeof(*async_crq->msgs.async));
1467 	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1468 	strncpy(login_info->device_name,
1469 		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1470 
1471 	location = of_get_property(of_node, "ibm,loc-code", NULL);
1472 	location = location ? location : dev_name(vhost->dev);
1473 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1474 }
1475 
1476 /**
1477  * ibmvfc_get_event - Gets the next free event in pool
1478  * @vhost:	ibmvfc host struct
1479  *
1480  * Returns a free event from the pool.
1481  **/
1482 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
1483 {
1484 	struct ibmvfc_event *evt;
1485 	unsigned long flags;
1486 
1487 	spin_lock_irqsave(&queue->l_lock, flags);
1488 	BUG_ON(list_empty(&queue->free));
1489 	evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
1490 	atomic_set(&evt->free, 0);
1491 	list_del(&evt->queue_list);
1492 	spin_unlock_irqrestore(&queue->l_lock, flags);
1493 	return evt;
1494 }
1495 
1496 /**
1497  * ibmvfc_locked_done - Calls evt completion with host_lock held
1498  * @evt:	ibmvfc evt to complete
1499  *
1500  * All non-scsi command completion callbacks have the expectation that the
1501  * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
1502  * MAD evt with the host_lock.
1503  **/
1504 static void ibmvfc_locked_done(struct ibmvfc_event *evt)
1505 {
1506 	unsigned long flags;
1507 
1508 	spin_lock_irqsave(evt->vhost->host->host_lock, flags);
1509 	evt->_done(evt);
1510 	spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
1511 }
1512 
1513 /**
1514  * ibmvfc_init_event - Initialize fields in an event struct that are always
1515  *				required.
1516  * @evt:	The event
1517  * @done:	Routine to call when the event is responded to
1518  * @format:	SRP or MAD format
1519  **/
1520 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1521 			      void (*done) (struct ibmvfc_event *), u8 format)
1522 {
1523 	evt->cmnd = NULL;
1524 	evt->sync_iu = NULL;
1525 	evt->eh_comp = NULL;
1526 	evt->crq.format = format;
1527 	if (format == IBMVFC_CMD_FORMAT)
1528 		evt->done = done;
1529 	else {
1530 		evt->_done = done;
1531 		evt->done = ibmvfc_locked_done;
1532 	}
1533 	evt->hwq = 0;
1534 }
1535 
1536 /**
1537  * ibmvfc_map_sg_list - Initialize scatterlist
1538  * @scmd:	scsi command struct
1539  * @nseg:	number of scatterlist segments
1540  * @md:	memory descriptor list to initialize
1541  **/
1542 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1543 			       struct srp_direct_buf *md)
1544 {
1545 	int i;
1546 	struct scatterlist *sg;
1547 
1548 	scsi_for_each_sg(scmd, sg, nseg, i) {
1549 		md[i].va = cpu_to_be64(sg_dma_address(sg));
1550 		md[i].len = cpu_to_be32(sg_dma_len(sg));
1551 		md[i].key = 0;
1552 	}
1553 }
1554 
1555 /**
1556  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1557  * @scmd:		struct scsi_cmnd with the scatterlist
1558  * @evt:		ibmvfc event struct
1559  * @vfc_cmd:	vfc_cmd that contains the memory descriptor
1560  * @dev:		device for which to map dma memory
1561  *
1562  * Returns:
1563  *	0 on success / non-zero on failure
1564  **/
1565 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1566 			      struct ibmvfc_event *evt,
1567 			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1568 {
1569 
1570 	int sg_mapped;
1571 	struct srp_direct_buf *data = &vfc_cmd->ioba;
1572 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1573 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1574 
1575 	if (cls3_error)
1576 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1577 
1578 	sg_mapped = scsi_dma_map(scmd);
1579 	if (!sg_mapped) {
1580 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1581 		return 0;
1582 	} else if (unlikely(sg_mapped < 0)) {
1583 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1584 			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1585 		return sg_mapped;
1586 	}
1587 
1588 	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1589 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1590 		iu->add_cdb_len |= IBMVFC_WRDATA;
1591 	} else {
1592 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1593 		iu->add_cdb_len |= IBMVFC_RDDATA;
1594 	}
1595 
1596 	if (sg_mapped == 1) {
1597 		ibmvfc_map_sg_list(scmd, sg_mapped, data);
1598 		return 0;
1599 	}
1600 
1601 	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1602 
1603 	if (!evt->ext_list) {
1604 		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1605 					       &evt->ext_list_token);
1606 
1607 		if (!evt->ext_list) {
1608 			scsi_dma_unmap(scmd);
1609 			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1610 				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1611 			return -ENOMEM;
1612 		}
1613 	}
1614 
1615 	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1616 
1617 	data->va = cpu_to_be64(evt->ext_list_token);
1618 	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1619 	data->key = 0;
1620 	return 0;
1621 }
1622 
1623 /**
1624  * ibmvfc_timeout - Internal command timeout handler
1625  * @evt:	struct ibmvfc_event that timed out
1626  *
1627  * Called when an internally generated command times out
1628  **/
1629 static void ibmvfc_timeout(struct timer_list *t)
1630 {
1631 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
1632 	struct ibmvfc_host *vhost = evt->vhost;
1633 	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1634 	ibmvfc_reset_host(vhost);
1635 }
1636 
1637 /**
1638  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1639  * @evt:		event to be sent
1640  * @vhost:		ibmvfc host struct
1641  * @timeout:	timeout in seconds - 0 means do not time command
1642  *
1643  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1644  **/
1645 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1646 			     struct ibmvfc_host *vhost, unsigned long timeout)
1647 {
1648 	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
1649 	unsigned long flags;
1650 	int rc;
1651 
1652 	/* Copy the IU into the transfer area */
1653 	*evt->xfer_iu = evt->iu;
1654 	if (evt->crq.format == IBMVFC_CMD_FORMAT)
1655 		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1656 	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1657 		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1658 	else
1659 		BUG();
1660 
1661 	timer_setup(&evt->timer, ibmvfc_timeout, 0);
1662 
1663 	if (timeout) {
1664 		evt->timer.expires = jiffies + (timeout * HZ);
1665 		add_timer(&evt->timer);
1666 	}
1667 
1668 	spin_lock_irqsave(&evt->queue->l_lock, flags);
1669 	list_add_tail(&evt->queue_list, &evt->queue->sent);
1670 
1671 	mb();
1672 
1673 	if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
1674 		rc = ibmvfc_send_sub_crq(vhost,
1675 					 evt->queue->vios_cookie,
1676 					 be64_to_cpu(crq_as_u64[0]),
1677 					 be64_to_cpu(crq_as_u64[1]),
1678 					 0, 0);
1679 	else
1680 		rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1681 				     be64_to_cpu(crq_as_u64[1]));
1682 
1683 	if (rc) {
1684 		list_del(&evt->queue_list);
1685 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1686 		del_timer(&evt->timer);
1687 
1688 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1689 		 * Firmware will send a CRQ with a transport event (0xFF) to
1690 		 * tell this client what has happened to the transport. This
1691 		 * will be handled in ibmvfc_handle_crq()
1692 		 */
1693 		if (rc == H_CLOSED) {
1694 			if (printk_ratelimit())
1695 				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1696 			if (evt->cmnd)
1697 				scsi_dma_unmap(evt->cmnd);
1698 			ibmvfc_free_event(evt);
1699 			return SCSI_MLQUEUE_HOST_BUSY;
1700 		}
1701 
1702 		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1703 		if (evt->cmnd) {
1704 			evt->cmnd->result = DID_ERROR << 16;
1705 			evt->done = ibmvfc_scsi_eh_done;
1706 		} else
1707 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1708 
1709 		evt->done(evt);
1710 	} else {
1711 		spin_unlock_irqrestore(&evt->queue->l_lock, flags);
1712 		ibmvfc_trc_start(evt);
1713 	}
1714 
1715 	return 0;
1716 }
1717 
1718 /**
1719  * ibmvfc_log_error - Log an error for the failed command if appropriate
1720  * @evt:	ibmvfc event to log
1721  *
1722  **/
1723 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1724 {
1725 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1726 	struct ibmvfc_host *vhost = evt->vhost;
1727 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1728 	struct scsi_cmnd *cmnd = evt->cmnd;
1729 	const char *err = unknown_error;
1730 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1731 	int logerr = 0;
1732 	int rsp_code = 0;
1733 
1734 	if (index >= 0) {
1735 		logerr = cmd_status[index].log;
1736 		err = cmd_status[index].name;
1737 	}
1738 
1739 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1740 		return;
1741 
1742 	if (rsp->flags & FCP_RSP_LEN_VALID)
1743 		rsp_code = rsp->data.info.rsp_code;
1744 
1745 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1746 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1747 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1748 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1749 }
1750 
1751 /**
1752  * ibmvfc_relogin - Log back into the specified device
1753  * @sdev:	scsi device struct
1754  *
1755  **/
1756 static void ibmvfc_relogin(struct scsi_device *sdev)
1757 {
1758 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1759 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1760 	struct ibmvfc_target *tgt;
1761 	unsigned long flags;
1762 
1763 	spin_lock_irqsave(vhost->host->host_lock, flags);
1764 	list_for_each_entry(tgt, &vhost->targets, queue) {
1765 		if (rport == tgt->rport) {
1766 			ibmvfc_del_tgt(tgt);
1767 			break;
1768 		}
1769 	}
1770 
1771 	ibmvfc_reinit_host(vhost);
1772 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1773 }
1774 
1775 /**
1776  * ibmvfc_scsi_done - Handle responses from commands
1777  * @evt:	ibmvfc event to be handled
1778  *
1779  * Used as a callback when sending scsi cmds.
1780  **/
1781 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1782 {
1783 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1784 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1785 	struct scsi_cmnd *cmnd = evt->cmnd;
1786 	u32 rsp_len = 0;
1787 	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1788 
1789 	if (cmnd) {
1790 		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1791 			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1792 		else if (rsp->flags & FCP_RESID_UNDER)
1793 			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1794 		else
1795 			scsi_set_resid(cmnd, 0);
1796 
1797 		if (vfc_cmd->status) {
1798 			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1799 
1800 			if (rsp->flags & FCP_RSP_LEN_VALID)
1801 				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1802 			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1803 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1804 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1805 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1806 			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1807 			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1808 				ibmvfc_relogin(cmnd->device);
1809 
1810 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1811 				cmnd->result = (DID_ERROR << 16);
1812 
1813 			ibmvfc_log_error(evt);
1814 		}
1815 
1816 		if (!cmnd->result &&
1817 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1818 			cmnd->result = (DID_ERROR << 16);
1819 
1820 		scsi_dma_unmap(cmnd);
1821 		cmnd->scsi_done(cmnd);
1822 	}
1823 
1824 	ibmvfc_free_event(evt);
1825 }
1826 
1827 /**
1828  * ibmvfc_host_chkready - Check if the host can accept commands
1829  * @vhost:	 struct ibmvfc host
1830  *
1831  * Returns:
1832  *	1 if host can accept command / 0 if not
1833  **/
1834 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1835 {
1836 	int result = 0;
1837 
1838 	switch (vhost->state) {
1839 	case IBMVFC_LINK_DEAD:
1840 	case IBMVFC_HOST_OFFLINE:
1841 		result = DID_NO_CONNECT << 16;
1842 		break;
1843 	case IBMVFC_NO_CRQ:
1844 	case IBMVFC_INITIALIZING:
1845 	case IBMVFC_HALTED:
1846 	case IBMVFC_LINK_DOWN:
1847 		result = DID_REQUEUE << 16;
1848 		break;
1849 	case IBMVFC_ACTIVE:
1850 		result = 0;
1851 		break;
1852 	}
1853 
1854 	return result;
1855 }
1856 
1857 static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
1858 {
1859 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1860 	struct ibmvfc_host *vhost = evt->vhost;
1861 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
1862 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1863 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1864 	size_t offset;
1865 
1866 	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
1867 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
1868 		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
1869 		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
1870 	} else
1871 		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
1872 	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
1873 	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
1874 	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
1875 	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
1876 	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
1877 	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
1878 	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
1879 	int_to_scsilun(sdev->lun, &iu->lun);
1880 
1881 	return vfc_cmd;
1882 }
1883 
1884 /**
1885  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1886  * @cmnd:	struct scsi_cmnd to be executed
1887  * @done:	Callback function to be called when cmnd is completed
1888  *
1889  * Returns:
1890  *	0 on success / other on failure
1891  **/
1892 static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
1893 {
1894 	struct ibmvfc_host *vhost = shost_priv(shost);
1895 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1896 	struct ibmvfc_cmd *vfc_cmd;
1897 	struct ibmvfc_fcp_cmd_iu *iu;
1898 	struct ibmvfc_event *evt;
1899 	u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
1900 	u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
1901 	u16 scsi_channel;
1902 	int rc;
1903 
1904 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1905 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1906 		cmnd->result = rc;
1907 		cmnd->scsi_done(cmnd);
1908 		return 0;
1909 	}
1910 
1911 	cmnd->result = (DID_OK << 16);
1912 	if (vhost->using_channels) {
1913 		scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
1914 		evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
1915 		evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
1916 	} else
1917 		evt = ibmvfc_get_event(&vhost->crq);
1918 
1919 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1920 	evt->cmnd = cmnd;
1921 
1922 	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1923 	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1924 
1925 	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1926 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1927 
1928 	if (cmnd->flags & SCMD_TAGGED) {
1929 		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1930 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1931 	}
1932 
1933 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
1934 
1935 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1936 		return ibmvfc_send_event(evt, vhost, 0);
1937 
1938 	ibmvfc_free_event(evt);
1939 	if (rc == -ENOMEM)
1940 		return SCSI_MLQUEUE_HOST_BUSY;
1941 
1942 	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1943 		scmd_printk(KERN_ERR, cmnd,
1944 			    "Failed to map DMA buffer for command. rc=%d\n", rc);
1945 
1946 	cmnd->result = DID_ERROR << 16;
1947 	cmnd->scsi_done(cmnd);
1948 	return 0;
1949 }
1950 
1951 /**
1952  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1953  * @evt:	ibmvfc event struct
1954  *
1955  **/
1956 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1957 {
1958 	/* copy the response back */
1959 	if (evt->sync_iu)
1960 		*evt->sync_iu = *evt->xfer_iu;
1961 
1962 	complete(&evt->comp);
1963 }
1964 
1965 /**
1966  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1967  * @evt:	struct ibmvfc_event
1968  *
1969  **/
1970 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1971 {
1972 	struct ibmvfc_host *vhost = evt->vhost;
1973 
1974 	ibmvfc_free_event(evt);
1975 	vhost->aborting_passthru = 0;
1976 	dev_info(vhost->dev, "Passthru command cancelled\n");
1977 }
1978 
1979 /**
1980  * ibmvfc_bsg_timeout - Handle a BSG timeout
1981  * @job:	struct bsg_job that timed out
1982  *
1983  * Returns:
1984  *	0 on success / other on failure
1985  **/
1986 static int ibmvfc_bsg_timeout(struct bsg_job *job)
1987 {
1988 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1989 	unsigned long port_id = (unsigned long)job->dd_data;
1990 	struct ibmvfc_event *evt;
1991 	struct ibmvfc_tmf *tmf;
1992 	unsigned long flags;
1993 	int rc;
1994 
1995 	ENTER;
1996 	spin_lock_irqsave(vhost->host->host_lock, flags);
1997 	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
1998 		__ibmvfc_reset_host(vhost);
1999 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2000 		return 0;
2001 	}
2002 
2003 	vhost->aborting_passthru = 1;
2004 	evt = ibmvfc_get_event(&vhost->crq);
2005 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
2006 
2007 	tmf = &evt->iu.tmf;
2008 	memset(tmf, 0, sizeof(*tmf));
2009 	tmf->common.version = cpu_to_be32(1);
2010 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2011 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2012 	tmf->scsi_id = cpu_to_be64(port_id);
2013 	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2014 	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
2015 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2016 
2017 	if (rc != 0) {
2018 		vhost->aborting_passthru = 0;
2019 		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
2020 		rc = -EIO;
2021 	} else
2022 		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
2023 			 port_id);
2024 
2025 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2026 
2027 	LEAVE;
2028 	return rc;
2029 }
2030 
2031 /**
2032  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
2033  * @vhost:		struct ibmvfc_host to send command
2034  * @port_id:	port ID to send command
2035  *
2036  * Returns:
2037  *	0 on success / other on failure
2038  **/
2039 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
2040 {
2041 	struct ibmvfc_port_login *plogi;
2042 	struct ibmvfc_target *tgt;
2043 	struct ibmvfc_event *evt;
2044 	union ibmvfc_iu rsp_iu;
2045 	unsigned long flags;
2046 	int rc = 0, issue_login = 1;
2047 
2048 	ENTER;
2049 	spin_lock_irqsave(vhost->host->host_lock, flags);
2050 	list_for_each_entry(tgt, &vhost->targets, queue) {
2051 		if (tgt->scsi_id == port_id) {
2052 			issue_login = 0;
2053 			break;
2054 		}
2055 	}
2056 
2057 	if (!issue_login)
2058 		goto unlock_out;
2059 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
2060 		goto unlock_out;
2061 
2062 	evt = ibmvfc_get_event(&vhost->crq);
2063 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2064 	plogi = &evt->iu.plogi;
2065 	memset(plogi, 0, sizeof(*plogi));
2066 	plogi->common.version = cpu_to_be32(1);
2067 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
2068 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
2069 	plogi->scsi_id = cpu_to_be64(port_id);
2070 	evt->sync_iu = &rsp_iu;
2071 	init_completion(&evt->comp);
2072 
2073 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
2074 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2075 
2076 	if (rc)
2077 		return -EIO;
2078 
2079 	wait_for_completion(&evt->comp);
2080 
2081 	if (rsp_iu.plogi.common.status)
2082 		rc = -EIO;
2083 
2084 	spin_lock_irqsave(vhost->host->host_lock, flags);
2085 	ibmvfc_free_event(evt);
2086 unlock_out:
2087 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2088 	LEAVE;
2089 	return rc;
2090 }
2091 
2092 /**
2093  * ibmvfc_bsg_request - Handle a BSG request
2094  * @job:	struct bsg_job to be executed
2095  *
2096  * Returns:
2097  *	0 on success / other on failure
2098  **/
2099 static int ibmvfc_bsg_request(struct bsg_job *job)
2100 {
2101 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
2102 	struct fc_rport *rport = fc_bsg_to_rport(job);
2103 	struct ibmvfc_passthru_mad *mad;
2104 	struct ibmvfc_event *evt;
2105 	union ibmvfc_iu rsp_iu;
2106 	unsigned long flags, port_id = -1;
2107 	struct fc_bsg_request *bsg_request = job->request;
2108 	struct fc_bsg_reply *bsg_reply = job->reply;
2109 	unsigned int code = bsg_request->msgcode;
2110 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
2111 	u32 fc_flags, rsp_len;
2112 
2113 	ENTER;
2114 	bsg_reply->reply_payload_rcv_len = 0;
2115 	if (rport)
2116 		port_id = rport->port_id;
2117 
2118 	switch (code) {
2119 	case FC_BSG_HST_ELS_NOLOGIN:
2120 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
2121 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
2122 			bsg_request->rqst_data.h_els.port_id[2];
2123 		fallthrough;
2124 	case FC_BSG_RPT_ELS:
2125 		fc_flags = IBMVFC_FC_ELS;
2126 		break;
2127 	case FC_BSG_HST_CT:
2128 		issue_login = 1;
2129 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
2130 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
2131 			bsg_request->rqst_data.h_ct.port_id[2];
2132 		fallthrough;
2133 	case FC_BSG_RPT_CT:
2134 		fc_flags = IBMVFC_FC_CT_IU;
2135 		break;
2136 	default:
2137 		return -ENOTSUPP;
2138 	}
2139 
2140 	if (port_id == -1)
2141 		return -EINVAL;
2142 	if (!mutex_trylock(&vhost->passthru_mutex))
2143 		return -EBUSY;
2144 
2145 	job->dd_data = (void *)port_id;
2146 	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
2147 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2148 
2149 	if (!req_seg) {
2150 		mutex_unlock(&vhost->passthru_mutex);
2151 		return -ENOMEM;
2152 	}
2153 
2154 	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
2155 			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2156 
2157 	if (!rsp_seg) {
2158 		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2159 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2160 		mutex_unlock(&vhost->passthru_mutex);
2161 		return -ENOMEM;
2162 	}
2163 
2164 	if (req_seg > 1 || rsp_seg > 1) {
2165 		rc = -EINVAL;
2166 		goto out;
2167 	}
2168 
2169 	if (issue_login)
2170 		rc = ibmvfc_bsg_plogi(vhost, port_id);
2171 
2172 	spin_lock_irqsave(vhost->host->host_lock, flags);
2173 
2174 	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
2175 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
2176 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2177 		goto out;
2178 	}
2179 
2180 	evt = ibmvfc_get_event(&vhost->crq);
2181 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2182 	mad = &evt->iu.passthru;
2183 
2184 	memset(mad, 0, sizeof(*mad));
2185 	mad->common.version = cpu_to_be32(1);
2186 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2187 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2188 
2189 	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2190 		offsetof(struct ibmvfc_passthru_mad, iu));
2191 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2192 
2193 	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2194 	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2195 	mad->iu.flags = cpu_to_be32(fc_flags);
2196 	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2197 
2198 	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2199 	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2200 	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2201 	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2202 	mad->iu.scsi_id = cpu_to_be64(port_id);
2203 	mad->iu.tag = cpu_to_be64((u64)evt);
2204 	rsp_len = be32_to_cpu(mad->iu.rsp.len);
2205 
2206 	evt->sync_iu = &rsp_iu;
2207 	init_completion(&evt->comp);
2208 	rc = ibmvfc_send_event(evt, vhost, 0);
2209 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2210 
2211 	if (rc) {
2212 		rc = -EIO;
2213 		goto out;
2214 	}
2215 
2216 	wait_for_completion(&evt->comp);
2217 
2218 	if (rsp_iu.passthru.common.status)
2219 		rc = -EIO;
2220 	else
2221 		bsg_reply->reply_payload_rcv_len = rsp_len;
2222 
2223 	spin_lock_irqsave(vhost->host->host_lock, flags);
2224 	ibmvfc_free_event(evt);
2225 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2226 	bsg_reply->result = rc;
2227 	bsg_job_done(job, bsg_reply->result,
2228 		       bsg_reply->reply_payload_rcv_len);
2229 	rc = 0;
2230 out:
2231 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2232 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2233 	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2234 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2235 	mutex_unlock(&vhost->passthru_mutex);
2236 	LEAVE;
2237 	return rc;
2238 }
2239 
2240 /**
2241  * ibmvfc_reset_device - Reset the device with the specified reset type
2242  * @sdev:	scsi device to reset
2243  * @type:	reset type
2244  * @desc:	reset type description for log messages
2245  *
2246  * Returns:
2247  *	0 on success / other on failure
2248  **/
2249 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2250 {
2251 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2252 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2253 	struct ibmvfc_cmd *tmf;
2254 	struct ibmvfc_event *evt = NULL;
2255 	union ibmvfc_iu rsp_iu;
2256 	struct ibmvfc_fcp_cmd_iu *iu;
2257 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2258 	int rsp_rc = -EBUSY;
2259 	unsigned long flags;
2260 	int rsp_code = 0;
2261 
2262 	spin_lock_irqsave(vhost->host->host_lock, flags);
2263 	if (vhost->state == IBMVFC_ACTIVE) {
2264 		if (vhost->using_channels)
2265 			evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
2266 		else
2267 			evt = ibmvfc_get_event(&vhost->crq);
2268 
2269 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2270 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2271 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2272 
2273 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2274 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2275 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2276 		iu->tmf_flags = type;
2277 		evt->sync_iu = &rsp_iu;
2278 
2279 		init_completion(&evt->comp);
2280 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2281 	}
2282 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2283 
2284 	if (rsp_rc != 0) {
2285 		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2286 			    desc, rsp_rc);
2287 		return -EIO;
2288 	}
2289 
2290 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2291 	wait_for_completion(&evt->comp);
2292 
2293 	if (rsp_iu.cmd.status)
2294 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2295 
2296 	if (rsp_code) {
2297 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2298 			rsp_code = fc_rsp->data.info.rsp_code;
2299 
2300 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2301 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2302 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2303 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2304 			    fc_rsp->scsi_status);
2305 		rsp_rc = -EIO;
2306 	} else
2307 		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2308 
2309 	spin_lock_irqsave(vhost->host->host_lock, flags);
2310 	ibmvfc_free_event(evt);
2311 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2312 	return rsp_rc;
2313 }
2314 
2315 /**
2316  * ibmvfc_match_rport - Match function for specified remote port
2317  * @evt:	ibmvfc event struct
2318  * @device:	device to match (rport)
2319  *
2320  * Returns:
2321  *	1 if event matches rport / 0 if event does not match rport
2322  **/
2323 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2324 {
2325 	struct fc_rport *cmd_rport;
2326 
2327 	if (evt->cmnd) {
2328 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2329 		if (cmd_rport == rport)
2330 			return 1;
2331 	}
2332 	return 0;
2333 }
2334 
2335 /**
2336  * ibmvfc_match_target - Match function for specified target
2337  * @evt:	ibmvfc event struct
2338  * @device:	device to match (starget)
2339  *
2340  * Returns:
2341  *	1 if event matches starget / 0 if event does not match starget
2342  **/
2343 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2344 {
2345 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2346 		return 1;
2347 	return 0;
2348 }
2349 
2350 /**
2351  * ibmvfc_match_lun - Match function for specified LUN
2352  * @evt:	ibmvfc event struct
2353  * @device:	device to match (sdev)
2354  *
2355  * Returns:
2356  *	1 if event matches sdev / 0 if event does not match sdev
2357  **/
2358 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2359 {
2360 	if (evt->cmnd && evt->cmnd->device == device)
2361 		return 1;
2362 	return 0;
2363 }
2364 
2365 /**
2366  * ibmvfc_wait_for_ops - Wait for ops to complete
2367  * @vhost:	ibmvfc host struct
2368  * @device:	device to match (starget or sdev)
2369  * @match:	match function
2370  *
2371  * Returns:
2372  *	SUCCESS / FAILED
2373  **/
2374 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2375 			       int (*match) (struct ibmvfc_event *, void *))
2376 {
2377 	struct ibmvfc_event *evt;
2378 	DECLARE_COMPLETION_ONSTACK(comp);
2379 	int wait;
2380 	unsigned long flags;
2381 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2382 
2383 	ENTER;
2384 	do {
2385 		wait = 0;
2386 		spin_lock_irqsave(&vhost->crq.l_lock, flags);
2387 		list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2388 			if (match(evt, device)) {
2389 				evt->eh_comp = &comp;
2390 				wait++;
2391 			}
2392 		}
2393 		spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
2394 
2395 		if (wait) {
2396 			timeout = wait_for_completion_timeout(&comp, timeout);
2397 
2398 			if (!timeout) {
2399 				wait = 0;
2400 				spin_lock_irqsave(&vhost->crq.l_lock, flags);
2401 				list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2402 					if (match(evt, device)) {
2403 						evt->eh_comp = NULL;
2404 						wait++;
2405 					}
2406 				}
2407 				spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
2408 				if (wait)
2409 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2410 				LEAVE;
2411 				return wait ? FAILED : SUCCESS;
2412 			}
2413 		}
2414 	} while (wait);
2415 
2416 	LEAVE;
2417 	return SUCCESS;
2418 }
2419 
2420 static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
2421 					    struct scsi_device *sdev,
2422 					    int type)
2423 {
2424 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2425 	struct scsi_target *starget = scsi_target(sdev);
2426 	struct fc_rport *rport = starget_to_rport(starget);
2427 	struct ibmvfc_event *evt;
2428 	struct ibmvfc_tmf *tmf;
2429 
2430 	evt = ibmvfc_get_event(queue);
2431 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2432 
2433 	tmf = &evt->iu.tmf;
2434 	memset(tmf, 0, sizeof(*tmf));
2435 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2436 		tmf->common.version = cpu_to_be32(2);
2437 		tmf->target_wwpn = cpu_to_be64(rport->port_name);
2438 	} else {
2439 		tmf->common.version = cpu_to_be32(1);
2440 	}
2441 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2442 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
2443 	tmf->scsi_id = cpu_to_be64(rport->port_id);
2444 	int_to_scsilun(sdev->lun, &tmf->lun);
2445 	if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2446 		type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2447 	if (vhost->state == IBMVFC_ACTIVE)
2448 		tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2449 	else
2450 		tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2451 	tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2452 	tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2453 
2454 	init_completion(&evt->comp);
2455 
2456 	return evt;
2457 }
2458 
2459 static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
2460 {
2461 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2462 	struct ibmvfc_event *evt, *found_evt, *temp;
2463 	struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
2464 	unsigned long flags;
2465 	int num_hwq, i;
2466 	int fail = 0;
2467 	LIST_HEAD(cancelq);
2468 	u16 status;
2469 
2470 	ENTER;
2471 	spin_lock_irqsave(vhost->host->host_lock, flags);
2472 	num_hwq = vhost->scsi_scrqs.active_queues;
2473 	for (i = 0; i < num_hwq; i++) {
2474 		spin_lock(queues[i].q_lock);
2475 		spin_lock(&queues[i].l_lock);
2476 		found_evt = NULL;
2477 		list_for_each_entry(evt, &queues[i].sent, queue_list) {
2478 			if (evt->cmnd && evt->cmnd->device == sdev) {
2479 				found_evt = evt;
2480 				break;
2481 			}
2482 		}
2483 		spin_unlock(&queues[i].l_lock);
2484 
2485 		if (found_evt && vhost->logged_in) {
2486 			evt = ibmvfc_init_tmf(&queues[i], sdev, type);
2487 			evt->sync_iu = &queues[i].cancel_rsp;
2488 			ibmvfc_send_event(evt, vhost, default_timeout);
2489 			list_add_tail(&evt->cancel, &cancelq);
2490 		}
2491 
2492 		spin_unlock(queues[i].q_lock);
2493 	}
2494 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2495 
2496 	if (list_empty(&cancelq)) {
2497 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2498 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2499 		return 0;
2500 	}
2501 
2502 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2503 
2504 	list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
2505 		wait_for_completion(&evt->comp);
2506 		status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
2507 		list_del(&evt->cancel);
2508 		ibmvfc_free_event(evt);
2509 
2510 		if (status != IBMVFC_MAD_SUCCESS) {
2511 			sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2512 			switch (status) {
2513 			case IBMVFC_MAD_DRIVER_FAILED:
2514 			case IBMVFC_MAD_CRQ_ERROR:
2515 			/* Host adapter most likely going through reset, return success to
2516 			 * the caller will wait for the command being cancelled to get returned
2517 			 */
2518 				break;
2519 			default:
2520 				fail = 1;
2521 				break;
2522 			}
2523 		}
2524 	}
2525 
2526 	if (fail)
2527 		return -EIO;
2528 
2529 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2530 	LEAVE;
2531 	return 0;
2532 }
2533 
2534 static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
2535 {
2536 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2537 	struct ibmvfc_event *evt, *found_evt;
2538 	union ibmvfc_iu rsp;
2539 	int rsp_rc = -EBUSY;
2540 	unsigned long flags;
2541 	u16 status;
2542 
2543 	ENTER;
2544 	found_evt = NULL;
2545 	spin_lock_irqsave(vhost->host->host_lock, flags);
2546 	spin_lock(&vhost->crq.l_lock);
2547 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2548 		if (evt->cmnd && evt->cmnd->device == sdev) {
2549 			found_evt = evt;
2550 			break;
2551 		}
2552 	}
2553 	spin_unlock(&vhost->crq.l_lock);
2554 
2555 	if (!found_evt) {
2556 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2557 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2558 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2559 		return 0;
2560 	}
2561 
2562 	if (vhost->logged_in) {
2563 		evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
2564 		evt->sync_iu = &rsp;
2565 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2566 	}
2567 
2568 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2569 
2570 	if (rsp_rc != 0) {
2571 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2572 		/* If failure is received, the host adapter is most likely going
2573 		 through reset, return success so the caller will wait for the command
2574 		 being cancelled to get returned */
2575 		return 0;
2576 	}
2577 
2578 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2579 
2580 	wait_for_completion(&evt->comp);
2581 	status = be16_to_cpu(rsp.mad_common.status);
2582 	spin_lock_irqsave(vhost->host->host_lock, flags);
2583 	ibmvfc_free_event(evt);
2584 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2585 
2586 	if (status != IBMVFC_MAD_SUCCESS) {
2587 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2588 		switch (status) {
2589 		case IBMVFC_MAD_DRIVER_FAILED:
2590 		case IBMVFC_MAD_CRQ_ERROR:
2591 			/* Host adapter most likely going through reset, return success to
2592 			 the caller will wait for the command being cancelled to get returned */
2593 			return 0;
2594 		default:
2595 			return -EIO;
2596 		};
2597 	}
2598 
2599 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2600 	return 0;
2601 }
2602 
2603 /**
2604  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2605  * @sdev:	scsi device to cancel commands
2606  * @type:	type of error recovery being performed
2607  *
2608  * This sends a cancel to the VIOS for the specified device. This does
2609  * NOT send any abort to the actual device. That must be done separately.
2610  *
2611  * Returns:
2612  *	0 on success / other on failure
2613  **/
2614 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2615 {
2616 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2617 
2618 	if (vhost->mq_enabled && vhost->using_channels)
2619 		return ibmvfc_cancel_all_mq(sdev, type);
2620 	else
2621 		return ibmvfc_cancel_all_sq(sdev, type);
2622 }
2623 
2624 /**
2625  * ibmvfc_match_key - Match function for specified cancel key
2626  * @evt:	ibmvfc event struct
2627  * @key:	cancel key to match
2628  *
2629  * Returns:
2630  *	1 if event matches key / 0 if event does not match key
2631  **/
2632 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2633 {
2634 	unsigned long cancel_key = (unsigned long)key;
2635 
2636 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2637 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2638 		return 1;
2639 	return 0;
2640 }
2641 
2642 /**
2643  * ibmvfc_match_evt - Match function for specified event
2644  * @evt:	ibmvfc event struct
2645  * @match:	event to match
2646  *
2647  * Returns:
2648  *	1 if event matches key / 0 if event does not match key
2649  **/
2650 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2651 {
2652 	if (evt == match)
2653 		return 1;
2654 	return 0;
2655 }
2656 
2657 /**
2658  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2659  * @sdev:	scsi device to abort commands
2660  *
2661  * This sends an Abort Task Set to the VIOS for the specified device. This does
2662  * NOT send any cancel to the VIOS. That must be done separately.
2663  *
2664  * Returns:
2665  *	0 on success / other on failure
2666  **/
2667 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2668 {
2669 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2670 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2671 	struct ibmvfc_cmd *tmf;
2672 	struct ibmvfc_event *evt, *found_evt;
2673 	union ibmvfc_iu rsp_iu;
2674 	struct ibmvfc_fcp_cmd_iu *iu;
2675 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2676 	int rc, rsp_rc = -EBUSY;
2677 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2678 	int rsp_code = 0;
2679 
2680 	found_evt = NULL;
2681 	spin_lock_irqsave(vhost->host->host_lock, flags);
2682 	spin_lock(&vhost->crq.l_lock);
2683 	list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
2684 		if (evt->cmnd && evt->cmnd->device == sdev) {
2685 			found_evt = evt;
2686 			break;
2687 		}
2688 	}
2689 	spin_unlock(&vhost->crq.l_lock);
2690 
2691 	if (!found_evt) {
2692 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2693 			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2694 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2695 		return 0;
2696 	}
2697 
2698 	if (vhost->state == IBMVFC_ACTIVE) {
2699 		evt = ibmvfc_get_event(&vhost->crq);
2700 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2701 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2702 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2703 
2704 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2705 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2706 		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2707 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2708 		evt->sync_iu = &rsp_iu;
2709 
2710 		tmf->correlation = cpu_to_be64((u64)evt);
2711 
2712 		init_completion(&evt->comp);
2713 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2714 	}
2715 
2716 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2717 
2718 	if (rsp_rc != 0) {
2719 		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2720 		return -EIO;
2721 	}
2722 
2723 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2724 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
2725 
2726 	if (!timeout) {
2727 		rc = ibmvfc_cancel_all(sdev, 0);
2728 		if (!rc) {
2729 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2730 			if (rc == SUCCESS)
2731 				rc = 0;
2732 		}
2733 
2734 		if (rc) {
2735 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2736 			ibmvfc_reset_host(vhost);
2737 			rsp_rc = -EIO;
2738 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2739 
2740 			if (rc == SUCCESS)
2741 				rsp_rc = 0;
2742 
2743 			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2744 			if (rc != SUCCESS) {
2745 				spin_lock_irqsave(vhost->host->host_lock, flags);
2746 				ibmvfc_hard_reset_host(vhost);
2747 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2748 				rsp_rc = 0;
2749 			}
2750 
2751 			goto out;
2752 		}
2753 	}
2754 
2755 	if (rsp_iu.cmd.status)
2756 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2757 
2758 	if (rsp_code) {
2759 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2760 			rsp_code = fc_rsp->data.info.rsp_code;
2761 
2762 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2763 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2764 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2765 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2766 			    fc_rsp->scsi_status);
2767 		rsp_rc = -EIO;
2768 	} else
2769 		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2770 
2771 out:
2772 	spin_lock_irqsave(vhost->host->host_lock, flags);
2773 	ibmvfc_free_event(evt);
2774 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2775 	return rsp_rc;
2776 }
2777 
2778 /**
2779  * ibmvfc_eh_abort_handler - Abort a command
2780  * @cmd:	scsi command to abort
2781  *
2782  * Returns:
2783  *	SUCCESS / FAST_IO_FAIL / FAILED
2784  **/
2785 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2786 {
2787 	struct scsi_device *sdev = cmd->device;
2788 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2789 	int cancel_rc, block_rc;
2790 	int rc = FAILED;
2791 
2792 	ENTER;
2793 	block_rc = fc_block_scsi_eh(cmd);
2794 	ibmvfc_wait_while_resetting(vhost);
2795 	if (block_rc != FAST_IO_FAIL) {
2796 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2797 		ibmvfc_abort_task_set(sdev);
2798 	} else
2799 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2800 
2801 	if (!cancel_rc)
2802 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2803 
2804 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2805 		rc = FAST_IO_FAIL;
2806 
2807 	LEAVE;
2808 	return rc;
2809 }
2810 
2811 /**
2812  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2813  * @cmd:	scsi command struct
2814  *
2815  * Returns:
2816  *	SUCCESS / FAST_IO_FAIL / FAILED
2817  **/
2818 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2819 {
2820 	struct scsi_device *sdev = cmd->device;
2821 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2822 	int cancel_rc, block_rc, reset_rc = 0;
2823 	int rc = FAILED;
2824 
2825 	ENTER;
2826 	block_rc = fc_block_scsi_eh(cmd);
2827 	ibmvfc_wait_while_resetting(vhost);
2828 	if (block_rc != FAST_IO_FAIL) {
2829 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2830 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2831 	} else
2832 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2833 
2834 	if (!cancel_rc && !reset_rc)
2835 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2836 
2837 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2838 		rc = FAST_IO_FAIL;
2839 
2840 	LEAVE;
2841 	return rc;
2842 }
2843 
2844 /**
2845  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2846  * @sdev:	scsi device struct
2847  * @data:	return code
2848  *
2849  **/
2850 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2851 {
2852 	unsigned long *rc = data;
2853 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2854 }
2855 
2856 /**
2857  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2858  * @sdev:	scsi device struct
2859  * @data:	return code
2860  *
2861  **/
2862 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2863 {
2864 	unsigned long *rc = data;
2865 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2866 }
2867 
2868 /**
2869  * ibmvfc_eh_target_reset_handler - Reset the target
2870  * @cmd:	scsi command struct
2871  *
2872  * Returns:
2873  *	SUCCESS / FAST_IO_FAIL / FAILED
2874  **/
2875 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2876 {
2877 	struct scsi_device *sdev = cmd->device;
2878 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2879 	struct scsi_target *starget = scsi_target(sdev);
2880 	int block_rc;
2881 	int reset_rc = 0;
2882 	int rc = FAILED;
2883 	unsigned long cancel_rc = 0;
2884 
2885 	ENTER;
2886 	block_rc = fc_block_scsi_eh(cmd);
2887 	ibmvfc_wait_while_resetting(vhost);
2888 	if (block_rc != FAST_IO_FAIL) {
2889 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2890 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2891 	} else
2892 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2893 
2894 	if (!cancel_rc && !reset_rc)
2895 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2896 
2897 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2898 		rc = FAST_IO_FAIL;
2899 
2900 	LEAVE;
2901 	return rc;
2902 }
2903 
2904 /**
2905  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2906  * @cmd:	struct scsi_cmnd having problems
2907  *
2908  **/
2909 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2910 {
2911 	int rc;
2912 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2913 
2914 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2915 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2916 
2917 	return rc ? FAILED : SUCCESS;
2918 }
2919 
2920 /**
2921  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2922  * @rport:		rport struct
2923  *
2924  * Return value:
2925  * 	none
2926  **/
2927 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2928 {
2929 	struct Scsi_Host *shost = rport_to_shost(rport);
2930 	struct ibmvfc_host *vhost = shost_priv(shost);
2931 	struct fc_rport *dev_rport;
2932 	struct scsi_device *sdev;
2933 	struct ibmvfc_target *tgt;
2934 	unsigned long rc, flags;
2935 	unsigned int found;
2936 
2937 	ENTER;
2938 	shost_for_each_device(sdev, shost) {
2939 		dev_rport = starget_to_rport(scsi_target(sdev));
2940 		if (dev_rport != rport)
2941 			continue;
2942 		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2943 	}
2944 
2945 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
2946 
2947 	if (rc == FAILED)
2948 		ibmvfc_issue_fc_host_lip(shost);
2949 
2950 	spin_lock_irqsave(shost->host_lock, flags);
2951 	found = 0;
2952 	list_for_each_entry(tgt, &vhost->targets, queue) {
2953 		if (tgt->scsi_id == rport->port_id) {
2954 			found++;
2955 			break;
2956 		}
2957 	}
2958 
2959 	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
2960 		/*
2961 		 * If we get here, that means we previously attempted to send
2962 		 * an implicit logout to the target but it failed, most likely
2963 		 * due to I/O being pending, so we need to send it again
2964 		 */
2965 		ibmvfc_del_tgt(tgt);
2966 		ibmvfc_reinit_host(vhost);
2967 	}
2968 
2969 	spin_unlock_irqrestore(shost->host_lock, flags);
2970 	LEAVE;
2971 }
2972 
2973 static const struct ibmvfc_async_desc ae_desc [] = {
2974 	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2975 	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2976 	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2977 	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2978 	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
2979 	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
2980 	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
2981 	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
2982 	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
2983 	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
2984 	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
2985 	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
2986 	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
2987 };
2988 
2989 static const struct ibmvfc_async_desc unknown_ae = {
2990 	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
2991 };
2992 
2993 /**
2994  * ibmvfc_get_ae_desc - Get text description for async event
2995  * @ae:	async event
2996  *
2997  **/
2998 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2999 {
3000 	int i;
3001 
3002 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
3003 		if (ae_desc[i].ae == ae)
3004 			return &ae_desc[i];
3005 
3006 	return &unknown_ae;
3007 }
3008 
3009 static const struct {
3010 	enum ibmvfc_ae_link_state state;
3011 	const char *desc;
3012 } link_desc [] = {
3013 	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
3014 	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
3015 	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
3016 	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
3017 };
3018 
3019 /**
3020  * ibmvfc_get_link_state - Get text description for link state
3021  * @state:	link state
3022  *
3023  **/
3024 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
3025 {
3026 	int i;
3027 
3028 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
3029 		if (link_desc[i].state == state)
3030 			return link_desc[i].desc;
3031 
3032 	return "";
3033 }
3034 
3035 /**
3036  * ibmvfc_handle_async - Handle an async event from the adapter
3037  * @crq:	crq to process
3038  * @vhost:	ibmvfc host struct
3039  *
3040  **/
3041 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
3042 				struct ibmvfc_host *vhost)
3043 {
3044 	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
3045 	struct ibmvfc_target *tgt;
3046 
3047 	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
3048 		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
3049 		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
3050 		   ibmvfc_get_link_state(crq->link_state));
3051 
3052 	switch (be64_to_cpu(crq->event)) {
3053 	case IBMVFC_AE_RESUME:
3054 		switch (crq->link_state) {
3055 		case IBMVFC_AE_LS_LINK_DOWN:
3056 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3057 			break;
3058 		case IBMVFC_AE_LS_LINK_DEAD:
3059 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3060 			break;
3061 		case IBMVFC_AE_LS_LINK_UP:
3062 		case IBMVFC_AE_LS_LINK_BOUNCED:
3063 		default:
3064 			vhost->events_to_log |= IBMVFC_AE_LINKUP;
3065 			vhost->delay_init = 1;
3066 			__ibmvfc_reset_host(vhost);
3067 			break;
3068 		}
3069 
3070 		break;
3071 	case IBMVFC_AE_LINK_UP:
3072 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
3073 		vhost->delay_init = 1;
3074 		__ibmvfc_reset_host(vhost);
3075 		break;
3076 	case IBMVFC_AE_SCN_FABRIC:
3077 	case IBMVFC_AE_SCN_DOMAIN:
3078 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3079 		if (vhost->state < IBMVFC_HALTED) {
3080 			vhost->delay_init = 1;
3081 			__ibmvfc_reset_host(vhost);
3082 		}
3083 		break;
3084 	case IBMVFC_AE_SCN_NPORT:
3085 	case IBMVFC_AE_SCN_GROUP:
3086 		vhost->events_to_log |= IBMVFC_AE_RSCN;
3087 		ibmvfc_reinit_host(vhost);
3088 		break;
3089 	case IBMVFC_AE_ELS_LOGO:
3090 	case IBMVFC_AE_ELS_PRLO:
3091 	case IBMVFC_AE_ELS_PLOGI:
3092 		list_for_each_entry(tgt, &vhost->targets, queue) {
3093 			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
3094 				break;
3095 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
3096 				continue;
3097 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
3098 				continue;
3099 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
3100 				continue;
3101 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
3102 				tgt->logo_rcvd = 1;
3103 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
3104 				ibmvfc_del_tgt(tgt);
3105 				ibmvfc_reinit_host(vhost);
3106 			}
3107 		}
3108 		break;
3109 	case IBMVFC_AE_LINK_DOWN:
3110 	case IBMVFC_AE_ADAPTER_FAILED:
3111 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3112 		break;
3113 	case IBMVFC_AE_LINK_DEAD:
3114 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
3115 		break;
3116 	case IBMVFC_AE_HALT:
3117 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
3118 		break;
3119 	default:
3120 		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
3121 		break;
3122 	}
3123 }
3124 
3125 /**
3126  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
3127  * @crq:	Command/Response queue
3128  * @vhost:	ibmvfc host struct
3129  *
3130  **/
3131 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3132 			      struct list_head *evt_doneq)
3133 {
3134 	long rc;
3135 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3136 
3137 	switch (crq->valid) {
3138 	case IBMVFC_CRQ_INIT_RSP:
3139 		switch (crq->format) {
3140 		case IBMVFC_CRQ_INIT:
3141 			dev_info(vhost->dev, "Partner initialized\n");
3142 			/* Send back a response */
3143 			rc = ibmvfc_send_crq_init_complete(vhost);
3144 			if (rc == 0)
3145 				ibmvfc_init_host(vhost);
3146 			else
3147 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
3148 			break;
3149 		case IBMVFC_CRQ_INIT_COMPLETE:
3150 			dev_info(vhost->dev, "Partner initialization complete\n");
3151 			ibmvfc_init_host(vhost);
3152 			break;
3153 		default:
3154 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
3155 		}
3156 		return;
3157 	case IBMVFC_CRQ_XPORT_EVENT:
3158 		vhost->state = IBMVFC_NO_CRQ;
3159 		vhost->logged_in = 0;
3160 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
3161 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
3162 			/* We need to re-setup the interpartition connection */
3163 			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
3164 			vhost->client_migrated = 1;
3165 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
3166 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3167 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
3168 		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
3169 			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
3170 			ibmvfc_purge_requests(vhost, DID_ERROR);
3171 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
3172 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
3173 		} else {
3174 			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
3175 		}
3176 		return;
3177 	case IBMVFC_CRQ_CMD_RSP:
3178 		break;
3179 	default:
3180 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
3181 		return;
3182 	}
3183 
3184 	if (crq->format == IBMVFC_ASYNC_EVENT)
3185 		return;
3186 
3187 	/* The only kind of payload CRQs we should get are responses to
3188 	 * things we send. Make sure this response is to something we
3189 	 * actually sent
3190 	 */
3191 	if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
3192 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3193 			crq->ioba);
3194 		return;
3195 	}
3196 
3197 	if (unlikely(atomic_read(&evt->free))) {
3198 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3199 			crq->ioba);
3200 		return;
3201 	}
3202 
3203 	spin_lock(&evt->queue->l_lock);
3204 	list_move_tail(&evt->queue_list, evt_doneq);
3205 	spin_unlock(&evt->queue->l_lock);
3206 }
3207 
3208 /**
3209  * ibmvfc_scan_finished - Check if the device scan is done.
3210  * @shost:	scsi host struct
3211  * @time:	current elapsed time
3212  *
3213  * Returns:
3214  *	0 if scan is not done / 1 if scan is done
3215  **/
3216 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
3217 {
3218 	unsigned long flags;
3219 	struct ibmvfc_host *vhost = shost_priv(shost);
3220 	int done = 0;
3221 
3222 	spin_lock_irqsave(shost->host_lock, flags);
3223 	if (time >= (init_timeout * HZ)) {
3224 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
3225 			 "continuing initialization\n", init_timeout);
3226 		done = 1;
3227 	}
3228 
3229 	if (vhost->scan_complete)
3230 		done = 1;
3231 	spin_unlock_irqrestore(shost->host_lock, flags);
3232 	return done;
3233 }
3234 
3235 /**
3236  * ibmvfc_slave_alloc - Setup the device's task set value
3237  * @sdev:	struct scsi_device device to configure
3238  *
3239  * Set the device's task set value so that error handling works as
3240  * expected.
3241  *
3242  * Returns:
3243  *	0 on success / -ENXIO if device does not exist
3244  **/
3245 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
3246 {
3247 	struct Scsi_Host *shost = sdev->host;
3248 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
3249 	struct ibmvfc_host *vhost = shost_priv(shost);
3250 	unsigned long flags = 0;
3251 
3252 	if (!rport || fc_remote_port_chkready(rport))
3253 		return -ENXIO;
3254 
3255 	spin_lock_irqsave(shost->host_lock, flags);
3256 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
3257 	spin_unlock_irqrestore(shost->host_lock, flags);
3258 	return 0;
3259 }
3260 
3261 /**
3262  * ibmvfc_target_alloc - Setup the target's task set value
3263  * @starget:	struct scsi_target
3264  *
3265  * Set the target's task set value so that error handling works as
3266  * expected.
3267  *
3268  * Returns:
3269  *	0 on success / -ENXIO if device does not exist
3270  **/
3271 static int ibmvfc_target_alloc(struct scsi_target *starget)
3272 {
3273 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3274 	struct ibmvfc_host *vhost = shost_priv(shost);
3275 	unsigned long flags = 0;
3276 
3277 	spin_lock_irqsave(shost->host_lock, flags);
3278 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
3279 	spin_unlock_irqrestore(shost->host_lock, flags);
3280 	return 0;
3281 }
3282 
3283 /**
3284  * ibmvfc_slave_configure - Configure the device
3285  * @sdev:	struct scsi_device device to configure
3286  *
3287  * Enable allow_restart for a device if it is a disk. Adjust the
3288  * queue_depth here also.
3289  *
3290  * Returns:
3291  *	0
3292  **/
3293 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3294 {
3295 	struct Scsi_Host *shost = sdev->host;
3296 	unsigned long flags = 0;
3297 
3298 	spin_lock_irqsave(shost->host_lock, flags);
3299 	if (sdev->type == TYPE_DISK) {
3300 		sdev->allow_restart = 1;
3301 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3302 	}
3303 	spin_unlock_irqrestore(shost->host_lock, flags);
3304 	return 0;
3305 }
3306 
3307 /**
3308  * ibmvfc_change_queue_depth - Change the device's queue depth
3309  * @sdev:	scsi device struct
3310  * @qdepth:	depth to set
3311  * @reason:	calling context
3312  *
3313  * Return value:
3314  * 	actual depth set
3315  **/
3316 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3317 {
3318 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3319 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3320 
3321 	return scsi_change_queue_depth(sdev, qdepth);
3322 }
3323 
3324 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3325 						 struct device_attribute *attr, char *buf)
3326 {
3327 	struct Scsi_Host *shost = class_to_shost(dev);
3328 	struct ibmvfc_host *vhost = shost_priv(shost);
3329 
3330 	return snprintf(buf, PAGE_SIZE, "%s\n",
3331 			vhost->login_buf->resp.partition_name);
3332 }
3333 
3334 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3335 					    struct device_attribute *attr, char *buf)
3336 {
3337 	struct Scsi_Host *shost = class_to_shost(dev);
3338 	struct ibmvfc_host *vhost = shost_priv(shost);
3339 
3340 	return snprintf(buf, PAGE_SIZE, "%s\n",
3341 			vhost->login_buf->resp.device_name);
3342 }
3343 
3344 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3345 					 struct device_attribute *attr, char *buf)
3346 {
3347 	struct Scsi_Host *shost = class_to_shost(dev);
3348 	struct ibmvfc_host *vhost = shost_priv(shost);
3349 
3350 	return snprintf(buf, PAGE_SIZE, "%s\n",
3351 			vhost->login_buf->resp.port_loc_code);
3352 }
3353 
3354 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3355 					 struct device_attribute *attr, char *buf)
3356 {
3357 	struct Scsi_Host *shost = class_to_shost(dev);
3358 	struct ibmvfc_host *vhost = shost_priv(shost);
3359 
3360 	return snprintf(buf, PAGE_SIZE, "%s\n",
3361 			vhost->login_buf->resp.drc_name);
3362 }
3363 
3364 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3365 					     struct device_attribute *attr, char *buf)
3366 {
3367 	struct Scsi_Host *shost = class_to_shost(dev);
3368 	struct ibmvfc_host *vhost = shost_priv(shost);
3369 	return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3370 }
3371 
3372 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3373 					     struct device_attribute *attr, char *buf)
3374 {
3375 	struct Scsi_Host *shost = class_to_shost(dev);
3376 	struct ibmvfc_host *vhost = shost_priv(shost);
3377 	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3378 }
3379 
3380 /**
3381  * ibmvfc_show_log_level - Show the adapter's error logging level
3382  * @dev:	class device struct
3383  * @buf:	buffer
3384  *
3385  * Return value:
3386  * 	number of bytes printed to buffer
3387  **/
3388 static ssize_t ibmvfc_show_log_level(struct device *dev,
3389 				     struct device_attribute *attr, char *buf)
3390 {
3391 	struct Scsi_Host *shost = class_to_shost(dev);
3392 	struct ibmvfc_host *vhost = shost_priv(shost);
3393 	unsigned long flags = 0;
3394 	int len;
3395 
3396 	spin_lock_irqsave(shost->host_lock, flags);
3397 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3398 	spin_unlock_irqrestore(shost->host_lock, flags);
3399 	return len;
3400 }
3401 
3402 /**
3403  * ibmvfc_store_log_level - Change the adapter's error logging level
3404  * @dev:	class device struct
3405  * @buf:	buffer
3406  *
3407  * Return value:
3408  * 	number of bytes printed to buffer
3409  **/
3410 static ssize_t ibmvfc_store_log_level(struct device *dev,
3411 				      struct device_attribute *attr,
3412 				      const char *buf, size_t count)
3413 {
3414 	struct Scsi_Host *shost = class_to_shost(dev);
3415 	struct ibmvfc_host *vhost = shost_priv(shost);
3416 	unsigned long flags = 0;
3417 
3418 	spin_lock_irqsave(shost->host_lock, flags);
3419 	vhost->log_level = simple_strtoul(buf, NULL, 10);
3420 	spin_unlock_irqrestore(shost->host_lock, flags);
3421 	return strlen(buf);
3422 }
3423 
3424 static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
3425 					 struct device_attribute *attr, char *buf)
3426 {
3427 	struct Scsi_Host *shost = class_to_shost(dev);
3428 	struct ibmvfc_host *vhost = shost_priv(shost);
3429 	unsigned long flags = 0;
3430 	int len;
3431 
3432 	spin_lock_irqsave(shost->host_lock, flags);
3433 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
3434 	spin_unlock_irqrestore(shost->host_lock, flags);
3435 	return len;
3436 }
3437 
3438 static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
3439 					 struct device_attribute *attr,
3440 					 const char *buf, size_t count)
3441 {
3442 	struct Scsi_Host *shost = class_to_shost(dev);
3443 	struct ibmvfc_host *vhost = shost_priv(shost);
3444 	unsigned long flags = 0;
3445 	unsigned int channels;
3446 
3447 	spin_lock_irqsave(shost->host_lock, flags);
3448 	channels = simple_strtoul(buf, NULL, 10);
3449 	vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
3450 	ibmvfc_hard_reset_host(vhost);
3451 	spin_unlock_irqrestore(shost->host_lock, flags);
3452 	return strlen(buf);
3453 }
3454 
3455 static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
3456 static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
3457 static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
3458 static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
3459 static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
3460 static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
3461 static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
3462 		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3463 static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
3464 		   ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
3465 
3466 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3467 /**
3468  * ibmvfc_read_trace - Dump the adapter trace
3469  * @filp:		open sysfs file
3470  * @kobj:		kobject struct
3471  * @bin_attr:	bin_attribute struct
3472  * @buf:		buffer
3473  * @off:		offset
3474  * @count:		buffer size
3475  *
3476  * Return value:
3477  *	number of bytes printed to buffer
3478  **/
3479 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3480 				 struct bin_attribute *bin_attr,
3481 				 char *buf, loff_t off, size_t count)
3482 {
3483 	struct device *dev = container_of(kobj, struct device, kobj);
3484 	struct Scsi_Host *shost = class_to_shost(dev);
3485 	struct ibmvfc_host *vhost = shost_priv(shost);
3486 	unsigned long flags = 0;
3487 	int size = IBMVFC_TRACE_SIZE;
3488 	char *src = (char *)vhost->trace;
3489 
3490 	if (off > size)
3491 		return 0;
3492 	if (off + count > size) {
3493 		size -= off;
3494 		count = size;
3495 	}
3496 
3497 	spin_lock_irqsave(shost->host_lock, flags);
3498 	memcpy(buf, &src[off], count);
3499 	spin_unlock_irqrestore(shost->host_lock, flags);
3500 	return count;
3501 }
3502 
3503 static struct bin_attribute ibmvfc_trace_attr = {
3504 	.attr =	{
3505 		.name = "trace",
3506 		.mode = S_IRUGO,
3507 	},
3508 	.size = 0,
3509 	.read = ibmvfc_read_trace,
3510 };
3511 #endif
3512 
3513 static struct device_attribute *ibmvfc_attrs[] = {
3514 	&dev_attr_partition_name,
3515 	&dev_attr_device_name,
3516 	&dev_attr_port_loc_code,
3517 	&dev_attr_drc_name,
3518 	&dev_attr_npiv_version,
3519 	&dev_attr_capabilities,
3520 	&dev_attr_log_level,
3521 	&dev_attr_nr_scsi_channels,
3522 	NULL
3523 };
3524 
3525 static struct scsi_host_template driver_template = {
3526 	.module = THIS_MODULE,
3527 	.name = "IBM POWER Virtual FC Adapter",
3528 	.proc_name = IBMVFC_NAME,
3529 	.queuecommand = ibmvfc_queuecommand,
3530 	.eh_timed_out = fc_eh_timed_out,
3531 	.eh_abort_handler = ibmvfc_eh_abort_handler,
3532 	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
3533 	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
3534 	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
3535 	.slave_alloc = ibmvfc_slave_alloc,
3536 	.slave_configure = ibmvfc_slave_configure,
3537 	.target_alloc = ibmvfc_target_alloc,
3538 	.scan_finished = ibmvfc_scan_finished,
3539 	.change_queue_depth = ibmvfc_change_queue_depth,
3540 	.cmd_per_lun = 16,
3541 	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
3542 	.this_id = -1,
3543 	.sg_tablesize = SG_ALL,
3544 	.max_sectors = IBMVFC_MAX_SECTORS,
3545 	.shost_attrs = ibmvfc_attrs,
3546 	.track_queue_depth = 1,
3547 	.host_tagset = 1,
3548 };
3549 
3550 /**
3551  * ibmvfc_next_async_crq - Returns the next entry in async queue
3552  * @vhost:	ibmvfc host struct
3553  *
3554  * Returns:
3555  *	Pointer to next entry in queue / NULL if empty
3556  **/
3557 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3558 {
3559 	struct ibmvfc_queue *async_crq = &vhost->async_crq;
3560 	struct ibmvfc_async_crq *crq;
3561 
3562 	crq = &async_crq->msgs.async[async_crq->cur];
3563 	if (crq->valid & 0x80) {
3564 		if (++async_crq->cur == async_crq->size)
3565 			async_crq->cur = 0;
3566 		rmb();
3567 	} else
3568 		crq = NULL;
3569 
3570 	return crq;
3571 }
3572 
3573 /**
3574  * ibmvfc_next_crq - Returns the next entry in message queue
3575  * @vhost:	ibmvfc host struct
3576  *
3577  * Returns:
3578  *	Pointer to next entry in queue / NULL if empty
3579  **/
3580 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3581 {
3582 	struct ibmvfc_queue *queue = &vhost->crq;
3583 	struct ibmvfc_crq *crq;
3584 
3585 	crq = &queue->msgs.crq[queue->cur];
3586 	if (crq->valid & 0x80) {
3587 		if (++queue->cur == queue->size)
3588 			queue->cur = 0;
3589 		rmb();
3590 	} else
3591 		crq = NULL;
3592 
3593 	return crq;
3594 }
3595 
3596 /**
3597  * ibmvfc_interrupt - Interrupt handler
3598  * @irq:		number of irq to handle, not used
3599  * @dev_instance: ibmvfc_host that received interrupt
3600  *
3601  * Returns:
3602  *	IRQ_HANDLED
3603  **/
3604 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3605 {
3606 	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3607 	unsigned long flags;
3608 
3609 	spin_lock_irqsave(vhost->host->host_lock, flags);
3610 	vio_disable_interrupts(to_vio_dev(vhost->dev));
3611 	tasklet_schedule(&vhost->tasklet);
3612 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3613 	return IRQ_HANDLED;
3614 }
3615 
3616 /**
3617  * ibmvfc_tasklet - Interrupt handler tasklet
3618  * @data:		ibmvfc host struct
3619  *
3620  * Returns:
3621  *	Nothing
3622  **/
3623 static void ibmvfc_tasklet(void *data)
3624 {
3625 	struct ibmvfc_host *vhost = data;
3626 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
3627 	struct ibmvfc_crq *crq;
3628 	struct ibmvfc_async_crq *async;
3629 	struct ibmvfc_event *evt, *temp;
3630 	unsigned long flags;
3631 	int done = 0;
3632 	LIST_HEAD(evt_doneq);
3633 
3634 	spin_lock_irqsave(vhost->host->host_lock, flags);
3635 	spin_lock(vhost->crq.q_lock);
3636 	while (!done) {
3637 		/* Pull all the valid messages off the async CRQ */
3638 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3639 			ibmvfc_handle_async(async, vhost);
3640 			async->valid = 0;
3641 			wmb();
3642 		}
3643 
3644 		/* Pull all the valid messages off the CRQ */
3645 		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3646 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3647 			crq->valid = 0;
3648 			wmb();
3649 		}
3650 
3651 		vio_enable_interrupts(vdev);
3652 		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3653 			vio_disable_interrupts(vdev);
3654 			ibmvfc_handle_async(async, vhost);
3655 			async->valid = 0;
3656 			wmb();
3657 		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3658 			vio_disable_interrupts(vdev);
3659 			ibmvfc_handle_crq(crq, vhost, &evt_doneq);
3660 			crq->valid = 0;
3661 			wmb();
3662 		} else
3663 			done = 1;
3664 	}
3665 
3666 	spin_unlock(vhost->crq.q_lock);
3667 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3668 
3669 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3670 		del_timer(&evt->timer);
3671 		list_del(&evt->queue_list);
3672 		ibmvfc_trc_end(evt);
3673 		evt->done(evt);
3674 	}
3675 }
3676 
3677 static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
3678 {
3679 	struct device *dev = scrq->vhost->dev;
3680 	struct vio_dev *vdev = to_vio_dev(dev);
3681 	unsigned long rc;
3682 	int irq_action = H_ENABLE_VIO_INTERRUPT;
3683 
3684 	if (!enable)
3685 		irq_action = H_DISABLE_VIO_INTERRUPT;
3686 
3687 	rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
3688 				scrq->hw_irq, 0, 0);
3689 
3690 	if (rc)
3691 		dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
3692 			enable ? "enable" : "disable", scrq->hwq_id, rc);
3693 
3694 	return rc;
3695 }
3696 
3697 static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
3698 			       struct list_head *evt_doneq)
3699 {
3700 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
3701 
3702 	switch (crq->valid) {
3703 	case IBMVFC_CRQ_CMD_RSP:
3704 		break;
3705 	case IBMVFC_CRQ_XPORT_EVENT:
3706 		return;
3707 	default:
3708 		dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
3709 		return;
3710 	}
3711 
3712 	/* The only kind of payload CRQs we should get are responses to
3713 	 * things we send. Make sure this response is to something we
3714 	 * actually sent
3715 	 */
3716 	if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
3717 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
3718 			crq->ioba);
3719 		return;
3720 	}
3721 
3722 	if (unlikely(atomic_read(&evt->free))) {
3723 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
3724 			crq->ioba);
3725 		return;
3726 	}
3727 
3728 	spin_lock(&evt->queue->l_lock);
3729 	list_move_tail(&evt->queue_list, evt_doneq);
3730 	spin_unlock(&evt->queue->l_lock);
3731 }
3732 
3733 static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
3734 {
3735 	struct ibmvfc_crq *crq;
3736 
3737 	crq = &scrq->msgs.scrq[scrq->cur].crq;
3738 	if (crq->valid & 0x80) {
3739 		if (++scrq->cur == scrq->size)
3740 			scrq->cur = 0;
3741 		rmb();
3742 	} else
3743 		crq = NULL;
3744 
3745 	return crq;
3746 }
3747 
3748 static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
3749 {
3750 	struct ibmvfc_crq *crq;
3751 	struct ibmvfc_event *evt, *temp;
3752 	unsigned long flags;
3753 	int done = 0;
3754 	LIST_HEAD(evt_doneq);
3755 
3756 	spin_lock_irqsave(scrq->q_lock, flags);
3757 	while (!done) {
3758 		while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3759 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3760 			crq->valid = 0;
3761 			wmb();
3762 		}
3763 
3764 		ibmvfc_toggle_scrq_irq(scrq, 1);
3765 		if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
3766 			ibmvfc_toggle_scrq_irq(scrq, 0);
3767 			ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
3768 			crq->valid = 0;
3769 			wmb();
3770 		} else
3771 			done = 1;
3772 	}
3773 	spin_unlock_irqrestore(scrq->q_lock, flags);
3774 
3775 	list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
3776 		del_timer(&evt->timer);
3777 		list_del(&evt->queue_list);
3778 		ibmvfc_trc_end(evt);
3779 		evt->done(evt);
3780 	}
3781 }
3782 
3783 static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
3784 {
3785 	struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
3786 
3787 	ibmvfc_toggle_scrq_irq(scrq, 0);
3788 	ibmvfc_drain_sub_crq(scrq);
3789 
3790 	return IRQ_HANDLED;
3791 }
3792 
3793 /**
3794  * ibmvfc_init_tgt - Set the next init job step for the target
3795  * @tgt:		ibmvfc target struct
3796  * @job_step:	job step to perform
3797  *
3798  **/
3799 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3800 			    void (*job_step) (struct ibmvfc_target *))
3801 {
3802 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3803 		tgt->job_step = job_step;
3804 	wake_up(&tgt->vhost->work_wait_q);
3805 }
3806 
3807 /**
3808  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3809  * @tgt:		ibmvfc target struct
3810  * @job_step:	initialization job step
3811  *
3812  * Returns: 1 if step will be retried / 0 if not
3813  *
3814  **/
3815 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3816 				  void (*job_step) (struct ibmvfc_target *))
3817 {
3818 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3819 		ibmvfc_del_tgt(tgt);
3820 		wake_up(&tgt->vhost->work_wait_q);
3821 		return 0;
3822 	} else
3823 		ibmvfc_init_tgt(tgt, job_step);
3824 	return 1;
3825 }
3826 
3827 /* Defined in FC-LS */
3828 static const struct {
3829 	int code;
3830 	int retry;
3831 	int logged_in;
3832 } prli_rsp [] = {
3833 	{ 0, 1, 0 },
3834 	{ 1, 0, 1 },
3835 	{ 2, 1, 0 },
3836 	{ 3, 1, 0 },
3837 	{ 4, 0, 0 },
3838 	{ 5, 0, 0 },
3839 	{ 6, 0, 1 },
3840 	{ 7, 0, 0 },
3841 	{ 8, 1, 0 },
3842 };
3843 
3844 /**
3845  * ibmvfc_get_prli_rsp - Find PRLI response index
3846  * @flags:	PRLI response flags
3847  *
3848  **/
3849 static int ibmvfc_get_prli_rsp(u16 flags)
3850 {
3851 	int i;
3852 	int code = (flags & 0x0f00) >> 8;
3853 
3854 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3855 		if (prli_rsp[i].code == code)
3856 			return i;
3857 
3858 	return 0;
3859 }
3860 
3861 /**
3862  * ibmvfc_tgt_prli_done - Completion handler for Process Login
3863  * @evt:	ibmvfc event struct
3864  *
3865  **/
3866 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3867 {
3868 	struct ibmvfc_target *tgt = evt->tgt;
3869 	struct ibmvfc_host *vhost = evt->vhost;
3870 	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3871 	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3872 	u32 status = be16_to_cpu(rsp->common.status);
3873 	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3874 
3875 	vhost->discovery_threads--;
3876 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3877 	switch (status) {
3878 	case IBMVFC_MAD_SUCCESS:
3879 		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3880 			parms->type, parms->flags, parms->service_parms);
3881 
3882 		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3883 			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
3884 			if (prli_rsp[index].logged_in) {
3885 				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
3886 					tgt->need_login = 0;
3887 					tgt->ids.roles = 0;
3888 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
3889 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3890 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
3891 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3892 					tgt->add_rport = 1;
3893 				} else
3894 					ibmvfc_del_tgt(tgt);
3895 			} else if (prli_rsp[index].retry)
3896 				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3897 			else
3898 				ibmvfc_del_tgt(tgt);
3899 		} else
3900 			ibmvfc_del_tgt(tgt);
3901 		break;
3902 	case IBMVFC_MAD_DRIVER_FAILED:
3903 		break;
3904 	case IBMVFC_MAD_CRQ_ERROR:
3905 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3906 		break;
3907 	case IBMVFC_MAD_FAILED:
3908 	default:
3909 		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
3910 		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
3911 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3912 		else if (tgt->logo_rcvd)
3913 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3914 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3915 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3916 		else
3917 			ibmvfc_del_tgt(tgt);
3918 
3919 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3920 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3921 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3922 		break;
3923 	}
3924 
3925 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3926 	ibmvfc_free_event(evt);
3927 	wake_up(&vhost->work_wait_q);
3928 }
3929 
3930 /**
3931  * ibmvfc_tgt_send_prli - Send a process login
3932  * @tgt:	ibmvfc target struct
3933  *
3934  **/
3935 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3936 {
3937 	struct ibmvfc_process_login *prli;
3938 	struct ibmvfc_host *vhost = tgt->vhost;
3939 	struct ibmvfc_event *evt;
3940 
3941 	if (vhost->discovery_threads >= disc_threads)
3942 		return;
3943 
3944 	kref_get(&tgt->kref);
3945 	evt = ibmvfc_get_event(&vhost->crq);
3946 	vhost->discovery_threads++;
3947 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
3948 	evt->tgt = tgt;
3949 	prli = &evt->iu.prli;
3950 	memset(prli, 0, sizeof(*prli));
3951 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
3952 		prli->common.version = cpu_to_be32(2);
3953 		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
3954 	} else {
3955 		prli->common.version = cpu_to_be32(1);
3956 	}
3957 	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
3958 	prli->common.length = cpu_to_be16(sizeof(*prli));
3959 	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
3960 
3961 	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
3962 	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
3963 	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
3964 	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
3965 
3966 	if (cls3_error)
3967 		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
3968 
3969 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3970 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3971 		vhost->discovery_threads--;
3972 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3973 		kref_put(&tgt->kref, ibmvfc_release_tgt);
3974 	} else
3975 		tgt_dbg(tgt, "Sent process login\n");
3976 }
3977 
3978 /**
3979  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
3980  * @evt:	ibmvfc event struct
3981  *
3982  **/
3983 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3984 {
3985 	struct ibmvfc_target *tgt = evt->tgt;
3986 	struct ibmvfc_host *vhost = evt->vhost;
3987 	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
3988 	u32 status = be16_to_cpu(rsp->common.status);
3989 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
3990 
3991 	vhost->discovery_threads--;
3992 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3993 	switch (status) {
3994 	case IBMVFC_MAD_SUCCESS:
3995 		tgt_dbg(tgt, "Port Login succeeded\n");
3996 		if (tgt->ids.port_name &&
3997 		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
3998 			vhost->reinit = 1;
3999 			tgt_dbg(tgt, "Port re-init required\n");
4000 			break;
4001 		}
4002 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4003 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4004 		tgt->ids.port_id = tgt->scsi_id;
4005 		memcpy(&tgt->service_parms, &rsp->service_parms,
4006 		       sizeof(tgt->service_parms));
4007 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4008 		       sizeof(tgt->service_parms_change));
4009 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4010 		break;
4011 	case IBMVFC_MAD_DRIVER_FAILED:
4012 		break;
4013 	case IBMVFC_MAD_CRQ_ERROR:
4014 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4015 		break;
4016 	case IBMVFC_MAD_FAILED:
4017 	default:
4018 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4019 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
4020 		else
4021 			ibmvfc_del_tgt(tgt);
4022 
4023 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4024 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4025 					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4026 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4027 			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
4028 		break;
4029 	}
4030 
4031 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4032 	ibmvfc_free_event(evt);
4033 	wake_up(&vhost->work_wait_q);
4034 }
4035 
4036 /**
4037  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
4038  * @tgt:	ibmvfc target struct
4039  *
4040  **/
4041 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
4042 {
4043 	struct ibmvfc_port_login *plogi;
4044 	struct ibmvfc_host *vhost = tgt->vhost;
4045 	struct ibmvfc_event *evt;
4046 
4047 	if (vhost->discovery_threads >= disc_threads)
4048 		return;
4049 
4050 	kref_get(&tgt->kref);
4051 	tgt->logo_rcvd = 0;
4052 	evt = ibmvfc_get_event(&vhost->crq);
4053 	vhost->discovery_threads++;
4054 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4055 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
4056 	evt->tgt = tgt;
4057 	plogi = &evt->iu.plogi;
4058 	memset(plogi, 0, sizeof(*plogi));
4059 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4060 		plogi->common.version = cpu_to_be32(2);
4061 		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
4062 	} else {
4063 		plogi->common.version = cpu_to_be32(1);
4064 	}
4065 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
4066 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
4067 	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
4068 
4069 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4070 		vhost->discovery_threads--;
4071 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4072 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4073 	} else
4074 		tgt_dbg(tgt, "Sent port login\n");
4075 }
4076 
4077 /**
4078  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
4079  * @evt:	ibmvfc event struct
4080  *
4081  **/
4082 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
4083 {
4084 	struct ibmvfc_target *tgt = evt->tgt;
4085 	struct ibmvfc_host *vhost = evt->vhost;
4086 	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
4087 	u32 status = be16_to_cpu(rsp->common.status);
4088 
4089 	vhost->discovery_threads--;
4090 	ibmvfc_free_event(evt);
4091 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4092 
4093 	switch (status) {
4094 	case IBMVFC_MAD_SUCCESS:
4095 		tgt_dbg(tgt, "Implicit Logout succeeded\n");
4096 		break;
4097 	case IBMVFC_MAD_DRIVER_FAILED:
4098 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4099 		wake_up(&vhost->work_wait_q);
4100 		return;
4101 	case IBMVFC_MAD_FAILED:
4102 	default:
4103 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
4104 		break;
4105 	}
4106 
4107 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
4108 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4109 	wake_up(&vhost->work_wait_q);
4110 }
4111 
4112 /**
4113  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
4114  * @tgt:		ibmvfc target struct
4115  *
4116  * Returns:
4117  *	Allocated and initialized ibmvfc_event struct
4118  **/
4119 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
4120 								 void (*done) (struct ibmvfc_event *))
4121 {
4122 	struct ibmvfc_implicit_logout *mad;
4123 	struct ibmvfc_host *vhost = tgt->vhost;
4124 	struct ibmvfc_event *evt;
4125 
4126 	kref_get(&tgt->kref);
4127 	evt = ibmvfc_get_event(&vhost->crq);
4128 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
4129 	evt->tgt = tgt;
4130 	mad = &evt->iu.implicit_logout;
4131 	memset(mad, 0, sizeof(*mad));
4132 	mad->common.version = cpu_to_be32(1);
4133 	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
4134 	mad->common.length = cpu_to_be16(sizeof(*mad));
4135 	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
4136 	return evt;
4137 }
4138 
4139 /**
4140  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
4141  * @tgt:		ibmvfc target struct
4142  *
4143  **/
4144 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
4145 {
4146 	struct ibmvfc_host *vhost = tgt->vhost;
4147 	struct ibmvfc_event *evt;
4148 
4149 	if (vhost->discovery_threads >= disc_threads)
4150 		return;
4151 
4152 	vhost->discovery_threads++;
4153 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4154 						   ibmvfc_tgt_implicit_logout_done);
4155 
4156 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4157 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4158 		vhost->discovery_threads--;
4159 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4160 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4161 	} else
4162 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4163 }
4164 
4165 /**
4166  * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
4167  * @evt:	ibmvfc event struct
4168  *
4169  **/
4170 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
4171 {
4172 	struct ibmvfc_target *tgt = evt->tgt;
4173 	struct ibmvfc_host *vhost = evt->vhost;
4174 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4175 	u32 status = be16_to_cpu(mad->common.status);
4176 
4177 	vhost->discovery_threads--;
4178 	ibmvfc_free_event(evt);
4179 
4180 	/*
4181 	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
4182 	 * driver in which case we need to free up all the targets. If we are
4183 	 * not unloading, we will still go through a hard reset to get out of
4184 	 * offline state, so there is no need to track the old targets in that
4185 	 * case.
4186 	 */
4187 	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
4188 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4189 	else
4190 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
4191 
4192 	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
4193 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4194 	wake_up(&vhost->work_wait_q);
4195 }
4196 
4197 /**
4198  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
4199  * @tgt:		ibmvfc target struct
4200  *
4201  **/
4202 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
4203 {
4204 	struct ibmvfc_host *vhost = tgt->vhost;
4205 	struct ibmvfc_event *evt;
4206 
4207 	if (!vhost->logged_in) {
4208 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4209 		return;
4210 	}
4211 
4212 	if (vhost->discovery_threads >= disc_threads)
4213 		return;
4214 
4215 	vhost->discovery_threads++;
4216 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
4217 						   ibmvfc_tgt_implicit_logout_and_del_done);
4218 
4219 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
4220 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4221 		vhost->discovery_threads--;
4222 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4223 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4224 	} else
4225 		tgt_dbg(tgt, "Sent Implicit Logout\n");
4226 }
4227 
4228 /**
4229  * ibmvfc_tgt_move_login_done - Completion handler for Move Login
4230  * @evt:	ibmvfc event struct
4231  *
4232  **/
4233 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
4234 {
4235 	struct ibmvfc_target *tgt = evt->tgt;
4236 	struct ibmvfc_host *vhost = evt->vhost;
4237 	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
4238 	u32 status = be16_to_cpu(rsp->common.status);
4239 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4240 
4241 	vhost->discovery_threads--;
4242 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4243 	switch (status) {
4244 	case IBMVFC_MAD_SUCCESS:
4245 		tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
4246 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
4247 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
4248 		tgt->ids.port_id = tgt->scsi_id;
4249 		memcpy(&tgt->service_parms, &rsp->service_parms,
4250 		       sizeof(tgt->service_parms));
4251 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
4252 		       sizeof(tgt->service_parms_change));
4253 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
4254 		break;
4255 	case IBMVFC_MAD_DRIVER_FAILED:
4256 		break;
4257 	case IBMVFC_MAD_CRQ_ERROR:
4258 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4259 		break;
4260 	case IBMVFC_MAD_FAILED:
4261 	default:
4262 		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
4263 
4264 		tgt_log(tgt, level,
4265 			"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
4266 			tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
4267 			status);
4268 		break;
4269 	}
4270 
4271 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4272 	ibmvfc_free_event(evt);
4273 	wake_up(&vhost->work_wait_q);
4274 }
4275 
4276 
4277 /**
4278  * ibmvfc_tgt_move_login - Initiate a move login for specified target
4279  * @tgt:		ibmvfc target struct
4280  *
4281  **/
4282 static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
4283 {
4284 	struct ibmvfc_host *vhost = tgt->vhost;
4285 	struct ibmvfc_move_login *move;
4286 	struct ibmvfc_event *evt;
4287 
4288 	if (vhost->discovery_threads >= disc_threads)
4289 		return;
4290 
4291 	kref_get(&tgt->kref);
4292 	evt = ibmvfc_get_event(&vhost->crq);
4293 	vhost->discovery_threads++;
4294 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4295 	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
4296 	evt->tgt = tgt;
4297 	move = &evt->iu.move_login;
4298 	memset(move, 0, sizeof(*move));
4299 	move->common.version = cpu_to_be32(1);
4300 	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
4301 	move->common.length = cpu_to_be16(sizeof(*move));
4302 
4303 	move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
4304 	move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
4305 	move->wwpn = cpu_to_be64(tgt->wwpn);
4306 	move->node_name = cpu_to_be64(tgt->ids.node_name);
4307 
4308 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4309 		vhost->discovery_threads--;
4310 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
4311 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4312 	} else
4313 		tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
4314 }
4315 
4316 /**
4317  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
4318  * @mad:	ibmvfc passthru mad struct
4319  * @tgt:	ibmvfc target struct
4320  *
4321  * Returns:
4322  *	1 if PLOGI needed / 0 if PLOGI not needed
4323  **/
4324 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
4325 				    struct ibmvfc_target *tgt)
4326 {
4327 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
4328 		return 1;
4329 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
4330 		return 1;
4331 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
4332 		return 1;
4333 	return 0;
4334 }
4335 
4336 /**
4337  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
4338  * @evt:	ibmvfc event struct
4339  *
4340  **/
4341 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
4342 {
4343 	struct ibmvfc_target *tgt = evt->tgt;
4344 	struct ibmvfc_host *vhost = evt->vhost;
4345 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
4346 	u32 status = be16_to_cpu(mad->common.status);
4347 	u8 fc_reason, fc_explain;
4348 
4349 	vhost->discovery_threads--;
4350 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4351 	del_timer(&tgt->timer);
4352 
4353 	switch (status) {
4354 	case IBMVFC_MAD_SUCCESS:
4355 		tgt_dbg(tgt, "ADISC succeeded\n");
4356 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
4357 			ibmvfc_del_tgt(tgt);
4358 		break;
4359 	case IBMVFC_MAD_DRIVER_FAILED:
4360 		break;
4361 	case IBMVFC_MAD_FAILED:
4362 	default:
4363 		ibmvfc_del_tgt(tgt);
4364 		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
4365 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
4366 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4367 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
4368 			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
4369 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
4370 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
4371 		break;
4372 	}
4373 
4374 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4375 	ibmvfc_free_event(evt);
4376 	wake_up(&vhost->work_wait_q);
4377 }
4378 
4379 /**
4380  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
4381  * @evt:		ibmvfc event struct
4382  *
4383  **/
4384 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
4385 {
4386 	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
4387 
4388 	memset(mad, 0, sizeof(*mad));
4389 	mad->common.version = cpu_to_be32(1);
4390 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
4391 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
4392 	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4393 		offsetof(struct ibmvfc_passthru_mad, iu));
4394 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
4395 	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4396 	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
4397 	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4398 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4399 		offsetof(struct ibmvfc_passthru_fc_iu, payload));
4400 	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
4401 	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
4402 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
4403 		offsetof(struct ibmvfc_passthru_fc_iu, response));
4404 	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
4405 }
4406 
4407 /**
4408  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
4409  * @evt:		ibmvfc event struct
4410  *
4411  * Just cleanup this event struct. Everything else is handled by
4412  * the ADISC completion handler. If the ADISC never actually comes
4413  * back, we still have the timer running on the ADISC event struct
4414  * which will fire and cause the CRQ to get reset.
4415  *
4416  **/
4417 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
4418 {
4419 	struct ibmvfc_host *vhost = evt->vhost;
4420 	struct ibmvfc_target *tgt = evt->tgt;
4421 
4422 	tgt_dbg(tgt, "ADISC cancel complete\n");
4423 	vhost->abort_threads--;
4424 	ibmvfc_free_event(evt);
4425 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4426 	wake_up(&vhost->work_wait_q);
4427 }
4428 
4429 /**
4430  * ibmvfc_adisc_timeout - Handle an ADISC timeout
4431  * @tgt:		ibmvfc target struct
4432  *
4433  * If an ADISC times out, send a cancel. If the cancel times
4434  * out, reset the CRQ. When the ADISC comes back as cancelled,
4435  * log back into the target.
4436  **/
4437 static void ibmvfc_adisc_timeout(struct timer_list *t)
4438 {
4439 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
4440 	struct ibmvfc_host *vhost = tgt->vhost;
4441 	struct ibmvfc_event *evt;
4442 	struct ibmvfc_tmf *tmf;
4443 	unsigned long flags;
4444 	int rc;
4445 
4446 	tgt_dbg(tgt, "ADISC timeout\n");
4447 	spin_lock_irqsave(vhost->host->host_lock, flags);
4448 	if (vhost->abort_threads >= disc_threads ||
4449 	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
4450 	    vhost->state != IBMVFC_INITIALIZING ||
4451 	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4452 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4453 		return;
4454 	}
4455 
4456 	vhost->abort_threads++;
4457 	kref_get(&tgt->kref);
4458 	evt = ibmvfc_get_event(&vhost->crq);
4459 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4460 
4461 	evt->tgt = tgt;
4462 	tmf = &evt->iu.tmf;
4463 	memset(tmf, 0, sizeof(*tmf));
4464 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4465 		tmf->common.version = cpu_to_be32(2);
4466 		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4467 	} else {
4468 		tmf->common.version = cpu_to_be32(1);
4469 	}
4470 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4471 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
4472 	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4473 	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4474 
4475 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
4476 
4477 	if (rc) {
4478 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4479 		vhost->abort_threads--;
4480 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4481 		__ibmvfc_reset_host(vhost);
4482 	} else
4483 		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4484 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4485 }
4486 
4487 /**
4488  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4489  * @tgt:		ibmvfc target struct
4490  *
4491  * When sending an ADISC we end up with two timers running. The
4492  * first timer is the timer in the ibmvfc target struct. If this
4493  * fires, we send a cancel to the target. The second timer is the
4494  * timer on the ibmvfc event for the ADISC, which is longer. If that
4495  * fires, it means the ADISC timed out and our attempt to cancel it
4496  * also failed, so we need to reset the CRQ.
4497  **/
4498 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4499 {
4500 	struct ibmvfc_passthru_mad *mad;
4501 	struct ibmvfc_host *vhost = tgt->vhost;
4502 	struct ibmvfc_event *evt;
4503 
4504 	if (vhost->discovery_threads >= disc_threads)
4505 		return;
4506 
4507 	kref_get(&tgt->kref);
4508 	evt = ibmvfc_get_event(&vhost->crq);
4509 	vhost->discovery_threads++;
4510 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4511 	evt->tgt = tgt;
4512 
4513 	ibmvfc_init_passthru(evt);
4514 	mad = &evt->iu.passthru;
4515 	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4516 	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4517 	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4518 
4519 	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4520 	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4521 	       sizeof(vhost->login_buf->resp.port_name));
4522 	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4523 	       sizeof(vhost->login_buf->resp.node_name));
4524 	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4525 
4526 	if (timer_pending(&tgt->timer))
4527 		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4528 	else {
4529 		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4530 		add_timer(&tgt->timer);
4531 	}
4532 
4533 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4534 	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4535 		vhost->discovery_threads--;
4536 		del_timer(&tgt->timer);
4537 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4538 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4539 	} else
4540 		tgt_dbg(tgt, "Sent ADISC\n");
4541 }
4542 
4543 /**
4544  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4545  * @evt:	ibmvfc event struct
4546  *
4547  **/
4548 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4549 {
4550 	struct ibmvfc_target *tgt = evt->tgt;
4551 	struct ibmvfc_host *vhost = evt->vhost;
4552 	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4553 	u32 status = be16_to_cpu(rsp->common.status);
4554 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4555 
4556 	vhost->discovery_threads--;
4557 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4558 	switch (status) {
4559 	case IBMVFC_MAD_SUCCESS:
4560 		tgt_dbg(tgt, "Query Target succeeded\n");
4561 		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4562 			ibmvfc_del_tgt(tgt);
4563 		else
4564 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4565 		break;
4566 	case IBMVFC_MAD_DRIVER_FAILED:
4567 		break;
4568 	case IBMVFC_MAD_CRQ_ERROR:
4569 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4570 		break;
4571 	case IBMVFC_MAD_FAILED:
4572 	default:
4573 		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4574 		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4575 		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4576 			ibmvfc_del_tgt(tgt);
4577 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4578 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4579 		else
4580 			ibmvfc_del_tgt(tgt);
4581 
4582 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4583 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4584 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4585 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4586 			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4587 			status);
4588 		break;
4589 	}
4590 
4591 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4592 	ibmvfc_free_event(evt);
4593 	wake_up(&vhost->work_wait_q);
4594 }
4595 
4596 /**
4597  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4598  * @tgt:	ibmvfc target struct
4599  *
4600  **/
4601 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4602 {
4603 	struct ibmvfc_query_tgt *query_tgt;
4604 	struct ibmvfc_host *vhost = tgt->vhost;
4605 	struct ibmvfc_event *evt;
4606 
4607 	if (vhost->discovery_threads >= disc_threads)
4608 		return;
4609 
4610 	kref_get(&tgt->kref);
4611 	evt = ibmvfc_get_event(&vhost->crq);
4612 	vhost->discovery_threads++;
4613 	evt->tgt = tgt;
4614 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4615 	query_tgt = &evt->iu.query_tgt;
4616 	memset(query_tgt, 0, sizeof(*query_tgt));
4617 	query_tgt->common.version = cpu_to_be32(1);
4618 	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4619 	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4620 	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4621 
4622 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4623 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4624 		vhost->discovery_threads--;
4625 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4626 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4627 	} else
4628 		tgt_dbg(tgt, "Sent Query Target\n");
4629 }
4630 
4631 /**
4632  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4633  * @vhost:		ibmvfc host struct
4634  * @scsi_id:	SCSI ID to allocate target for
4635  *
4636  * Returns:
4637  *	0 on success / other on failure
4638  **/
4639 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4640 			       struct ibmvfc_discover_targets_entry *target)
4641 {
4642 	struct ibmvfc_target *stgt = NULL;
4643 	struct ibmvfc_target *wtgt = NULL;
4644 	struct ibmvfc_target *tgt;
4645 	unsigned long flags;
4646 	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4647 	u64 wwpn = be64_to_cpu(target->wwpn);
4648 
4649 	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4650 	spin_lock_irqsave(vhost->host->host_lock, flags);
4651 	list_for_each_entry(tgt, &vhost->targets, queue) {
4652 		if (tgt->wwpn == wwpn) {
4653 			wtgt = tgt;
4654 			break;
4655 		}
4656 	}
4657 
4658 	list_for_each_entry(tgt, &vhost->targets, queue) {
4659 		if (tgt->scsi_id == scsi_id) {
4660 			stgt = tgt;
4661 			break;
4662 		}
4663 	}
4664 
4665 	if (wtgt && !stgt) {
4666 		/*
4667 		 * A WWPN target has moved and we still are tracking the old
4668 		 * SCSI ID.  The only way we should be able to get here is if
4669 		 * we attempted to send an implicit logout for the old SCSI ID
4670 		 * and it failed for some reason, such as there being I/O
4671 		 * pending to the target. In this case, we will have already
4672 		 * deleted the rport from the FC transport so we do a move
4673 		 * login, which works even with I/O pending, as it will cancel
4674 		 * any active commands.
4675 		 */
4676 		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4677 			/*
4678 			 * Do a move login here. The old target is no longer
4679 			 * known to the transport layer We don't use the
4680 			 * normal ibmvfc_set_tgt_action to set this, as we
4681 			 * don't normally want to allow this state change.
4682 			 */
4683 			wtgt->old_scsi_id = wtgt->scsi_id;
4684 			wtgt->scsi_id = scsi_id;
4685 			wtgt->action = IBMVFC_TGT_ACTION_INIT;
4686 			ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4687 			goto unlock_out;
4688 		} else {
4689 			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4690 				wtgt->action, wtgt->rport);
4691 		}
4692 	} else if (stgt) {
4693 		if (tgt->need_login)
4694 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4695 		goto unlock_out;
4696 	}
4697 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4698 
4699 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4700 	memset(tgt, 0, sizeof(*tgt));
4701 	tgt->scsi_id = scsi_id;
4702 	tgt->wwpn = wwpn;
4703 	tgt->vhost = vhost;
4704 	tgt->need_login = 1;
4705 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4706 	kref_init(&tgt->kref);
4707 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4708 	spin_lock_irqsave(vhost->host->host_lock, flags);
4709 	tgt->cancel_key = vhost->task_set++;
4710 	list_add_tail(&tgt->queue, &vhost->targets);
4711 
4712 unlock_out:
4713 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4714 	return 0;
4715 }
4716 
4717 /**
4718  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4719  * @vhost:		ibmvfc host struct
4720  *
4721  * Returns:
4722  *	0 on success / other on failure
4723  **/
4724 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4725 {
4726 	int i, rc;
4727 
4728 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4729 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4730 
4731 	return rc;
4732 }
4733 
4734 /**
4735  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4736  * @evt:	ibmvfc event struct
4737  *
4738  **/
4739 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4740 {
4741 	struct ibmvfc_host *vhost = evt->vhost;
4742 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4743 	u32 mad_status = be16_to_cpu(rsp->common.status);
4744 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4745 
4746 	switch (mad_status) {
4747 	case IBMVFC_MAD_SUCCESS:
4748 		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4749 		vhost->num_targets = be32_to_cpu(rsp->num_written);
4750 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4751 		break;
4752 	case IBMVFC_MAD_FAILED:
4753 		level += ibmvfc_retry_host_init(vhost);
4754 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4755 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4756 			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4757 		break;
4758 	case IBMVFC_MAD_DRIVER_FAILED:
4759 		break;
4760 	default:
4761 		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4762 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4763 		break;
4764 	}
4765 
4766 	ibmvfc_free_event(evt);
4767 	wake_up(&vhost->work_wait_q);
4768 }
4769 
4770 /**
4771  * ibmvfc_discover_targets - Send Discover Targets MAD
4772  * @vhost:	ibmvfc host struct
4773  *
4774  **/
4775 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4776 {
4777 	struct ibmvfc_discover_targets *mad;
4778 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4779 
4780 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4781 	mad = &evt->iu.discover_targets;
4782 	memset(mad, 0, sizeof(*mad));
4783 	mad->common.version = cpu_to_be32(1);
4784 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4785 	mad->common.length = cpu_to_be16(sizeof(*mad));
4786 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4787 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4788 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4789 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4790 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4791 
4792 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4793 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4794 	else
4795 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4796 }
4797 
4798 static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
4799 {
4800 	struct ibmvfc_host *vhost = evt->vhost;
4801 	struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
4802 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4803 	u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
4804 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4805 	int flags, active_queues, i;
4806 
4807 	ibmvfc_free_event(evt);
4808 
4809 	switch (mad_status) {
4810 	case IBMVFC_MAD_SUCCESS:
4811 		ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
4812 		flags = be32_to_cpu(setup->flags);
4813 		vhost->do_enquiry = 0;
4814 		active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
4815 		scrqs->active_queues = active_queues;
4816 
4817 		if (flags & IBMVFC_CHANNELS_CANCELED) {
4818 			ibmvfc_dbg(vhost, "Channels Canceled\n");
4819 			vhost->using_channels = 0;
4820 		} else {
4821 			if (active_queues)
4822 				vhost->using_channels = 1;
4823 			for (i = 0; i < active_queues; i++)
4824 				scrqs->scrqs[i].vios_cookie =
4825 					be64_to_cpu(setup->channel_handles[i]);
4826 
4827 			ibmvfc_dbg(vhost, "Using %u channels\n",
4828 				   vhost->scsi_scrqs.active_queues);
4829 		}
4830 		break;
4831 	case IBMVFC_MAD_FAILED:
4832 		level += ibmvfc_retry_host_init(vhost);
4833 		ibmvfc_log(vhost, level, "Channel Setup failed\n");
4834 		fallthrough;
4835 	case IBMVFC_MAD_DRIVER_FAILED:
4836 		return;
4837 	default:
4838 		dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
4839 			mad_status);
4840 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4841 		return;
4842 	}
4843 
4844 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4845 	wake_up(&vhost->work_wait_q);
4846 }
4847 
4848 static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
4849 {
4850 	struct ibmvfc_channel_setup_mad *mad;
4851 	struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
4852 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4853 	struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
4854 	unsigned int num_channels =
4855 		min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
4856 	int i;
4857 
4858 	memset(setup_buf, 0, sizeof(*setup_buf));
4859 	if (num_channels == 0)
4860 		setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
4861 	else {
4862 		setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
4863 		for (i = 0; i < num_channels; i++)
4864 			setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
4865 	}
4866 
4867 	ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
4868 	mad = &evt->iu.channel_setup;
4869 	memset(mad, 0, sizeof(*mad));
4870 	mad->common.version = cpu_to_be32(1);
4871 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
4872 	mad->common.length = cpu_to_be16(sizeof(*mad));
4873 	mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
4874 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
4875 
4876 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4877 
4878 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4879 		ibmvfc_dbg(vhost, "Sent channel setup\n");
4880 	else
4881 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
4882 }
4883 
4884 static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
4885 {
4886 	struct ibmvfc_host *vhost = evt->vhost;
4887 	struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
4888 	u32 mad_status = be16_to_cpu(rsp->common.status);
4889 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4890 
4891 	switch (mad_status) {
4892 	case IBMVFC_MAD_SUCCESS:
4893 		ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
4894 		vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
4895 		ibmvfc_free_event(evt);
4896 		break;
4897 	case IBMVFC_MAD_FAILED:
4898 		level += ibmvfc_retry_host_init(vhost);
4899 		ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
4900 		fallthrough;
4901 	case IBMVFC_MAD_DRIVER_FAILED:
4902 		ibmvfc_free_event(evt);
4903 		return;
4904 	default:
4905 		dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
4906 			mad_status);
4907 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4908 		ibmvfc_free_event(evt);
4909 		return;
4910 	}
4911 
4912 	ibmvfc_channel_setup(vhost);
4913 }
4914 
4915 static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
4916 {
4917 	struct ibmvfc_channel_enquiry *mad;
4918 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
4919 
4920 	ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
4921 	mad = &evt->iu.channel_enquiry;
4922 	memset(mad, 0, sizeof(*mad));
4923 	mad->common.version = cpu_to_be32(1);
4924 	mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
4925 	mad->common.length = cpu_to_be16(sizeof(*mad));
4926 
4927 	if (mig_channels_only)
4928 		mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
4929 	if (mig_no_less_channels)
4930 		mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
4931 
4932 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4933 
4934 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4935 		ibmvfc_dbg(vhost, "Send channel enquiry\n");
4936 	else
4937 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4938 }
4939 
4940 /**
4941  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
4942  * @evt:	ibmvfc event struct
4943  *
4944  **/
4945 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4946 {
4947 	struct ibmvfc_host *vhost = evt->vhost;
4948 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
4949 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
4950 	unsigned int npiv_max_sectors;
4951 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4952 
4953 	switch (mad_status) {
4954 	case IBMVFC_MAD_SUCCESS:
4955 		ibmvfc_free_event(evt);
4956 		break;
4957 	case IBMVFC_MAD_FAILED:
4958 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4959 			level += ibmvfc_retry_host_init(vhost);
4960 		else
4961 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4962 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4963 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4964 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4965 		ibmvfc_free_event(evt);
4966 		return;
4967 	case IBMVFC_MAD_CRQ_ERROR:
4968 		ibmvfc_retry_host_init(vhost);
4969 		fallthrough;
4970 	case IBMVFC_MAD_DRIVER_FAILED:
4971 		ibmvfc_free_event(evt);
4972 		return;
4973 	default:
4974 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
4975 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4976 		ibmvfc_free_event(evt);
4977 		return;
4978 	}
4979 
4980 	vhost->client_migrated = 0;
4981 
4982 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
4983 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
4984 			rsp->flags);
4985 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4986 		wake_up(&vhost->work_wait_q);
4987 		return;
4988 	}
4989 
4990 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
4991 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
4992 			rsp->max_cmds);
4993 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4994 		wake_up(&vhost->work_wait_q);
4995 		return;
4996 	}
4997 
4998 	vhost->logged_in = 1;
4999 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
5000 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
5001 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
5002 		 rsp->drc_name, npiv_max_sectors);
5003 
5004 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
5005 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
5006 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
5007 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
5008 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
5009 	fc_host_supported_classes(vhost->host) = 0;
5010 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
5011 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
5012 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
5013 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
5014 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
5015 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
5016 	fc_host_maxframe_size(vhost->host) =
5017 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
5018 
5019 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
5020 	vhost->host->max_sectors = npiv_max_sectors;
5021 
5022 	if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
5023 		ibmvfc_channel_enquiry(vhost);
5024 	} else {
5025 		vhost->do_enquiry = 0;
5026 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5027 		wake_up(&vhost->work_wait_q);
5028 	}
5029 }
5030 
5031 /**
5032  * ibmvfc_npiv_login - Sends NPIV login
5033  * @vhost:	ibmvfc host struct
5034  *
5035  **/
5036 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
5037 {
5038 	struct ibmvfc_npiv_login_mad *mad;
5039 	struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
5040 
5041 	ibmvfc_gather_partition_info(vhost);
5042 	ibmvfc_set_login_info(vhost);
5043 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
5044 
5045 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
5046 	mad = &evt->iu.npiv_login;
5047 	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
5048 	mad->common.version = cpu_to_be32(1);
5049 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
5050 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
5051 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
5052 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
5053 
5054 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
5055 
5056 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5057 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
5058 	else
5059 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5060 }
5061 
5062 /**
5063  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
5064  * @vhost:		ibmvfc host struct
5065  *
5066  **/
5067 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
5068 {
5069 	struct ibmvfc_host *vhost = evt->vhost;
5070 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
5071 
5072 	ibmvfc_free_event(evt);
5073 
5074 	switch (mad_status) {
5075 	case IBMVFC_MAD_SUCCESS:
5076 		if (list_empty(&vhost->crq.sent) &&
5077 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
5078 			ibmvfc_init_host(vhost);
5079 			return;
5080 		}
5081 		break;
5082 	case IBMVFC_MAD_FAILED:
5083 	case IBMVFC_MAD_NOT_SUPPORTED:
5084 	case IBMVFC_MAD_CRQ_ERROR:
5085 	case IBMVFC_MAD_DRIVER_FAILED:
5086 	default:
5087 		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
5088 		break;
5089 	}
5090 
5091 	ibmvfc_hard_reset_host(vhost);
5092 }
5093 
5094 /**
5095  * ibmvfc_npiv_logout - Issue an NPIV Logout
5096  * @vhost:		ibmvfc host struct
5097  *
5098  **/
5099 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
5100 {
5101 	struct ibmvfc_npiv_logout_mad *mad;
5102 	struct ibmvfc_event *evt;
5103 
5104 	evt = ibmvfc_get_event(&vhost->crq);
5105 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
5106 
5107 	mad = &evt->iu.npiv_logout;
5108 	memset(mad, 0, sizeof(*mad));
5109 	mad->common.version = cpu_to_be32(1);
5110 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
5111 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
5112 
5113 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
5114 
5115 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
5116 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
5117 	else
5118 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5119 }
5120 
5121 /**
5122  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
5123  * @vhost:		ibmvfc host struct
5124  *
5125  * Returns:
5126  *	1 if work to do / 0 if not
5127  **/
5128 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
5129 {
5130 	struct ibmvfc_target *tgt;
5131 
5132 	list_for_each_entry(tgt, &vhost->targets, queue) {
5133 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
5134 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5135 			return 1;
5136 	}
5137 
5138 	return 0;
5139 }
5140 
5141 /**
5142  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
5143  * @vhost:		ibmvfc host struct
5144  *
5145  * Returns:
5146  *	1 if work to do / 0 if not
5147  **/
5148 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
5149 {
5150 	struct ibmvfc_target *tgt;
5151 
5152 	list_for_each_entry(tgt, &vhost->targets, queue) {
5153 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
5154 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5155 			return 1;
5156 	}
5157 	return 0;
5158 }
5159 
5160 /**
5161  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
5162  * @vhost:		ibmvfc host struct
5163  *
5164  * Returns:
5165  *	1 if work to do / 0 if not
5166  **/
5167 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5168 {
5169 	struct ibmvfc_target *tgt;
5170 
5171 	if (kthread_should_stop())
5172 		return 1;
5173 	switch (vhost->action) {
5174 	case IBMVFC_HOST_ACTION_NONE:
5175 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5176 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5177 		return 0;
5178 	case IBMVFC_HOST_ACTION_TGT_INIT:
5179 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5180 		if (vhost->discovery_threads == disc_threads)
5181 			return 0;
5182 		list_for_each_entry(tgt, &vhost->targets, queue)
5183 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
5184 				return 1;
5185 		list_for_each_entry(tgt, &vhost->targets, queue)
5186 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
5187 				return 0;
5188 		return 1;
5189 	case IBMVFC_HOST_ACTION_TGT_DEL:
5190 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5191 		if (vhost->discovery_threads == disc_threads)
5192 			return 0;
5193 		list_for_each_entry(tgt, &vhost->targets, queue)
5194 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
5195 				return 1;
5196 		list_for_each_entry(tgt, &vhost->targets, queue)
5197 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
5198 				return 0;
5199 		return 1;
5200 	case IBMVFC_HOST_ACTION_LOGO:
5201 	case IBMVFC_HOST_ACTION_INIT:
5202 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5203 	case IBMVFC_HOST_ACTION_QUERY:
5204 	case IBMVFC_HOST_ACTION_RESET:
5205 	case IBMVFC_HOST_ACTION_REENABLE:
5206 	default:
5207 		break;
5208 	}
5209 
5210 	return 1;
5211 }
5212 
5213 /**
5214  * ibmvfc_work_to_do - Is there task level work to do?
5215  * @vhost:		ibmvfc host struct
5216  *
5217  * Returns:
5218  *	1 if work to do / 0 if not
5219  **/
5220 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
5221 {
5222 	unsigned long flags;
5223 	int rc;
5224 
5225 	spin_lock_irqsave(vhost->host->host_lock, flags);
5226 	rc = __ibmvfc_work_to_do(vhost);
5227 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5228 	return rc;
5229 }
5230 
5231 /**
5232  * ibmvfc_log_ae - Log async events if necessary
5233  * @vhost:		ibmvfc host struct
5234  * @events:		events to log
5235  *
5236  **/
5237 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
5238 {
5239 	if (events & IBMVFC_AE_RSCN)
5240 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
5241 	if ((events & IBMVFC_AE_LINKDOWN) &&
5242 	    vhost->state >= IBMVFC_HALTED)
5243 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
5244 	if ((events & IBMVFC_AE_LINKUP) &&
5245 	    vhost->state == IBMVFC_INITIALIZING)
5246 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
5247 }
5248 
5249 /**
5250  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
5251  * @tgt:		ibmvfc target struct
5252  *
5253  **/
5254 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
5255 {
5256 	struct ibmvfc_host *vhost = tgt->vhost;
5257 	struct fc_rport *rport;
5258 	unsigned long flags;
5259 
5260 	tgt_dbg(tgt, "Adding rport\n");
5261 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
5262 	spin_lock_irqsave(vhost->host->host_lock, flags);
5263 
5264 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5265 		tgt_dbg(tgt, "Deleting rport\n");
5266 		list_del(&tgt->queue);
5267 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5268 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5269 		fc_remote_port_delete(rport);
5270 		del_timer_sync(&tgt->timer);
5271 		kref_put(&tgt->kref, ibmvfc_release_tgt);
5272 		return;
5273 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5274 		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
5275 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5276 		tgt->rport = NULL;
5277 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5278 		fc_remote_port_delete(rport);
5279 		return;
5280 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
5281 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5282 		return;
5283 	}
5284 
5285 	if (rport) {
5286 		tgt_dbg(tgt, "rport add succeeded\n");
5287 		tgt->rport = rport;
5288 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
5289 		rport->supported_classes = 0;
5290 		tgt->target_id = rport->scsi_target_id;
5291 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
5292 			rport->supported_classes |= FC_COS_CLASS1;
5293 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
5294 			rport->supported_classes |= FC_COS_CLASS2;
5295 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
5296 			rport->supported_classes |= FC_COS_CLASS3;
5297 		if (rport->rqst_q)
5298 			blk_queue_max_segments(rport->rqst_q, 1);
5299 	} else
5300 		tgt_dbg(tgt, "rport add failed\n");
5301 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5302 }
5303 
5304 /**
5305  * ibmvfc_do_work - Do task level work
5306  * @vhost:		ibmvfc host struct
5307  *
5308  **/
5309 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
5310 {
5311 	struct ibmvfc_target *tgt;
5312 	unsigned long flags;
5313 	struct fc_rport *rport;
5314 	LIST_HEAD(purge);
5315 	int rc;
5316 
5317 	ibmvfc_log_ae(vhost, vhost->events_to_log);
5318 	spin_lock_irqsave(vhost->host->host_lock, flags);
5319 	vhost->events_to_log = 0;
5320 	switch (vhost->action) {
5321 	case IBMVFC_HOST_ACTION_NONE:
5322 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
5323 	case IBMVFC_HOST_ACTION_INIT_WAIT:
5324 		break;
5325 	case IBMVFC_HOST_ACTION_RESET:
5326 		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5327 		list_splice_init(&vhost->purge, &purge);
5328 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5329 		ibmvfc_complete_purge(&purge);
5330 		rc = ibmvfc_reset_crq(vhost);
5331 		spin_lock_irqsave(vhost->host->host_lock, flags);
5332 		if (rc == H_CLOSED)
5333 			vio_enable_interrupts(to_vio_dev(vhost->dev));
5334 		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
5335 		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
5336 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5337 			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
5338 		}
5339 		break;
5340 	case IBMVFC_HOST_ACTION_REENABLE:
5341 		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
5342 		list_splice_init(&vhost->purge, &purge);
5343 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5344 		ibmvfc_complete_purge(&purge);
5345 		rc = ibmvfc_reenable_crq_queue(vhost);
5346 		spin_lock_irqsave(vhost->host->host_lock, flags);
5347 		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
5348 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
5349 			dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
5350 		}
5351 		break;
5352 	case IBMVFC_HOST_ACTION_LOGO:
5353 		vhost->job_step(vhost);
5354 		break;
5355 	case IBMVFC_HOST_ACTION_INIT:
5356 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
5357 		if (vhost->delay_init) {
5358 			vhost->delay_init = 0;
5359 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5360 			ssleep(15);
5361 			return;
5362 		} else
5363 			vhost->job_step(vhost);
5364 		break;
5365 	case IBMVFC_HOST_ACTION_QUERY:
5366 		list_for_each_entry(tgt, &vhost->targets, queue)
5367 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
5368 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
5369 		break;
5370 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
5371 		list_for_each_entry(tgt, &vhost->targets, queue) {
5372 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5373 				tgt->job_step(tgt);
5374 				break;
5375 			}
5376 		}
5377 
5378 		if (!ibmvfc_dev_init_to_do(vhost))
5379 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
5380 		break;
5381 	case IBMVFC_HOST_ACTION_TGT_DEL:
5382 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
5383 		list_for_each_entry(tgt, &vhost->targets, queue) {
5384 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
5385 				tgt->job_step(tgt);
5386 				break;
5387 			}
5388 		}
5389 
5390 		if (ibmvfc_dev_logo_to_do(vhost)) {
5391 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5392 			return;
5393 		}
5394 
5395 		list_for_each_entry(tgt, &vhost->targets, queue) {
5396 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
5397 				tgt_dbg(tgt, "Deleting rport\n");
5398 				rport = tgt->rport;
5399 				tgt->rport = NULL;
5400 				list_del(&tgt->queue);
5401 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
5402 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5403 				if (rport)
5404 					fc_remote_port_delete(rport);
5405 				del_timer_sync(&tgt->timer);
5406 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5407 				return;
5408 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
5409 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
5410 				rport = tgt->rport;
5411 				tgt->rport = NULL;
5412 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
5413 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
5414 				if (rport)
5415 					fc_remote_port_delete(rport);
5416 				return;
5417 			}
5418 		}
5419 
5420 		if (vhost->state == IBMVFC_INITIALIZING) {
5421 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
5422 				if (vhost->reinit) {
5423 					vhost->reinit = 0;
5424 					scsi_block_requests(vhost->host);
5425 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
5426 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5427 				} else {
5428 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
5429 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5430 					wake_up(&vhost->init_wait_q);
5431 					schedule_work(&vhost->rport_add_work_q);
5432 					vhost->init_retries = 0;
5433 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5434 					scsi_unblock_requests(vhost->host);
5435 				}
5436 
5437 				return;
5438 			} else {
5439 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
5440 				vhost->job_step = ibmvfc_discover_targets;
5441 			}
5442 		} else {
5443 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
5444 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
5445 			scsi_unblock_requests(vhost->host);
5446 			wake_up(&vhost->init_wait_q);
5447 			return;
5448 		}
5449 		break;
5450 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
5451 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
5452 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
5453 		ibmvfc_alloc_targets(vhost);
5454 		spin_lock_irqsave(vhost->host->host_lock, flags);
5455 		break;
5456 	case IBMVFC_HOST_ACTION_TGT_INIT:
5457 		list_for_each_entry(tgt, &vhost->targets, queue) {
5458 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
5459 				tgt->job_step(tgt);
5460 				break;
5461 			}
5462 		}
5463 
5464 		if (!ibmvfc_dev_init_to_do(vhost))
5465 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
5466 		break;
5467 	default:
5468 		break;
5469 	}
5470 
5471 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5472 }
5473 
5474 /**
5475  * ibmvfc_work - Do task level work
5476  * @data:		ibmvfc host struct
5477  *
5478  * Returns:
5479  *	zero
5480  **/
5481 static int ibmvfc_work(void *data)
5482 {
5483 	struct ibmvfc_host *vhost = data;
5484 	int rc;
5485 
5486 	set_user_nice(current, MIN_NICE);
5487 
5488 	while (1) {
5489 		rc = wait_event_interruptible(vhost->work_wait_q,
5490 					      ibmvfc_work_to_do(vhost));
5491 
5492 		BUG_ON(rc);
5493 
5494 		if (kthread_should_stop())
5495 			break;
5496 
5497 		ibmvfc_do_work(vhost);
5498 	}
5499 
5500 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
5501 	return 0;
5502 }
5503 
5504 /**
5505  * ibmvfc_alloc_queue - Allocate queue
5506  * @vhost:	ibmvfc host struct
5507  * @queue:	ibmvfc queue to allocate
5508  * @fmt:	queue format to allocate
5509  *
5510  * Returns:
5511  *	0 on success / non-zero on failure
5512  **/
5513 static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
5514 			      struct ibmvfc_queue *queue,
5515 			      enum ibmvfc_msg_fmt fmt)
5516 {
5517 	struct device *dev = vhost->dev;
5518 	size_t fmt_size;
5519 	unsigned int pool_size = 0;
5520 
5521 	ENTER;
5522 	spin_lock_init(&queue->_lock);
5523 	queue->q_lock = &queue->_lock;
5524 
5525 	switch (fmt) {
5526 	case IBMVFC_CRQ_FMT:
5527 		fmt_size = sizeof(*queue->msgs.crq);
5528 		pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
5529 		break;
5530 	case IBMVFC_ASYNC_FMT:
5531 		fmt_size = sizeof(*queue->msgs.async);
5532 		break;
5533 	case IBMVFC_SUB_CRQ_FMT:
5534 		fmt_size = sizeof(*queue->msgs.scrq);
5535 		/* We need one extra event for Cancel Commands */
5536 		pool_size = max_requests + 1;
5537 		break;
5538 	default:
5539 		dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
5540 		return -EINVAL;
5541 	}
5542 
5543 	if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
5544 		dev_err(dev, "Couldn't initialize event pool.\n");
5545 		return -ENOMEM;
5546 	}
5547 
5548 	queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
5549 	if (!queue->msgs.handle)
5550 		return -ENOMEM;
5551 
5552 	queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
5553 					  DMA_BIDIRECTIONAL);
5554 
5555 	if (dma_mapping_error(dev, queue->msg_token)) {
5556 		free_page((unsigned long)queue->msgs.handle);
5557 		queue->msgs.handle = NULL;
5558 		return -ENOMEM;
5559 	}
5560 
5561 	queue->cur = 0;
5562 	queue->fmt = fmt;
5563 	queue->size = PAGE_SIZE / fmt_size;
5564 	return 0;
5565 }
5566 
5567 /**
5568  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
5569  * @vhost:	ibmvfc host struct
5570  *
5571  * Allocates a page for messages, maps it for dma, and registers
5572  * the crq with the hypervisor.
5573  *
5574  * Return value:
5575  *	zero on success / other on failure
5576  **/
5577 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
5578 {
5579 	int rc, retrc = -ENOMEM;
5580 	struct device *dev = vhost->dev;
5581 	struct vio_dev *vdev = to_vio_dev(dev);
5582 	struct ibmvfc_queue *crq = &vhost->crq;
5583 
5584 	ENTER;
5585 	if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
5586 		return -ENOMEM;
5587 
5588 	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
5589 					crq->msg_token, PAGE_SIZE);
5590 
5591 	if (rc == H_RESOURCE)
5592 		/* maybe kexecing and resource is busy. try a reset */
5593 		retrc = rc = ibmvfc_reset_crq(vhost);
5594 
5595 	if (rc == H_CLOSED)
5596 		dev_warn(dev, "Partner adapter not ready\n");
5597 	else if (rc) {
5598 		dev_warn(dev, "Error %d opening adapter\n", rc);
5599 		goto reg_crq_failed;
5600 	}
5601 
5602 	retrc = 0;
5603 
5604 	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
5605 
5606 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
5607 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
5608 		goto req_irq_failed;
5609 	}
5610 
5611 	if ((rc = vio_enable_interrupts(vdev))) {
5612 		dev_err(dev, "Error %d enabling interrupts\n", rc);
5613 		goto req_irq_failed;
5614 	}
5615 
5616 	LEAVE;
5617 	return retrc;
5618 
5619 req_irq_failed:
5620 	tasklet_kill(&vhost->tasklet);
5621 	do {
5622 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
5623 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5624 reg_crq_failed:
5625 	ibmvfc_free_queue(vhost, crq);
5626 	return retrc;
5627 }
5628 
5629 static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
5630 				  int index)
5631 {
5632 	struct device *dev = vhost->dev;
5633 	struct vio_dev *vdev = to_vio_dev(dev);
5634 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5635 	int rc = -ENOMEM;
5636 
5637 	ENTER;
5638 
5639 	if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
5640 		return -ENOMEM;
5641 
5642 	rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
5643 			   &scrq->cookie, &scrq->hw_irq);
5644 
5645 	if (rc) {
5646 		dev_warn(dev, "Error registering sub-crq: %d\n", rc);
5647 		if (rc == H_PARAMETER)
5648 			dev_warn_once(dev, "Firmware may not support MQ\n");
5649 		goto reg_failed;
5650 	}
5651 
5652 	scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
5653 
5654 	if (!scrq->irq) {
5655 		rc = -EINVAL;
5656 		dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
5657 		goto irq_failed;
5658 	}
5659 
5660 	snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
5661 		 vdev->unit_address, index);
5662 	rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
5663 
5664 	if (rc) {
5665 		dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
5666 		irq_dispose_mapping(scrq->irq);
5667 		goto irq_failed;
5668 	}
5669 
5670 	scrq->hwq_id = index;
5671 	scrq->vhost = vhost;
5672 
5673 	LEAVE;
5674 	return 0;
5675 
5676 irq_failed:
5677 	do {
5678 		plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
5679 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5680 reg_failed:
5681 	ibmvfc_free_queue(vhost, scrq);
5682 	LEAVE;
5683 	return rc;
5684 }
5685 
5686 static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
5687 {
5688 	struct device *dev = vhost->dev;
5689 	struct vio_dev *vdev = to_vio_dev(dev);
5690 	struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
5691 	long rc;
5692 
5693 	ENTER;
5694 
5695 	free_irq(scrq->irq, scrq);
5696 	irq_dispose_mapping(scrq->irq);
5697 
5698 	do {
5699 		rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
5700 					scrq->cookie);
5701 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
5702 
5703 	if (rc)
5704 		dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
5705 
5706 	ibmvfc_free_queue(vhost, scrq);
5707 	LEAVE;
5708 }
5709 
5710 static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
5711 {
5712 	int i, j;
5713 
5714 	ENTER;
5715 
5716 	vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
5717 					  sizeof(*vhost->scsi_scrqs.scrqs),
5718 					  GFP_KERNEL);
5719 	if (!vhost->scsi_scrqs.scrqs)
5720 		return -1;
5721 
5722 	for (i = 0; i < nr_scsi_hw_queues; i++) {
5723 		if (ibmvfc_register_scsi_channel(vhost, i)) {
5724 			for (j = i; j > 0; j--)
5725 				ibmvfc_deregister_scsi_channel(vhost, j - 1);
5726 			kfree(vhost->scsi_scrqs.scrqs);
5727 			vhost->scsi_scrqs.scrqs = NULL;
5728 			vhost->scsi_scrqs.active_queues = 0;
5729 			LEAVE;
5730 			return -1;
5731 		}
5732 	}
5733 
5734 	LEAVE;
5735 	return 0;
5736 }
5737 
5738 static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
5739 {
5740 	int i;
5741 
5742 	ENTER;
5743 	if (!vhost->scsi_scrqs.scrqs)
5744 		return;
5745 
5746 	for (i = 0; i < nr_scsi_hw_queues; i++)
5747 		ibmvfc_deregister_scsi_channel(vhost, i);
5748 
5749 	kfree(vhost->scsi_scrqs.scrqs);
5750 	vhost->scsi_scrqs.scrqs = NULL;
5751 	vhost->scsi_scrqs.active_queues = 0;
5752 	LEAVE;
5753 }
5754 
5755 /**
5756  * ibmvfc_free_mem - Free memory for vhost
5757  * @vhost:	ibmvfc host struct
5758  *
5759  * Return value:
5760  * 	none
5761  **/
5762 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
5763 {
5764 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5765 
5766 	ENTER;
5767 	mempool_destroy(vhost->tgt_pool);
5768 	kfree(vhost->trace);
5769 	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
5770 			  vhost->disc_buf_dma);
5771 	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
5772 			  vhost->login_buf, vhost->login_buf_dma);
5773 	dma_pool_destroy(vhost->sg_pool);
5774 	ibmvfc_free_queue(vhost, async_q);
5775 	LEAVE;
5776 }
5777 
5778 /**
5779  * ibmvfc_alloc_mem - Allocate memory for vhost
5780  * @vhost:	ibmvfc host struct
5781  *
5782  * Return value:
5783  * 	0 on success / non-zero on failure
5784  **/
5785 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
5786 {
5787 	struct ibmvfc_queue *async_q = &vhost->async_crq;
5788 	struct device *dev = vhost->dev;
5789 
5790 	ENTER;
5791 	if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
5792 		dev_err(dev, "Couldn't allocate/map async queue.\n");
5793 		goto nomem;
5794 	}
5795 
5796 	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
5797 					 SG_ALL * sizeof(struct srp_direct_buf),
5798 					 sizeof(struct srp_direct_buf), 0);
5799 
5800 	if (!vhost->sg_pool) {
5801 		dev_err(dev, "Failed to allocate sg pool\n");
5802 		goto unmap_async_crq;
5803 	}
5804 
5805 	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
5806 					      &vhost->login_buf_dma, GFP_KERNEL);
5807 
5808 	if (!vhost->login_buf) {
5809 		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
5810 		goto free_sg_pool;
5811 	}
5812 
5813 	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
5814 	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
5815 					     &vhost->disc_buf_dma, GFP_KERNEL);
5816 
5817 	if (!vhost->disc_buf) {
5818 		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
5819 		goto free_login_buffer;
5820 	}
5821 
5822 	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
5823 			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
5824 	atomic_set(&vhost->trace_index, -1);
5825 
5826 	if (!vhost->trace)
5827 		goto free_disc_buffer;
5828 
5829 	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
5830 						      sizeof(struct ibmvfc_target));
5831 
5832 	if (!vhost->tgt_pool) {
5833 		dev_err(dev, "Couldn't allocate target memory pool\n");
5834 		goto free_trace;
5835 	}
5836 
5837 	vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
5838 						      &vhost->channel_setup_dma,
5839 						      GFP_KERNEL);
5840 
5841 	if (!vhost->channel_setup_buf) {
5842 		dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
5843 		goto free_tgt_pool;
5844 	}
5845 
5846 	LEAVE;
5847 	return 0;
5848 
5849 free_tgt_pool:
5850 	mempool_destroy(vhost->tgt_pool);
5851 free_trace:
5852 	kfree(vhost->trace);
5853 free_disc_buffer:
5854 	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
5855 			  vhost->disc_buf_dma);
5856 free_login_buffer:
5857 	dma_free_coherent(dev, sizeof(*vhost->login_buf),
5858 			  vhost->login_buf, vhost->login_buf_dma);
5859 free_sg_pool:
5860 	dma_pool_destroy(vhost->sg_pool);
5861 unmap_async_crq:
5862 	ibmvfc_free_queue(vhost, async_q);
5863 nomem:
5864 	LEAVE;
5865 	return -ENOMEM;
5866 }
5867 
5868 /**
5869  * ibmvfc_rport_add_thread - Worker thread for rport adds
5870  * @work:	work struct
5871  *
5872  **/
5873 static void ibmvfc_rport_add_thread(struct work_struct *work)
5874 {
5875 	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
5876 						 rport_add_work_q);
5877 	struct ibmvfc_target *tgt;
5878 	struct fc_rport *rport;
5879 	unsigned long flags;
5880 	int did_work;
5881 
5882 	ENTER;
5883 	spin_lock_irqsave(vhost->host->host_lock, flags);
5884 	do {
5885 		did_work = 0;
5886 		if (vhost->state != IBMVFC_ACTIVE)
5887 			break;
5888 
5889 		list_for_each_entry(tgt, &vhost->targets, queue) {
5890 			if (tgt->add_rport) {
5891 				did_work = 1;
5892 				tgt->add_rport = 0;
5893 				kref_get(&tgt->kref);
5894 				rport = tgt->rport;
5895 				if (!rport) {
5896 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5897 					ibmvfc_tgt_add_rport(tgt);
5898 				} else if (get_device(&rport->dev)) {
5899 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5900 					tgt_dbg(tgt, "Setting rport roles\n");
5901 					fc_remote_port_rolechg(rport, tgt->ids.roles);
5902 					put_device(&rport->dev);
5903 				} else {
5904 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5905 				}
5906 
5907 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5908 				spin_lock_irqsave(vhost->host->host_lock, flags);
5909 				break;
5910 			}
5911 		}
5912 	} while(did_work);
5913 
5914 	if (vhost->state == IBMVFC_ACTIVE)
5915 		vhost->scan_complete = 1;
5916 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5917 	LEAVE;
5918 }
5919 
5920 /**
5921  * ibmvfc_probe - Adapter hot plug add entry point
5922  * @vdev:	vio device struct
5923  * @id:	vio device id struct
5924  *
5925  * Return value:
5926  * 	0 on success / non-zero on failure
5927  **/
5928 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
5929 {
5930 	struct ibmvfc_host *vhost;
5931 	struct Scsi_Host *shost;
5932 	struct device *dev = &vdev->dev;
5933 	int rc = -ENOMEM;
5934 	unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
5935 
5936 	ENTER;
5937 	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
5938 	if (!shost) {
5939 		dev_err(dev, "Couldn't allocate host data\n");
5940 		goto out;
5941 	}
5942 
5943 	shost->transportt = ibmvfc_transport_template;
5944 	shost->can_queue = max_requests;
5945 	shost->max_lun = max_lun;
5946 	shost->max_id = max_targets;
5947 	shost->max_sectors = IBMVFC_MAX_SECTORS;
5948 	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
5949 	shost->unique_id = shost->host_no;
5950 	shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
5951 
5952 	vhost = shost_priv(shost);
5953 	INIT_LIST_HEAD(&vhost->targets);
5954 	INIT_LIST_HEAD(&vhost->purge);
5955 	sprintf(vhost->name, IBMVFC_NAME);
5956 	vhost->host = shost;
5957 	vhost->dev = dev;
5958 	vhost->partition_number = -1;
5959 	vhost->log_level = log_level;
5960 	vhost->task_set = 1;
5961 
5962 	vhost->mq_enabled = mq_enabled;
5963 	vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
5964 	vhost->using_channels = 0;
5965 	vhost->do_enquiry = 1;
5966 
5967 	strcpy(vhost->partition_name, "UNKNOWN");
5968 	init_waitqueue_head(&vhost->work_wait_q);
5969 	init_waitqueue_head(&vhost->init_wait_q);
5970 	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
5971 	mutex_init(&vhost->passthru_mutex);
5972 
5973 	if ((rc = ibmvfc_alloc_mem(vhost)))
5974 		goto free_scsi_host;
5975 
5976 	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
5977 					 shost->host_no);
5978 
5979 	if (IS_ERR(vhost->work_thread)) {
5980 		dev_err(dev, "Couldn't create kernel thread: %ld\n",
5981 			PTR_ERR(vhost->work_thread));
5982 		rc = PTR_ERR(vhost->work_thread);
5983 		goto free_host_mem;
5984 	}
5985 
5986 	if ((rc = ibmvfc_init_crq(vhost))) {
5987 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
5988 		goto kill_kthread;
5989 	}
5990 
5991 	if ((rc = scsi_add_host(shost, dev)))
5992 		goto release_crq;
5993 
5994 	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
5995 
5996 	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
5997 					   &ibmvfc_trace_attr))) {
5998 		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
5999 		goto remove_shost;
6000 	}
6001 
6002 	if (vhost->mq_enabled) {
6003 		rc = ibmvfc_init_sub_crqs(vhost);
6004 		if (rc)
6005 			dev_warn(dev, "Failed to allocate Sub-CRQs. rc=%d\n", rc);
6006 	}
6007 
6008 	if (shost_to_fc_host(shost)->rqst_q)
6009 		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
6010 	dev_set_drvdata(dev, vhost);
6011 	spin_lock(&ibmvfc_driver_lock);
6012 	list_add_tail(&vhost->queue, &ibmvfc_head);
6013 	spin_unlock(&ibmvfc_driver_lock);
6014 
6015 	ibmvfc_send_crq_init(vhost);
6016 	scsi_scan_host(shost);
6017 	return 0;
6018 
6019 remove_shost:
6020 	scsi_remove_host(shost);
6021 release_crq:
6022 	ibmvfc_release_crq_queue(vhost);
6023 kill_kthread:
6024 	kthread_stop(vhost->work_thread);
6025 free_host_mem:
6026 	ibmvfc_free_mem(vhost);
6027 free_scsi_host:
6028 	scsi_host_put(shost);
6029 out:
6030 	LEAVE;
6031 	return rc;
6032 }
6033 
6034 /**
6035  * ibmvfc_remove - Adapter hot plug remove entry point
6036  * @vdev:	vio device struct
6037  *
6038  * Return value:
6039  * 	0
6040  **/
6041 static int ibmvfc_remove(struct vio_dev *vdev)
6042 {
6043 	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
6044 	LIST_HEAD(purge);
6045 	unsigned long flags;
6046 
6047 	ENTER;
6048 	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
6049 
6050 	spin_lock_irqsave(vhost->host->host_lock, flags);
6051 	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
6052 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6053 
6054 	ibmvfc_wait_while_resetting(vhost);
6055 	kthread_stop(vhost->work_thread);
6056 	fc_remove_host(vhost->host);
6057 	scsi_remove_host(vhost->host);
6058 
6059 	spin_lock_irqsave(vhost->host->host_lock, flags);
6060 	ibmvfc_purge_requests(vhost, DID_ERROR);
6061 	list_splice_init(&vhost->purge, &purge);
6062 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6063 	ibmvfc_complete_purge(&purge);
6064 	ibmvfc_release_sub_crqs(vhost);
6065 	ibmvfc_release_crq_queue(vhost);
6066 
6067 	ibmvfc_free_mem(vhost);
6068 	spin_lock(&ibmvfc_driver_lock);
6069 	list_del(&vhost->queue);
6070 	spin_unlock(&ibmvfc_driver_lock);
6071 	scsi_host_put(vhost->host);
6072 	LEAVE;
6073 	return 0;
6074 }
6075 
6076 /**
6077  * ibmvfc_resume - Resume from suspend
6078  * @dev:	device struct
6079  *
6080  * We may have lost an interrupt across suspend/resume, so kick the
6081  * interrupt handler
6082  *
6083  */
6084 static int ibmvfc_resume(struct device *dev)
6085 {
6086 	unsigned long flags;
6087 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
6088 	struct vio_dev *vdev = to_vio_dev(dev);
6089 
6090 	spin_lock_irqsave(vhost->host->host_lock, flags);
6091 	vio_disable_interrupts(vdev);
6092 	tasklet_schedule(&vhost->tasklet);
6093 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
6094 	return 0;
6095 }
6096 
6097 /**
6098  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
6099  * @vdev:	vio device struct
6100  *
6101  * Return value:
6102  *	Number of bytes the driver will need to DMA map at the same time in
6103  *	order to perform well.
6104  */
6105 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
6106 {
6107 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
6108 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
6109 }
6110 
6111 static const struct vio_device_id ibmvfc_device_table[] = {
6112 	{"fcp", "IBM,vfc-client"},
6113 	{ "", "" }
6114 };
6115 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
6116 
6117 static const struct dev_pm_ops ibmvfc_pm_ops = {
6118 	.resume = ibmvfc_resume
6119 };
6120 
6121 static struct vio_driver ibmvfc_driver = {
6122 	.id_table = ibmvfc_device_table,
6123 	.probe = ibmvfc_probe,
6124 	.remove = ibmvfc_remove,
6125 	.get_desired_dma = ibmvfc_get_desired_dma,
6126 	.name = IBMVFC_NAME,
6127 	.pm = &ibmvfc_pm_ops,
6128 };
6129 
6130 static struct fc_function_template ibmvfc_transport_functions = {
6131 	.show_host_fabric_name = 1,
6132 	.show_host_node_name = 1,
6133 	.show_host_port_name = 1,
6134 	.show_host_supported_classes = 1,
6135 	.show_host_port_type = 1,
6136 	.show_host_port_id = 1,
6137 	.show_host_maxframe_size = 1,
6138 
6139 	.get_host_port_state = ibmvfc_get_host_port_state,
6140 	.show_host_port_state = 1,
6141 
6142 	.get_host_speed = ibmvfc_get_host_speed,
6143 	.show_host_speed = 1,
6144 
6145 	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
6146 	.terminate_rport_io = ibmvfc_terminate_rport_io,
6147 
6148 	.show_rport_maxframe_size = 1,
6149 	.show_rport_supported_classes = 1,
6150 
6151 	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
6152 	.show_rport_dev_loss_tmo = 1,
6153 
6154 	.get_starget_node_name = ibmvfc_get_starget_node_name,
6155 	.show_starget_node_name = 1,
6156 
6157 	.get_starget_port_name = ibmvfc_get_starget_port_name,
6158 	.show_starget_port_name = 1,
6159 
6160 	.get_starget_port_id = ibmvfc_get_starget_port_id,
6161 	.show_starget_port_id = 1,
6162 
6163 	.bsg_request = ibmvfc_bsg_request,
6164 	.bsg_timeout = ibmvfc_bsg_timeout,
6165 };
6166 
6167 /**
6168  * ibmvfc_module_init - Initialize the ibmvfc module
6169  *
6170  * Return value:
6171  * 	0 on success / other on failure
6172  **/
6173 static int __init ibmvfc_module_init(void)
6174 {
6175 	int rc;
6176 
6177 	if (!firmware_has_feature(FW_FEATURE_VIO))
6178 		return -ENODEV;
6179 
6180 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
6181 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
6182 
6183 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
6184 	if (!ibmvfc_transport_template)
6185 		return -ENOMEM;
6186 
6187 	rc = vio_register_driver(&ibmvfc_driver);
6188 	if (rc)
6189 		fc_release_transport(ibmvfc_transport_template);
6190 	return rc;
6191 }
6192 
6193 /**
6194  * ibmvfc_module_exit - Teardown the ibmvfc module
6195  *
6196  * Return value:
6197  * 	nothing
6198  **/
6199 static void __exit ibmvfc_module_exit(void)
6200 {
6201 	vio_unregister_driver(&ibmvfc_driver);
6202 	fc_release_transport(ibmvfc_transport_template);
6203 }
6204 
6205 module_init(ibmvfc_module_init);
6206 module_exit(ibmvfc_module_exit);
6207