xref: /openbmc/linux/drivers/scsi/ibmvscsi/ibmvfc.c (revision 0b26ca68)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
4  *
5  * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
6  *
7  * Copyright (C) IBM Corporation, 2008
8  */
9 
10 #include <linux/module.h>
11 #include <linux/moduleparam.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/dmapool.h>
14 #include <linux/delay.h>
15 #include <linux/interrupt.h>
16 #include <linux/kthread.h>
17 #include <linux/slab.h>
18 #include <linux/of.h>
19 #include <linux/pm.h>
20 #include <linux/stringify.h>
21 #include <linux/bsg-lib.h>
22 #include <asm/firmware.h>
23 #include <asm/irq.h>
24 #include <asm/vio.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_cmnd.h>
27 #include <scsi/scsi_host.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include "ibmvfc.h"
33 
34 static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
35 static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
36 static u64 max_lun = IBMVFC_MAX_LUN;
37 static unsigned int max_targets = IBMVFC_MAX_TARGETS;
38 static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
39 static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
40 static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
41 static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
42 static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
43 static LIST_HEAD(ibmvfc_head);
44 static DEFINE_SPINLOCK(ibmvfc_driver_lock);
45 static struct scsi_transport_template *ibmvfc_transport_template;
46 
47 MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
48 MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
49 MODULE_LICENSE("GPL");
50 MODULE_VERSION(IBMVFC_DRIVER_VERSION);
51 
52 module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
53 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
54 		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
55 module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
56 MODULE_PARM_DESC(default_timeout,
57 		 "Default timeout in seconds for initialization and EH commands. "
58 		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
59 module_param_named(max_requests, max_requests, uint, S_IRUGO);
60 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
61 		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
62 module_param_named(max_lun, max_lun, ullong, S_IRUGO);
63 MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
64 		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
65 module_param_named(max_targets, max_targets, uint, S_IRUGO);
66 MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
67 		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
68 module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
69 MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
70 		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
71 module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
72 MODULE_PARM_DESC(debug, "Enable driver debug information. "
73 		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
74 module_param_named(log_level, log_level, uint, 0);
75 MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
76 		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
77 module_param_named(cls3_error, cls3_error, uint, 0);
78 MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
79 		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");
80 
/*
 * Table mapping (status class, error) pairs reported by the VIOS to the
 * SCSI result to return, whether the command may be retried, and a
 * human readable description used for error reporting.
 */
static const struct {
	u16 status;	/* error class: fabric mapped, VIOS, FC, or FC SCSI */
	u16 error;	/* specific error code within the class */
	u8 result;	/* SCSI host byte (DID_*) to complete the command with */
	u8 retry;	/* non-zero if the command may be retried */
	int log;	/* non-zero if this error should be logged */
	char *name;	/* description for error reporting */
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};
130 
131 static void ibmvfc_npiv_login(struct ibmvfc_host *);
132 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
133 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
134 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
135 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
136 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
137 static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
138 
139 static const char *unknown_error = "unknown error";
140 
141 static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
142 {
143 	u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
144 
145 	return (host_caps & cap_flags) ? 1 : 0;
146 }
147 
148 static struct ibmvfc_fcp_cmd_iu *ibmvfc_get_fcp_iu(struct ibmvfc_host *vhost,
149 						   struct ibmvfc_cmd *vfc_cmd)
150 {
151 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
152 		return &vfc_cmd->v2.iu;
153 	else
154 		return &vfc_cmd->v1.iu;
155 }
156 
157 static struct ibmvfc_fcp_rsp *ibmvfc_get_fcp_rsp(struct ibmvfc_host *vhost,
158 						 struct ibmvfc_cmd *vfc_cmd)
159 {
160 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
161 		return &vfc_cmd->v2.rsp;
162 	else
163 		return &vfc_cmd->v1.rsp;
164 }
165 
166 #ifdef CONFIG_SCSI_IBMVFC_TRACE
167 /**
168  * ibmvfc_trc_start - Log a start trace entry
169  * @evt:		ibmvfc event struct
170  *
171  **/
172 static void ibmvfc_trc_start(struct ibmvfc_event *evt)
173 {
174 	struct ibmvfc_host *vhost = evt->vhost;
175 	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
176 	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
177 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
178 	struct ibmvfc_trace_entry *entry;
179 
180 	entry = &vhost->trace[vhost->trace_index++];
181 	entry->evt = evt;
182 	entry->time = jiffies;
183 	entry->fmt = evt->crq.format;
184 	entry->type = IBMVFC_TRC_START;
185 
186 	switch (entry->fmt) {
187 	case IBMVFC_CMD_FORMAT:
188 		entry->op_code = iu->cdb[0];
189 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
190 		entry->lun = scsilun_to_int(&iu->lun);
191 		entry->tmf_flags = iu->tmf_flags;
192 		entry->u.start.xfer_len = be32_to_cpu(iu->xfer_len);
193 		break;
194 	case IBMVFC_MAD_FORMAT:
195 		entry->op_code = be32_to_cpu(mad->opcode);
196 		break;
197 	default:
198 		break;
199 	}
200 }
201 
202 /**
203  * ibmvfc_trc_end - Log an end trace entry
204  * @evt:		ibmvfc event struct
205  *
206  **/
207 static void ibmvfc_trc_end(struct ibmvfc_event *evt)
208 {
209 	struct ibmvfc_host *vhost = evt->vhost;
210 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
211 	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
212 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
213 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
214 	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
215 
216 	entry->evt = evt;
217 	entry->time = jiffies;
218 	entry->fmt = evt->crq.format;
219 	entry->type = IBMVFC_TRC_END;
220 
221 	switch (entry->fmt) {
222 	case IBMVFC_CMD_FORMAT:
223 		entry->op_code = iu->cdb[0];
224 		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
225 		entry->lun = scsilun_to_int(&iu->lun);
226 		entry->tmf_flags = iu->tmf_flags;
227 		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
228 		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
229 		entry->u.end.fcp_rsp_flags = rsp->flags;
230 		entry->u.end.rsp_code = rsp->data.info.rsp_code;
231 		entry->u.end.scsi_status = rsp->scsi_status;
232 		break;
233 	case IBMVFC_MAD_FORMAT:
234 		entry->op_code = be32_to_cpu(mad->opcode);
235 		entry->u.end.status = be16_to_cpu(mad->status);
236 		break;
237 	default:
238 		break;
239 
240 	}
241 }
242 
243 #else
244 #define ibmvfc_trc_start(evt) do { } while (0)
245 #define ibmvfc_trc_end(evt) do { } while (0)
246 #endif
247 
248 /**
249  * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
250  * @status:		status / error class
251  * @error:		error
252  *
253  * Return value:
254  *	index into cmd_status / -EINVAL on failure
255  **/
256 static int ibmvfc_get_err_index(u16 status, u16 error)
257 {
258 	int i;
259 
260 	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
261 		if ((cmd_status[i].status & status) == cmd_status[i].status &&
262 		    cmd_status[i].error == error)
263 			return i;
264 
265 	return -EINVAL;
266 }
267 
268 /**
269  * ibmvfc_get_cmd_error - Find the error description for the fcp response
270  * @status:		status / error class
271  * @error:		error
272  *
273  * Return value:
274  *	error description string
275  **/
276 static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
277 {
278 	int rc = ibmvfc_get_err_index(status, error);
279 	if (rc >= 0)
280 		return cmd_status[rc].name;
281 	return unknown_error;
282 }
283 
284 /**
285  * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
286  * @vfc_cmd:	ibmvfc command struct
287  *
288  * Return value:
289  *	SCSI result value to return for completed command
290  **/
291 static int ibmvfc_get_err_result(struct ibmvfc_host *vhost, struct ibmvfc_cmd *vfc_cmd)
292 {
293 	int err;
294 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
295 	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
296 
297 	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
298 	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
299 	     rsp->data.info.rsp_code))
300 		return DID_ERROR << 16;
301 
302 	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
303 	if (err >= 0)
304 		return rsp->scsi_status | (cmd_status[err].result << 16);
305 	return rsp->scsi_status | (DID_ERROR << 16);
306 }
307 
308 /**
309  * ibmvfc_retry_cmd - Determine if error status is retryable
310  * @status:		status / error class
311  * @error:		error
312  *
313  * Return value:
314  *	1 if error should be retried / 0 if it should not
315  **/
316 static int ibmvfc_retry_cmd(u16 status, u16 error)
317 {
318 	int rc = ibmvfc_get_err_index(status, error);
319 
320 	if (rc >= 0)
321 		return cmd_status[rc].retry;
322 	return 1;
323 }
324 
325 static const char *unknown_fc_explain = "unknown fc explain";
326 
/* FC ELS reject (LS_RJT) reason explanation codes and their descriptions */
static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};
349 
/* FC generic services (CT) reject explanation codes and their descriptions */
static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};
372 
373 /**
374  * ibmvfc_get_ls_explain - Return the FC Explain description text
375  * @status:	FC Explain status
376  *
377  * Returns:
378  *	error string
379  **/
380 static const char *ibmvfc_get_ls_explain(u16 status)
381 {
382 	int i;
383 
384 	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
385 		if (ls_explain[i].fc_explain == status)
386 			return ls_explain[i].name;
387 
388 	return unknown_fc_explain;
389 }
390 
391 /**
392  * ibmvfc_get_gs_explain - Return the FC Explain description text
393  * @status:	FC Explain status
394  *
395  * Returns:
396  *	error string
397  **/
398 static const char *ibmvfc_get_gs_explain(u16 status)
399 {
400 	int i;
401 
402 	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
403 		if (gs_explain[i].fc_explain == status)
404 			return gs_explain[i].name;
405 
406 	return unknown_fc_explain;
407 }
408 
/* FC error types reported in ELS/CT responses and their descriptions */
static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};
420 
421 static const char *unknown_fc_type = "unknown fc type";
422 
423 /**
424  * ibmvfc_get_fc_type - Return the FC Type description text
425  * @status:	FC Type error status
426  *
427  * Returns:
428  *	error string
429  **/
430 static const char *ibmvfc_get_fc_type(u16 status)
431 {
432 	int i;
433 
434 	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
435 		if (fc_type[i].fc_type == status)
436 			return fc_type[i].name;
437 
438 	return unknown_fc_type;
439 }
440 
441 /**
442  * ibmvfc_set_tgt_action - Set the next init action for the target
443  * @tgt:		ibmvfc target struct
444  * @action:		action to perform
445  *
446  * Returns:
447  *	0 if action changed / non-zero if not changed
448  **/
449 static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
450 				  enum ibmvfc_target_action action)
451 {
452 	int rc = -EINVAL;
453 
454 	switch (tgt->action) {
455 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
456 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
457 		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
458 			tgt->action = action;
459 			rc = 0;
460 		}
461 		break;
462 	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
463 		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
464 		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
465 			tgt->action = action;
466 			rc = 0;
467 		}
468 		break;
469 	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
470 		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
471 			tgt->action = action;
472 			rc = 0;
473 		}
474 		break;
475 	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
476 		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
477 			tgt->action = action;
478 			rc = 0;
479 		}
480 		break;
481 	case IBMVFC_TGT_ACTION_DEL_RPORT:
482 		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
483 			tgt->action = action;
484 			rc = 0;
485 		}
486 		break;
487 	case IBMVFC_TGT_ACTION_DELETED_RPORT:
488 		break;
489 	default:
490 		tgt->action = action;
491 		rc = 0;
492 		break;
493 	}
494 
495 	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
496 		tgt->add_rport = 0;
497 
498 	return rc;
499 }
500 
501 /**
502  * ibmvfc_set_host_state - Set the state for the host
503  * @vhost:		ibmvfc host struct
504  * @state:		state to set host to
505  *
506  * Returns:
507  *	0 if state changed / non-zero if not changed
508  **/
509 static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
510 				  enum ibmvfc_host_state state)
511 {
512 	int rc = 0;
513 
514 	switch (vhost->state) {
515 	case IBMVFC_HOST_OFFLINE:
516 		rc = -EINVAL;
517 		break;
518 	default:
519 		vhost->state = state;
520 		break;
521 	}
522 
523 	return rc;
524 }
525 
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:		ibmvfc host struct
 * @action:		action to perform
 *
 * Applies only legal transitions of the host state machine; an illegal
 * transition is silently ignored and vhost->action is left unchanged.
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		/* target allocation may only follow init completion */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		/* may only wait for a logout we actually issued */
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		/* may only wait for an init we actually issued */
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
		/* a pending CRQ reset/reenable takes precedence */
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	case IBMVFC_HOST_ACTION_RESET:
	case IBMVFC_HOST_ACTION_REENABLE:
	default:
		/* these actions are always accepted */
		vhost->action = action;
		break;
	}
}
585 
586 /**
587  * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
588  * @vhost:		ibmvfc host struct
589  *
590  * Return value:
591  *	nothing
592  **/
593 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
594 {
595 	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
596 	    vhost->state == IBMVFC_ACTIVE) {
597 		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
598 			scsi_block_requests(vhost->host);
599 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
600 		}
601 	} else
602 		vhost->reinit = 1;
603 
604 	wake_up(&vhost->work_wait_q);
605 }
606 
/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:		ibmvfc target struct
 *
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	/* only install the logout job step if the transition was accepted */
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
	wake_up(&tgt->vhost->work_wait_q);
}
619 
/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	/* stop the midlayer from queueing new commands while link is down */
	scsi_block_requests(vhost->host);
	/* schedule logout/removal of every known target */
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	/* let the worker thread process the target deletions */
	wake_up(&vhost->work_wait_q);
	LEAVE;
}
641 
642 /**
643  * ibmvfc_init_host - Start host initialization
644  * @vhost:		ibmvfc host struct
645  *
646  * Return value:
647  *	nothing
648  **/
649 static void ibmvfc_init_host(struct ibmvfc_host *vhost)
650 {
651 	struct ibmvfc_target *tgt;
652 
653 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
654 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
655 			dev_err(vhost->dev,
656 				"Host initialization retries exceeded. Taking adapter offline\n");
657 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
658 			return;
659 		}
660 	}
661 
662 	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
663 		memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
664 		vhost->async_crq.cur = 0;
665 
666 		list_for_each_entry(tgt, &vhost->targets, queue)
667 			ibmvfc_del_tgt(tgt);
668 		scsi_block_requests(vhost->host);
669 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
670 		vhost->job_step = ibmvfc_npiv_login;
671 		wake_up(&vhost->work_wait_q);
672 	}
673 }
674 
/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost:	ibmvfc host struct
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 *
 * Thin wrapper around the H_SEND_CRQ hypervisor call for this
 * adapter's unit address.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
689 
/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * The 0xC001... value is the CRQ initialization message format.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}
702 
/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * The 0xC002... value is the CRQ initialization-complete message format,
 * sent in response to the partner's init message.
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}
715 
716 /**
717  * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
718  * @vhost:	ibmvfc host struct
719  *
720  * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
721  * the crq with the hypervisor.
722  **/
723 static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
724 {
725 	long rc = 0;
726 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
727 	struct ibmvfc_crq_queue *crq = &vhost->crq;
728 
729 	ibmvfc_dbg(vhost, "Releasing CRQ\n");
730 	free_irq(vdev->irq, vhost);
731 	tasklet_kill(&vhost->tasklet);
732 	do {
733 		if (rc)
734 			msleep(100);
735 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
736 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
737 
738 	vhost->state = IBMVFC_NO_CRQ;
739 	vhost->logged_in = 0;
740 	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
741 	free_page((unsigned long)crq->msgs);
742 }
743 
744 /**
745  * ibmvfc_reenable_crq_queue - reenables the CRQ
746  * @vhost:	ibmvfc host struct
747  *
748  * Return value:
749  *	0 on success / other on failure
750  **/
751 static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
752 {
753 	int rc = 0;
754 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
755 
756 	/* Re-enable the CRQ */
757 	do {
758 		if (rc)
759 			msleep(100);
760 		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
761 	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
762 
763 	if (rc)
764 		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
765 
766 	return rc;
767 }
768 
769 /**
770  * ibmvfc_reset_crq - resets a crq after a failure
771  * @vhost:	ibmvfc host struct
772  *
773  * Return value:
774  *	0 on success / other on failure
775  **/
776 static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
777 {
778 	int rc = 0;
779 	unsigned long flags;
780 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
781 	struct ibmvfc_crq_queue *crq = &vhost->crq;
782 
783 	/* Close the CRQ */
784 	do {
785 		if (rc)
786 			msleep(100);
787 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
788 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
789 
790 	spin_lock_irqsave(vhost->host->host_lock, flags);
791 	vhost->state = IBMVFC_NO_CRQ;
792 	vhost->logged_in = 0;
793 
794 	/* Clean out the queue */
795 	memset(crq->msgs, 0, PAGE_SIZE);
796 	crq->cur = 0;
797 
798 	/* And re-open it again */
799 	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
800 				crq->msg_token, PAGE_SIZE);
801 
802 	if (rc == H_CLOSED)
803 		/* Adapter is good, but other end is not ready */
804 		dev_warn(vhost->dev, "Partner adapter not ready\n");
805 	else if (rc != 0)
806 		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
807 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
808 
809 	return rc;
810 }
811 
812 /**
813  * ibmvfc_valid_event - Determines if event is valid.
814  * @pool:	event_pool that contains the event
815  * @evt:	ibmvfc event to be checked for validity
816  *
817  * Return value:
818  *	1 if event is valid / 0 if event is not valid
819  **/
820 static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
821 			      struct ibmvfc_event *evt)
822 {
823 	int index = evt - pool->events;
824 	if (index < 0 || index >= pool->size)	/* outside of bounds */
825 		return 0;
826 	if (evt != pool->events + index)	/* unaligned */
827 		return 0;
828 	return 1;
829 }
830 
831 /**
832  * ibmvfc_free_event - Free the specified event
833  * @evt:	ibmvfc_event to be freed
834  *
835  **/
836 static void ibmvfc_free_event(struct ibmvfc_event *evt)
837 {
838 	struct ibmvfc_host *vhost = evt->vhost;
839 	struct ibmvfc_event_pool *pool = &vhost->pool;
840 
841 	BUG_ON(!ibmvfc_valid_event(pool, evt));
842 	BUG_ON(atomic_inc_return(&evt->free) != 1);
843 	list_add_tail(&evt->queue, &vhost->free);
844 }
845 
846 /**
847  * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
848  * @evt:	ibmvfc event struct
849  *
850  * This function does not setup any error status, that must be done
851  * before this function gets called.
852  **/
853 static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
854 {
855 	struct scsi_cmnd *cmnd = evt->cmnd;
856 
857 	if (cmnd) {
858 		scsi_dma_unmap(cmnd);
859 		cmnd->scsi_done(cmnd);
860 	}
861 
862 	if (evt->eh_comp)
863 		complete(evt->eh_comp);
864 
865 	ibmvfc_free_event(evt);
866 }
867 
/**
 * ibmvfc_fail_request - Fail request with specified error code
 * @evt:		ibmvfc event struct
 * @error_code:	error code to fail request with
 *
 * Return value:
 *	none
 **/
static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
{
	if (evt->cmnd) {
		/* SCSI command: fail with the host byte via the EH done path */
		evt->cmnd->result = (error_code << 16);
		evt->done = ibmvfc_scsi_eh_done;
	} else
		evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);

	/* remove from the sent list and stop its timer before completing */
	list_del(&evt->queue);
	del_timer(&evt->timer);
	ibmvfc_trc_end(evt);
	evt->done(evt);
}
889 
890 /**
891  * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
892  * @vhost:		ibmvfc host struct
893  * @error_code:	error code to fail requests with
894  *
895  * Return value:
896  *	none
897  **/
898 static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
899 {
900 	struct ibmvfc_event *evt, *pos;
901 
902 	ibmvfc_dbg(vhost, "Purging all requests\n");
903 	list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
904 		ibmvfc_fail_request(evt, error_code);
905 }
906 
/**
 * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
 * @vhost:	struct ibmvfc host to reset
 **/
static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
{
	/* fail all outstanding requests, mark the link down, then
	 * schedule a CRQ reset for the worker thread */
	ibmvfc_purge_requests(vhost, DID_ERROR);
	ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
}
917 
918 /**
919  * __ibmvfc_reset_host - Reset the connection to the server (no locking)
920  * @vhost:	struct ibmvfc host to reset
921  **/
922 static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
923 {
924 	if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
925 	    !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
926 		scsi_block_requests(vhost->host);
927 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
928 		vhost->job_step = ibmvfc_npiv_logout;
929 		wake_up(&vhost->work_wait_q);
930 	} else
931 		ibmvfc_hard_reset_host(vhost);
932 }
933 
934 /**
935  * ibmvfc_reset_host - Reset the connection to the server
936  * @vhost:	ibmvfc host struct
937  **/
938 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
939 {
940 	unsigned long flags;
941 
942 	spin_lock_irqsave(vhost->host->host_lock, flags);
943 	__ibmvfc_reset_host(vhost);
944 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
945 }
946 
947 /**
948  * ibmvfc_retry_host_init - Retry host initialization if allowed
949  * @vhost:	ibmvfc host struct
950  *
951  * Returns: 1 if init will be retried / 0 if not
952  *
953  **/
954 static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
955 {
956 	int retry = 0;
957 
958 	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
959 		vhost->delay_init = 1;
960 		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
961 			dev_err(vhost->dev,
962 				"Host initialization retries exceeded. Taking adapter offline\n");
963 			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
964 		} else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
965 			__ibmvfc_reset_host(vhost);
966 		else {
967 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
968 			retry = 1;
969 		}
970 	}
971 
972 	wake_up(&vhost->work_wait_q);
973 	return retry;
974 }
975 
976 /**
977  * __ibmvfc_get_target - Find the specified scsi_target (no locking)
978  * @starget:	scsi target struct
979  *
980  * Return value:
981  *	ibmvfc_target struct / NULL if not found
982  **/
983 static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
984 {
985 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
986 	struct ibmvfc_host *vhost = shost_priv(shost);
987 	struct ibmvfc_target *tgt;
988 
989 	list_for_each_entry(tgt, &vhost->targets, queue)
990 		if (tgt->target_id == starget->id) {
991 			kref_get(&tgt->kref);
992 			return tgt;
993 		}
994 	return NULL;
995 }
996 
997 /**
998  * ibmvfc_get_target - Find the specified scsi_target
999  * @starget:	scsi target struct
1000  *
1001  * Return value:
1002  *	ibmvfc_target struct / NULL if not found
1003  **/
1004 static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
1005 {
1006 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1007 	struct ibmvfc_target *tgt;
1008 	unsigned long flags;
1009 
1010 	spin_lock_irqsave(shost->host_lock, flags);
1011 	tgt = __ibmvfc_get_target(starget);
1012 	spin_unlock_irqrestore(shost->host_lock, flags);
1013 	return tgt;
1014 }
1015 
1016 /**
1017  * ibmvfc_get_host_speed - Get host port speed
1018  * @shost:		scsi host struct
1019  *
1020  * Return value:
1021  * 	none
1022  **/
1023 static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
1024 {
1025 	struct ibmvfc_host *vhost = shost_priv(shost);
1026 	unsigned long flags;
1027 
1028 	spin_lock_irqsave(shost->host_lock, flags);
1029 	if (vhost->state == IBMVFC_ACTIVE) {
1030 		switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
1031 		case 1:
1032 			fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
1033 			break;
1034 		case 2:
1035 			fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
1036 			break;
1037 		case 4:
1038 			fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
1039 			break;
1040 		case 8:
1041 			fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
1042 			break;
1043 		case 10:
1044 			fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
1045 			break;
1046 		case 16:
1047 			fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
1048 			break;
1049 		default:
1050 			ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
1051 				   be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
1052 			fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1053 			break;
1054 		}
1055 	} else
1056 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
1057 	spin_unlock_irqrestore(shost->host_lock, flags);
1058 }
1059 
1060 /**
1061  * ibmvfc_get_host_port_state - Get host port state
1062  * @shost:		scsi host struct
1063  *
1064  * Return value:
1065  * 	none
1066  **/
1067 static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
1068 {
1069 	struct ibmvfc_host *vhost = shost_priv(shost);
1070 	unsigned long flags;
1071 
1072 	spin_lock_irqsave(shost->host_lock, flags);
1073 	switch (vhost->state) {
1074 	case IBMVFC_INITIALIZING:
1075 	case IBMVFC_ACTIVE:
1076 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1077 		break;
1078 	case IBMVFC_LINK_DOWN:
1079 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1080 		break;
1081 	case IBMVFC_LINK_DEAD:
1082 	case IBMVFC_HOST_OFFLINE:
1083 		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1084 		break;
1085 	case IBMVFC_HALTED:
1086 		fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
1087 		break;
1088 	case IBMVFC_NO_CRQ:
1089 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1090 		break;
1091 	default:
1092 		ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
1093 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1094 		break;
1095 	}
1096 	spin_unlock_irqrestore(shost->host_lock, flags);
1097 }
1098 
1099 /**
1100  * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
1101  * @rport:		rport struct
1102  * @timeout:	timeout value
1103  *
1104  * Return value:
1105  * 	none
1106  **/
1107 static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
1108 {
1109 	if (timeout)
1110 		rport->dev_loss_tmo = timeout;
1111 	else
1112 		rport->dev_loss_tmo = 1;
1113 }
1114 
1115 /**
1116  * ibmvfc_release_tgt - Free memory allocated for a target
1117  * @kref:		kref struct
1118  *
1119  **/
1120 static void ibmvfc_release_tgt(struct kref *kref)
1121 {
1122 	struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
1123 	kfree(tgt);
1124 }
1125 
1126 /**
1127  * ibmvfc_get_starget_node_name - Get SCSI target's node name
1128  * @starget:	scsi target struct
1129  *
1130  * Return value:
1131  * 	none
1132  **/
1133 static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
1134 {
1135 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1136 	fc_starget_port_name(starget) = tgt ? tgt->ids.node_name : 0;
1137 	if (tgt)
1138 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1139 }
1140 
1141 /**
1142  * ibmvfc_get_starget_port_name - Get SCSI target's port name
1143  * @starget:	scsi target struct
1144  *
1145  * Return value:
1146  * 	none
1147  **/
1148 static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
1149 {
1150 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1151 	fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
1152 	if (tgt)
1153 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1154 }
1155 
1156 /**
1157  * ibmvfc_get_starget_port_id - Get SCSI target's port ID
1158  * @starget:	scsi target struct
1159  *
1160  * Return value:
1161  * 	none
1162  **/
1163 static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
1164 {
1165 	struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
1166 	fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
1167 	if (tgt)
1168 		kref_put(&tgt->kref, ibmvfc_release_tgt);
1169 }
1170 
1171 /**
1172  * ibmvfc_wait_while_resetting - Wait while the host resets
1173  * @vhost:		ibmvfc host struct
1174  *
1175  * Return value:
1176  * 	0 on success / other on failure
1177  **/
1178 static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
1179 {
1180 	long timeout = wait_event_timeout(vhost->init_wait_q,
1181 					  ((vhost->state == IBMVFC_ACTIVE ||
1182 					    vhost->state == IBMVFC_HOST_OFFLINE ||
1183 					    vhost->state == IBMVFC_LINK_DEAD) &&
1184 					   vhost->action == IBMVFC_HOST_ACTION_NONE),
1185 					  (init_timeout * HZ));
1186 
1187 	return timeout ? 0 : -EIO;
1188 }
1189 
1190 /**
1191  * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
1192  * @shost:		scsi host struct
1193  *
1194  * Return value:
1195  * 	0 on success / other on failure
1196  **/
1197 static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
1198 {
1199 	struct ibmvfc_host *vhost = shost_priv(shost);
1200 
1201 	dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
1202 	ibmvfc_reset_host(vhost);
1203 	return ibmvfc_wait_while_resetting(vhost);
1204 }
1205 
1206 /**
1207  * ibmvfc_gather_partition_info - Gather info about the LPAR
1208  *
1209  * Return value:
1210  *	none
1211  **/
1212 static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
1213 {
1214 	struct device_node *rootdn;
1215 	const char *name;
1216 	const unsigned int *num;
1217 
1218 	rootdn = of_find_node_by_path("/");
1219 	if (!rootdn)
1220 		return;
1221 
1222 	name = of_get_property(rootdn, "ibm,partition-name", NULL);
1223 	if (name)
1224 		strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
1225 	num = of_get_property(rootdn, "ibm,partition-no", NULL);
1226 	if (num)
1227 		vhost->partition_number = *num;
1228 	of_node_put(rootdn);
1229 }
1230 
1231 /**
1232  * ibmvfc_set_login_info - Setup info for NPIV login
1233  * @vhost:	ibmvfc host struct
1234  *
1235  * Return value:
1236  *	none
1237  **/
1238 static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
1239 {
1240 	struct ibmvfc_npiv_login *login_info = &vhost->login_info;
1241 	struct device_node *of_node = vhost->dev->of_node;
1242 	const char *location;
1243 
1244 	memset(login_info, 0, sizeof(*login_info));
1245 
1246 	login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
1247 	login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
1248 	login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
1249 	login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
1250 	login_info->partition_num = cpu_to_be32(vhost->partition_number);
1251 	login_info->vfc_frame_version = cpu_to_be32(1);
1252 	login_info->fcp_version = cpu_to_be16(3);
1253 	login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
1254 	if (vhost->client_migrated)
1255 		login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
1256 
1257 	login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
1258 	login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
1259 	login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
1260 	login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
1261 	strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
1262 	strncpy(login_info->device_name,
1263 		dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
1264 
1265 	location = of_get_property(of_node, "ibm,loc-code", NULL);
1266 	location = location ? location : dev_name(vhost->dev);
1267 	strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
1268 }
1269 
1270 /**
1271  * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
1272  * @vhost:	ibmvfc host who owns the event pool
1273  *
1274  * Returns zero on success.
1275  **/
1276 static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
1277 {
1278 	int i;
1279 	struct ibmvfc_event_pool *pool = &vhost->pool;
1280 
1281 	ENTER;
1282 	pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
1283 	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
1284 	if (!pool->events)
1285 		return -ENOMEM;
1286 
1287 	pool->iu_storage = dma_alloc_coherent(vhost->dev,
1288 					      pool->size * sizeof(*pool->iu_storage),
1289 					      &pool->iu_token, 0);
1290 
1291 	if (!pool->iu_storage) {
1292 		kfree(pool->events);
1293 		return -ENOMEM;
1294 	}
1295 
1296 	for (i = 0; i < pool->size; ++i) {
1297 		struct ibmvfc_event *evt = &pool->events[i];
1298 		atomic_set(&evt->free, 1);
1299 		evt->crq.valid = 0x80;
1300 		evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
1301 		evt->xfer_iu = pool->iu_storage + i;
1302 		evt->vhost = vhost;
1303 		evt->ext_list = NULL;
1304 		list_add_tail(&evt->queue, &vhost->free);
1305 	}
1306 
1307 	LEAVE;
1308 	return 0;
1309 }
1310 
1311 /**
1312  * ibmvfc_free_event_pool - Frees memory of the event pool of a host
1313  * @vhost:	ibmvfc host who owns the event pool
1314  *
1315  **/
1316 static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
1317 {
1318 	int i;
1319 	struct ibmvfc_event_pool *pool = &vhost->pool;
1320 
1321 	ENTER;
1322 	for (i = 0; i < pool->size; ++i) {
1323 		list_del(&pool->events[i].queue);
1324 		BUG_ON(atomic_read(&pool->events[i].free) != 1);
1325 		if (pool->events[i].ext_list)
1326 			dma_pool_free(vhost->sg_pool,
1327 				      pool->events[i].ext_list,
1328 				      pool->events[i].ext_list_token);
1329 	}
1330 
1331 	kfree(pool->events);
1332 	dma_free_coherent(vhost->dev,
1333 			  pool->size * sizeof(*pool->iu_storage),
1334 			  pool->iu_storage, pool->iu_token);
1335 	LEAVE;
1336 }
1337 
1338 /**
1339  * ibmvfc_get_event - Gets the next free event in pool
1340  * @vhost:	ibmvfc host struct
1341  *
1342  * Returns a free event from the pool.
1343  **/
1344 static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
1345 {
1346 	struct ibmvfc_event *evt;
1347 
1348 	BUG_ON(list_empty(&vhost->free));
1349 	evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
1350 	atomic_set(&evt->free, 0);
1351 	list_del(&evt->queue);
1352 	return evt;
1353 }
1354 
1355 /**
1356  * ibmvfc_init_event - Initialize fields in an event struct that are always
1357  *				required.
1358  * @evt:	The event
1359  * @done:	Routine to call when the event is responded to
1360  * @format:	SRP or MAD format
1361  **/
1362 static void ibmvfc_init_event(struct ibmvfc_event *evt,
1363 			      void (*done) (struct ibmvfc_event *), u8 format)
1364 {
1365 	evt->cmnd = NULL;
1366 	evt->sync_iu = NULL;
1367 	evt->crq.format = format;
1368 	evt->done = done;
1369 	evt->eh_comp = NULL;
1370 }
1371 
1372 /**
1373  * ibmvfc_map_sg_list - Initialize scatterlist
1374  * @scmd:	scsi command struct
1375  * @nseg:	number of scatterlist segments
1376  * @md:	memory descriptor list to initialize
1377  **/
1378 static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
1379 			       struct srp_direct_buf *md)
1380 {
1381 	int i;
1382 	struct scatterlist *sg;
1383 
1384 	scsi_for_each_sg(scmd, sg, nseg, i) {
1385 		md[i].va = cpu_to_be64(sg_dma_address(sg));
1386 		md[i].len = cpu_to_be32(sg_dma_len(sg));
1387 		md[i].key = 0;
1388 	}
1389 }
1390 
1391 /**
1392  * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
1393  * @scmd:		struct scsi_cmnd with the scatterlist
1394  * @evt:		ibmvfc event struct
1395  * @vfc_cmd:	vfc_cmd that contains the memory descriptor
1396  * @dev:		device for which to map dma memory
1397  *
1398  * Returns:
1399  *	0 on success / non-zero on failure
1400  **/
1401 static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
1402 			      struct ibmvfc_event *evt,
1403 			      struct ibmvfc_cmd *vfc_cmd, struct device *dev)
1404 {
1405 
1406 	int sg_mapped;
1407 	struct srp_direct_buf *data = &vfc_cmd->ioba;
1408 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
1409 	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(evt->vhost, vfc_cmd);
1410 
1411 	if (cls3_error)
1412 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
1413 
1414 	sg_mapped = scsi_dma_map(scmd);
1415 	if (!sg_mapped) {
1416 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
1417 		return 0;
1418 	} else if (unlikely(sg_mapped < 0)) {
1419 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1420 			scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
1421 		return sg_mapped;
1422 	}
1423 
1424 	if (scmd->sc_data_direction == DMA_TO_DEVICE) {
1425 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
1426 		iu->add_cdb_len |= IBMVFC_WRDATA;
1427 	} else {
1428 		vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
1429 		iu->add_cdb_len |= IBMVFC_RDDATA;
1430 	}
1431 
1432 	if (sg_mapped == 1) {
1433 		ibmvfc_map_sg_list(scmd, sg_mapped, data);
1434 		return 0;
1435 	}
1436 
1437 	vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
1438 
1439 	if (!evt->ext_list) {
1440 		evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
1441 					       &evt->ext_list_token);
1442 
1443 		if (!evt->ext_list) {
1444 			scsi_dma_unmap(scmd);
1445 			if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1446 				scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
1447 			return -ENOMEM;
1448 		}
1449 	}
1450 
1451 	ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
1452 
1453 	data->va = cpu_to_be64(evt->ext_list_token);
1454 	data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
1455 	data->key = 0;
1456 	return 0;
1457 }
1458 
1459 /**
1460  * ibmvfc_timeout - Internal command timeout handler
1461  * @evt:	struct ibmvfc_event that timed out
1462  *
1463  * Called when an internally generated command times out
1464  **/
1465 static void ibmvfc_timeout(struct timer_list *t)
1466 {
1467 	struct ibmvfc_event *evt = from_timer(evt, t, timer);
1468 	struct ibmvfc_host *vhost = evt->vhost;
1469 	dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
1470 	ibmvfc_reset_host(vhost);
1471 }
1472 
1473 /**
1474  * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
1475  * @evt:		event to be sent
1476  * @vhost:		ibmvfc host struct
1477  * @timeout:	timeout in seconds - 0 means do not time command
1478  *
1479  * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
1480  **/
1481 static int ibmvfc_send_event(struct ibmvfc_event *evt,
1482 			     struct ibmvfc_host *vhost, unsigned long timeout)
1483 {
1484 	__be64 *crq_as_u64 = (__be64 *) &evt->crq;
1485 	int rc;
1486 
1487 	/* Copy the IU into the transfer area */
1488 	*evt->xfer_iu = evt->iu;
1489 	if (evt->crq.format == IBMVFC_CMD_FORMAT)
1490 		evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
1491 	else if (evt->crq.format == IBMVFC_MAD_FORMAT)
1492 		evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
1493 	else
1494 		BUG();
1495 
1496 	list_add_tail(&evt->queue, &vhost->sent);
1497 	timer_setup(&evt->timer, ibmvfc_timeout, 0);
1498 
1499 	if (timeout) {
1500 		evt->timer.expires = jiffies + (timeout * HZ);
1501 		add_timer(&evt->timer);
1502 	}
1503 
1504 	mb();
1505 
1506 	if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
1507 				  be64_to_cpu(crq_as_u64[1])))) {
1508 		list_del(&evt->queue);
1509 		del_timer(&evt->timer);
1510 
1511 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
1512 		 * Firmware will send a CRQ with a transport event (0xFF) to
1513 		 * tell this client what has happened to the transport. This
1514 		 * will be handled in ibmvfc_handle_crq()
1515 		 */
1516 		if (rc == H_CLOSED) {
1517 			if (printk_ratelimit())
1518 				dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
1519 			if (evt->cmnd)
1520 				scsi_dma_unmap(evt->cmnd);
1521 			ibmvfc_free_event(evt);
1522 			return SCSI_MLQUEUE_HOST_BUSY;
1523 		}
1524 
1525 		dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
1526 		if (evt->cmnd) {
1527 			evt->cmnd->result = DID_ERROR << 16;
1528 			evt->done = ibmvfc_scsi_eh_done;
1529 		} else
1530 			evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
1531 
1532 		evt->done(evt);
1533 	} else
1534 		ibmvfc_trc_start(evt);
1535 
1536 	return 0;
1537 }
1538 
1539 /**
1540  * ibmvfc_log_error - Log an error for the failed command if appropriate
1541  * @evt:	ibmvfc event to log
1542  *
1543  **/
1544 static void ibmvfc_log_error(struct ibmvfc_event *evt)
1545 {
1546 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1547 	struct ibmvfc_host *vhost = evt->vhost;
1548 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
1549 	struct scsi_cmnd *cmnd = evt->cmnd;
1550 	const char *err = unknown_error;
1551 	int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
1552 	int logerr = 0;
1553 	int rsp_code = 0;
1554 
1555 	if (index >= 0) {
1556 		logerr = cmd_status[index].log;
1557 		err = cmd_status[index].name;
1558 	}
1559 
1560 	if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
1561 		return;
1562 
1563 	if (rsp->flags & FCP_RSP_LEN_VALID)
1564 		rsp_code = rsp->data.info.rsp_code;
1565 
1566 	scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1567 		    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1568 		    cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1569 		    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1570 }
1571 
1572 /**
1573  * ibmvfc_relogin - Log back into the specified device
1574  * @sdev:	scsi device struct
1575  *
1576  **/
1577 static void ibmvfc_relogin(struct scsi_device *sdev)
1578 {
1579 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
1580 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1581 	struct ibmvfc_target *tgt;
1582 
1583 	list_for_each_entry(tgt, &vhost->targets, queue) {
1584 		if (rport == tgt->rport) {
1585 			ibmvfc_del_tgt(tgt);
1586 			break;
1587 		}
1588 	}
1589 
1590 	ibmvfc_reinit_host(vhost);
1591 }
1592 
1593 /**
1594  * ibmvfc_scsi_done - Handle responses from commands
1595  * @evt:	ibmvfc event to be handled
1596  *
1597  * Used as a callback when sending scsi cmds.
1598  **/
1599 static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
1600 {
1601 	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
1602 	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(evt->vhost, vfc_cmd);
1603 	struct scsi_cmnd *cmnd = evt->cmnd;
1604 	u32 rsp_len = 0;
1605 	u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
1606 
1607 	if (cmnd) {
1608 		if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
1609 			scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
1610 		else if (rsp->flags & FCP_RESID_UNDER)
1611 			scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
1612 		else
1613 			scsi_set_resid(cmnd, 0);
1614 
1615 		if (vfc_cmd->status) {
1616 			cmnd->result = ibmvfc_get_err_result(evt->vhost, vfc_cmd);
1617 
1618 			if (rsp->flags & FCP_RSP_LEN_VALID)
1619 				rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
1620 			if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
1621 				sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
1622 			if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
1623 				memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
1624 			if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
1625 			    (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
1626 				ibmvfc_relogin(cmnd->device);
1627 
1628 			if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
1629 				cmnd->result = (DID_ERROR << 16);
1630 
1631 			ibmvfc_log_error(evt);
1632 		}
1633 
1634 		if (!cmnd->result &&
1635 		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
1636 			cmnd->result = (DID_ERROR << 16);
1637 
1638 		scsi_dma_unmap(cmnd);
1639 		cmnd->scsi_done(cmnd);
1640 	}
1641 
1642 	if (evt->eh_comp)
1643 		complete(evt->eh_comp);
1644 
1645 	ibmvfc_free_event(evt);
1646 }
1647 
1648 /**
1649  * ibmvfc_host_chkready - Check if the host can accept commands
1650  * @vhost:	 struct ibmvfc host
1651  *
1652  * Returns:
1653  *	1 if host can accept command / 0 if not
1654  **/
1655 static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
1656 {
1657 	int result = 0;
1658 
1659 	switch (vhost->state) {
1660 	case IBMVFC_LINK_DEAD:
1661 	case IBMVFC_HOST_OFFLINE:
1662 		result = DID_NO_CONNECT << 16;
1663 		break;
1664 	case IBMVFC_NO_CRQ:
1665 	case IBMVFC_INITIALIZING:
1666 	case IBMVFC_HALTED:
1667 	case IBMVFC_LINK_DOWN:
1668 		result = DID_REQUEUE << 16;
1669 		break;
1670 	case IBMVFC_ACTIVE:
1671 		result = 0;
1672 		break;
1673 	}
1674 
1675 	return result;
1676 }
1677 
/* Initialize the common fields of a vfc_cmd for @sdev and return it.
 * The response buffer location depends on whether the server handles
 * VF WWPNs (v2 vs v1 command layout).
 */
static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
	struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
	size_t offset;

	memset(vfc_cmd, 0, sizeof(*vfc_cmd));
	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
		offset = offsetof(struct ibmvfc_cmd, v2.rsp);
		vfc_cmd->target_wwpn = cpu_to_be64(rport->port_name);
	} else
		offset = offsetof(struct ibmvfc_cmd, v1.rsp);
	/* Response lands in the rsp member of the event's transfer IU */
	vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offset);
	vfc_cmd->resp.len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
	vfc_cmd->payload_len = cpu_to_be32(sizeof(*iu));
	vfc_cmd->resp_len = cpu_to_be32(sizeof(*rsp));
	vfc_cmd->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
	vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
	int_to_scsilun(sdev->lun, &iu->lun);

	return vfc_cmd;
}
1704 
1705 /**
1706  * ibmvfc_queuecommand - The queuecommand function of the scsi template
1707  * @cmnd:	struct scsi_cmnd to be executed
1708  * @done:	Callback function to be called when cmnd is completed
1709  *
1710  * Returns:
1711  *	0 on success / other on failure
1712  **/
1713 static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
1714 			       void (*done) (struct scsi_cmnd *))
1715 {
1716 	struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
1717 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1718 	struct ibmvfc_cmd *vfc_cmd;
1719 	struct ibmvfc_fcp_cmd_iu *iu;
1720 	struct ibmvfc_event *evt;
1721 	int rc;
1722 
1723 	if (unlikely((rc = fc_remote_port_chkready(rport))) ||
1724 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1725 		cmnd->result = rc;
1726 		done(cmnd);
1727 		return 0;
1728 	}
1729 
1730 	cmnd->result = (DID_OK << 16);
1731 	evt = ibmvfc_get_event(vhost);
1732 	ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
1733 	evt->cmnd = cmnd;
1734 	cmnd->scsi_done = done;
1735 
1736 	vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
1737 	iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
1738 
1739 	iu->xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
1740 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
1741 
1742 	if (cmnd->flags & SCMD_TAGGED) {
1743 		vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
1744 		iu->pri_task_attr = IBMVFC_SIMPLE_TASK;
1745 	}
1746 
1747 	vfc_cmd->correlation = cpu_to_be64((u64)evt);
1748 
1749 	if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
1750 		return ibmvfc_send_event(evt, vhost, 0);
1751 
1752 	ibmvfc_free_event(evt);
1753 	if (rc == -ENOMEM)
1754 		return SCSI_MLQUEUE_HOST_BUSY;
1755 
1756 	if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
1757 		scmd_printk(KERN_ERR, cmnd,
1758 			    "Failed to map DMA buffer for command. rc=%d\n", rc);
1759 
1760 	cmnd->result = DID_ERROR << 16;
1761 	done(cmnd);
1762 	return 0;
1763 }
1764 
/* DEF_SCSI_QCMD generates ibmvfc_queuecommand(), the locked wrapper
 * around ibmvfc_queuecommand_lck() used as the host template entry point.
 */
static DEF_SCSI_QCMD(ibmvfc_queuecommand)
1766 
1767 /**
1768  * ibmvfc_sync_completion - Signal that a synchronous command has completed
1769  * @evt:	ibmvfc event struct
1770  *
1771  **/
1772 static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
1773 {
1774 	/* copy the response back */
1775 	if (evt->sync_iu)
1776 		*evt->sync_iu = *evt->xfer_iu;
1777 
1778 	complete(&evt->comp);
1779 }
1780 
1781 /**
1782  * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
1783  * @evt:	struct ibmvfc_event
1784  *
1785  **/
1786 static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
1787 {
1788 	struct ibmvfc_host *vhost = evt->vhost;
1789 
1790 	ibmvfc_free_event(evt);
1791 	vhost->aborting_passthru = 0;
1792 	dev_info(vhost->dev, "Passthru command cancelled\n");
1793 }
1794 
1795 /**
1796  * ibmvfc_bsg_timeout - Handle a BSG timeout
1797  * @job:	struct bsg_job that timed out
1798  *
1799  * Returns:
1800  *	0 on success / other on failure
1801  **/
1802 static int ibmvfc_bsg_timeout(struct bsg_job *job)
1803 {
1804 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1805 	unsigned long port_id = (unsigned long)job->dd_data;
1806 	struct ibmvfc_event *evt;
1807 	struct ibmvfc_tmf *tmf;
1808 	unsigned long flags;
1809 	int rc;
1810 
1811 	ENTER;
1812 	spin_lock_irqsave(vhost->host->host_lock, flags);
1813 	if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
1814 		__ibmvfc_reset_host(vhost);
1815 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
1816 		return 0;
1817 	}
1818 
1819 	vhost->aborting_passthru = 1;
1820 	evt = ibmvfc_get_event(vhost);
1821 	ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
1822 
1823 	tmf = &evt->iu.tmf;
1824 	memset(tmf, 0, sizeof(*tmf));
1825 	tmf->common.version = cpu_to_be32(1);
1826 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
1827 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
1828 	tmf->scsi_id = cpu_to_be64(port_id);
1829 	tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
1830 	tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
1831 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
1832 
1833 	if (rc != 0) {
1834 		vhost->aborting_passthru = 0;
1835 		dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
1836 		rc = -EIO;
1837 	} else
1838 		dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
1839 			 port_id);
1840 
1841 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1842 
1843 	LEAVE;
1844 	return rc;
1845 }
1846 
1847 /**
1848  * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
1849  * @vhost:		struct ibmvfc_host to send command
1850  * @port_id:	port ID to send command
1851  *
1852  * Returns:
1853  *	0 on success / other on failure
1854  **/
1855 static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
1856 {
1857 	struct ibmvfc_port_login *plogi;
1858 	struct ibmvfc_target *tgt;
1859 	struct ibmvfc_event *evt;
1860 	union ibmvfc_iu rsp_iu;
1861 	unsigned long flags;
1862 	int rc = 0, issue_login = 1;
1863 
1864 	ENTER;
1865 	spin_lock_irqsave(vhost->host->host_lock, flags);
1866 	list_for_each_entry(tgt, &vhost->targets, queue) {
1867 		if (tgt->scsi_id == port_id) {
1868 			issue_login = 0;
1869 			break;
1870 		}
1871 	}
1872 
1873 	if (!issue_login)
1874 		goto unlock_out;
1875 	if (unlikely((rc = ibmvfc_host_chkready(vhost))))
1876 		goto unlock_out;
1877 
1878 	evt = ibmvfc_get_event(vhost);
1879 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1880 	plogi = &evt->iu.plogi;
1881 	memset(plogi, 0, sizeof(*plogi));
1882 	plogi->common.version = cpu_to_be32(1);
1883 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
1884 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
1885 	plogi->scsi_id = cpu_to_be64(port_id);
1886 	evt->sync_iu = &rsp_iu;
1887 	init_completion(&evt->comp);
1888 
1889 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
1890 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1891 
1892 	if (rc)
1893 		return -EIO;
1894 
1895 	wait_for_completion(&evt->comp);
1896 
1897 	if (rsp_iu.plogi.common.status)
1898 		rc = -EIO;
1899 
1900 	spin_lock_irqsave(vhost->host->host_lock, flags);
1901 	ibmvfc_free_event(evt);
1902 unlock_out:
1903 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
1904 	LEAVE;
1905 	return rc;
1906 }
1907 
1908 /**
1909  * ibmvfc_bsg_request - Handle a BSG request
1910  * @job:	struct bsg_job to be executed
1911  *
1912  * Returns:
1913  *	0 on success / other on failure
1914  **/
1915 static int ibmvfc_bsg_request(struct bsg_job *job)
1916 {
1917 	struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
1918 	struct fc_rport *rport = fc_bsg_to_rport(job);
1919 	struct ibmvfc_passthru_mad *mad;
1920 	struct ibmvfc_event *evt;
1921 	union ibmvfc_iu rsp_iu;
1922 	unsigned long flags, port_id = -1;
1923 	struct fc_bsg_request *bsg_request = job->request;
1924 	struct fc_bsg_reply *bsg_reply = job->reply;
1925 	unsigned int code = bsg_request->msgcode;
1926 	int rc = 0, req_seg, rsp_seg, issue_login = 0;
1927 	u32 fc_flags, rsp_len;
1928 
1929 	ENTER;
1930 	bsg_reply->reply_payload_rcv_len = 0;
1931 	if (rport)
1932 		port_id = rport->port_id;
1933 
1934 	switch (code) {
1935 	case FC_BSG_HST_ELS_NOLOGIN:
1936 		port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
1937 			(bsg_request->rqst_data.h_els.port_id[1] << 8) |
1938 			bsg_request->rqst_data.h_els.port_id[2];
1939 		fallthrough;
1940 	case FC_BSG_RPT_ELS:
1941 		fc_flags = IBMVFC_FC_ELS;
1942 		break;
1943 	case FC_BSG_HST_CT:
1944 		issue_login = 1;
1945 		port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
1946 			(bsg_request->rqst_data.h_ct.port_id[1] << 8) |
1947 			bsg_request->rqst_data.h_ct.port_id[2];
1948 		fallthrough;
1949 	case FC_BSG_RPT_CT:
1950 		fc_flags = IBMVFC_FC_CT_IU;
1951 		break;
1952 	default:
1953 		return -ENOTSUPP;
1954 	}
1955 
1956 	if (port_id == -1)
1957 		return -EINVAL;
1958 	if (!mutex_trylock(&vhost->passthru_mutex))
1959 		return -EBUSY;
1960 
1961 	job->dd_data = (void *)port_id;
1962 	req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
1963 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
1964 
1965 	if (!req_seg) {
1966 		mutex_unlock(&vhost->passthru_mutex);
1967 		return -ENOMEM;
1968 	}
1969 
1970 	rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
1971 			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1972 
1973 	if (!rsp_seg) {
1974 		dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
1975 			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
1976 		mutex_unlock(&vhost->passthru_mutex);
1977 		return -ENOMEM;
1978 	}
1979 
1980 	if (req_seg > 1 || rsp_seg > 1) {
1981 		rc = -EINVAL;
1982 		goto out;
1983 	}
1984 
1985 	if (issue_login)
1986 		rc = ibmvfc_bsg_plogi(vhost, port_id);
1987 
1988 	spin_lock_irqsave(vhost->host->host_lock, flags);
1989 
1990 	if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
1991 	    unlikely((rc = ibmvfc_host_chkready(vhost)))) {
1992 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
1993 		goto out;
1994 	}
1995 
1996 	evt = ibmvfc_get_event(vhost);
1997 	ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
1998 	mad = &evt->iu.passthru;
1999 
2000 	memset(mad, 0, sizeof(*mad));
2001 	mad->common.version = cpu_to_be32(1);
2002 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
2003 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
2004 
2005 	mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
2006 		offsetof(struct ibmvfc_passthru_mad, iu));
2007 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
2008 
2009 	mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
2010 	mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
2011 	mad->iu.flags = cpu_to_be32(fc_flags);
2012 	mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
2013 
2014 	mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
2015 	mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
2016 	mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
2017 	mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
2018 	mad->iu.scsi_id = cpu_to_be64(port_id);
2019 	mad->iu.tag = cpu_to_be64((u64)evt);
2020 	rsp_len = be32_to_cpu(mad->iu.rsp.len);
2021 
2022 	evt->sync_iu = &rsp_iu;
2023 	init_completion(&evt->comp);
2024 	rc = ibmvfc_send_event(evt, vhost, 0);
2025 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2026 
2027 	if (rc) {
2028 		rc = -EIO;
2029 		goto out;
2030 	}
2031 
2032 	wait_for_completion(&evt->comp);
2033 
2034 	if (rsp_iu.passthru.common.status)
2035 		rc = -EIO;
2036 	else
2037 		bsg_reply->reply_payload_rcv_len = rsp_len;
2038 
2039 	spin_lock_irqsave(vhost->host->host_lock, flags);
2040 	ibmvfc_free_event(evt);
2041 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2042 	bsg_reply->result = rc;
2043 	bsg_job_done(job, bsg_reply->result,
2044 		       bsg_reply->reply_payload_rcv_len);
2045 	rc = 0;
2046 out:
2047 	dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
2048 		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
2049 	dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
2050 		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2051 	mutex_unlock(&vhost->passthru_mutex);
2052 	LEAVE;
2053 	return rc;
2054 }
2055 
2056 /**
2057  * ibmvfc_reset_device - Reset the device with the specified reset type
2058  * @sdev:	scsi device to reset
2059  * @type:	reset type
2060  * @desc:	reset type description for log messages
2061  *
2062  * Returns:
2063  *	0 on success / other on failure
2064  **/
2065 static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2066 {
2067 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2068 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2069 	struct ibmvfc_cmd *tmf;
2070 	struct ibmvfc_event *evt = NULL;
2071 	union ibmvfc_iu rsp_iu;
2072 	struct ibmvfc_fcp_cmd_iu *iu;
2073 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2074 	int rsp_rc = -EBUSY;
2075 	unsigned long flags;
2076 	int rsp_code = 0;
2077 
2078 	spin_lock_irqsave(vhost->host->host_lock, flags);
2079 	if (vhost->state == IBMVFC_ACTIVE) {
2080 		evt = ibmvfc_get_event(vhost);
2081 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2082 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2083 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2084 
2085 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2086 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2087 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2088 		iu->tmf_flags = type;
2089 		evt->sync_iu = &rsp_iu;
2090 
2091 		init_completion(&evt->comp);
2092 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2093 	}
2094 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2095 
2096 	if (rsp_rc != 0) {
2097 		sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
2098 			    desc, rsp_rc);
2099 		return -EIO;
2100 	}
2101 
2102 	sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
2103 	wait_for_completion(&evt->comp);
2104 
2105 	if (rsp_iu.cmd.status)
2106 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2107 
2108 	if (rsp_code) {
2109 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2110 			rsp_code = fc_rsp->data.info.rsp_code;
2111 
2112 		sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2113 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2114 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2115 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2116 			    fc_rsp->scsi_status);
2117 		rsp_rc = -EIO;
2118 	} else
2119 		sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
2120 
2121 	spin_lock_irqsave(vhost->host->host_lock, flags);
2122 	ibmvfc_free_event(evt);
2123 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2124 	return rsp_rc;
2125 }
2126 
2127 /**
2128  * ibmvfc_match_rport - Match function for specified remote port
2129  * @evt:	ibmvfc event struct
2130  * @device:	device to match (rport)
2131  *
2132  * Returns:
2133  *	1 if event matches rport / 0 if event does not match rport
2134  **/
2135 static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
2136 {
2137 	struct fc_rport *cmd_rport;
2138 
2139 	if (evt->cmnd) {
2140 		cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
2141 		if (cmd_rport == rport)
2142 			return 1;
2143 	}
2144 	return 0;
2145 }
2146 
2147 /**
2148  * ibmvfc_match_target - Match function for specified target
2149  * @evt:	ibmvfc event struct
2150  * @device:	device to match (starget)
2151  *
2152  * Returns:
2153  *	1 if event matches starget / 0 if event does not match starget
2154  **/
2155 static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
2156 {
2157 	if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
2158 		return 1;
2159 	return 0;
2160 }
2161 
2162 /**
2163  * ibmvfc_match_lun - Match function for specified LUN
2164  * @evt:	ibmvfc event struct
2165  * @device:	device to match (sdev)
2166  *
2167  * Returns:
2168  *	1 if event matches sdev / 0 if event does not match sdev
2169  **/
2170 static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
2171 {
2172 	if (evt->cmnd && evt->cmnd->device == device)
2173 		return 1;
2174 	return 0;
2175 }
2176 
2177 /**
2178  * ibmvfc_wait_for_ops - Wait for ops to complete
2179  * @vhost:	ibmvfc host struct
2180  * @device:	device to match (starget or sdev)
2181  * @match:	match function
2182  *
2183  * Returns:
2184  *	SUCCESS / FAILED
2185  **/
2186 static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
2187 			       int (*match) (struct ibmvfc_event *, void *))
2188 {
2189 	struct ibmvfc_event *evt;
2190 	DECLARE_COMPLETION_ONSTACK(comp);
2191 	int wait;
2192 	unsigned long flags;
2193 	signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
2194 
2195 	ENTER;
2196 	do {
2197 		wait = 0;
2198 		spin_lock_irqsave(vhost->host->host_lock, flags);
2199 		list_for_each_entry(evt, &vhost->sent, queue) {
2200 			if (match(evt, device)) {
2201 				evt->eh_comp = &comp;
2202 				wait++;
2203 			}
2204 		}
2205 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2206 
2207 		if (wait) {
2208 			timeout = wait_for_completion_timeout(&comp, timeout);
2209 
2210 			if (!timeout) {
2211 				wait = 0;
2212 				spin_lock_irqsave(vhost->host->host_lock, flags);
2213 				list_for_each_entry(evt, &vhost->sent, queue) {
2214 					if (match(evt, device)) {
2215 						evt->eh_comp = NULL;
2216 						wait++;
2217 					}
2218 				}
2219 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2220 				if (wait)
2221 					dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
2222 				LEAVE;
2223 				return wait ? FAILED : SUCCESS;
2224 			}
2225 		}
2226 	} while (wait);
2227 
2228 	LEAVE;
2229 	return SUCCESS;
2230 }
2231 
2232 /**
2233  * ibmvfc_cancel_all - Cancel all outstanding commands to the device
2234  * @sdev:	scsi device to cancel commands
2235  * @type:	type of error recovery being performed
2236  *
2237  * This sends a cancel to the VIOS for the specified device. This does
2238  * NOT send any abort to the actual device. That must be done separately.
2239  *
2240  * Returns:
2241  *	0 on success / other on failure
2242  **/
2243 static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
2244 {
2245 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2246 	struct scsi_target *starget = scsi_target(sdev);
2247 	struct fc_rport *rport = starget_to_rport(starget);
2248 	struct ibmvfc_tmf *tmf;
2249 	struct ibmvfc_event *evt, *found_evt;
2250 	union ibmvfc_iu rsp;
2251 	int rsp_rc = -EBUSY;
2252 	unsigned long flags;
2253 	u16 status;
2254 
2255 	ENTER;
2256 	spin_lock_irqsave(vhost->host->host_lock, flags);
2257 	found_evt = NULL;
2258 	list_for_each_entry(evt, &vhost->sent, queue) {
2259 		if (evt->cmnd && evt->cmnd->device == sdev) {
2260 			found_evt = evt;
2261 			break;
2262 		}
2263 	}
2264 
2265 	if (!found_evt) {
2266 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2267 			sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
2268 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2269 		return 0;
2270 	}
2271 
2272 	if (vhost->logged_in) {
2273 		evt = ibmvfc_get_event(vhost);
2274 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
2275 
2276 		tmf = &evt->iu.tmf;
2277 		memset(tmf, 0, sizeof(*tmf));
2278 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
2279 			tmf->common.version = cpu_to_be32(2);
2280 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2281 		} else {
2282 			tmf->common.version = cpu_to_be32(1);
2283 		}
2284 		tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
2285 		tmf->common.length = cpu_to_be16(sizeof(*tmf));
2286 		tmf->scsi_id = cpu_to_be64(rport->port_id);
2287 		int_to_scsilun(sdev->lun, &tmf->lun);
2288 		if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
2289 			type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
2290 		if (vhost->state == IBMVFC_ACTIVE)
2291 			tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
2292 		else
2293 			tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
2294 		tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
2295 		tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
2296 
2297 		evt->sync_iu = &rsp;
2298 		init_completion(&evt->comp);
2299 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2300 	}
2301 
2302 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2303 
2304 	if (rsp_rc != 0) {
2305 		sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
2306 		/* If failure is received, the host adapter is most likely going
2307 		 through reset, return success so the caller will wait for the command
2308 		 being cancelled to get returned */
2309 		return 0;
2310 	}
2311 
2312 	sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
2313 
2314 	wait_for_completion(&evt->comp);
2315 	status = be16_to_cpu(rsp.mad_common.status);
2316 	spin_lock_irqsave(vhost->host->host_lock, flags);
2317 	ibmvfc_free_event(evt);
2318 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2319 
2320 	if (status != IBMVFC_MAD_SUCCESS) {
2321 		sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
2322 		switch (status) {
2323 		case IBMVFC_MAD_DRIVER_FAILED:
2324 		case IBMVFC_MAD_CRQ_ERROR:
2325 			/* Host adapter most likely going through reset, return success to
2326 			 the caller will wait for the command being cancelled to get returned */
2327 			return 0;
2328 		default:
2329 			return -EIO;
2330 		};
2331 	}
2332 
2333 	sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
2334 	return 0;
2335 }
2336 
2337 /**
2338  * ibmvfc_match_key - Match function for specified cancel key
2339  * @evt:	ibmvfc event struct
2340  * @key:	cancel key to match
2341  *
2342  * Returns:
2343  *	1 if event matches key / 0 if event does not match key
2344  **/
2345 static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
2346 {
2347 	unsigned long cancel_key = (unsigned long)key;
2348 
2349 	if (evt->crq.format == IBMVFC_CMD_FORMAT &&
2350 	    be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
2351 		return 1;
2352 	return 0;
2353 }
2354 
2355 /**
2356  * ibmvfc_match_evt - Match function for specified event
2357  * @evt:	ibmvfc event struct
2358  * @match:	event to match
2359  *
2360  * Returns:
2361  *	1 if event matches key / 0 if event does not match key
2362  **/
2363 static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
2364 {
2365 	if (evt == match)
2366 		return 1;
2367 	return 0;
2368 }
2369 
2370 /**
2371  * ibmvfc_abort_task_set - Abort outstanding commands to the device
2372  * @sdev:	scsi device to abort commands
2373  *
2374  * This sends an Abort Task Set to the VIOS for the specified device. This does
2375  * NOT send any cancel to the VIOS. That must be done separately.
2376  *
2377  * Returns:
2378  *	0 on success / other on failure
2379  **/
2380 static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2381 {
2382 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2383 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2384 	struct ibmvfc_cmd *tmf;
2385 	struct ibmvfc_event *evt, *found_evt;
2386 	union ibmvfc_iu rsp_iu;
2387 	struct ibmvfc_fcp_cmd_iu *iu;
2388 	struct ibmvfc_fcp_rsp *fc_rsp = ibmvfc_get_fcp_rsp(vhost, &rsp_iu.cmd);
2389 	int rc, rsp_rc = -EBUSY;
2390 	unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
2391 	int rsp_code = 0;
2392 
2393 	spin_lock_irqsave(vhost->host->host_lock, flags);
2394 	found_evt = NULL;
2395 	list_for_each_entry(evt, &vhost->sent, queue) {
2396 		if (evt->cmnd && evt->cmnd->device == sdev) {
2397 			found_evt = evt;
2398 			break;
2399 		}
2400 	}
2401 
2402 	if (!found_evt) {
2403 		if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
2404 			sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
2405 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
2406 		return 0;
2407 	}
2408 
2409 	if (vhost->state == IBMVFC_ACTIVE) {
2410 		evt = ibmvfc_get_event(vhost);
2411 		ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
2412 		tmf = ibmvfc_init_vfc_cmd(evt, sdev);
2413 		iu = ibmvfc_get_fcp_iu(vhost, tmf);
2414 
2415 		if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN))
2416 			tmf->target_wwpn = cpu_to_be64(rport->port_name);
2417 		iu->tmf_flags = IBMVFC_ABORT_TASK_SET;
2418 		tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
2419 		evt->sync_iu = &rsp_iu;
2420 
2421 		tmf->correlation = cpu_to_be64((u64)evt);
2422 
2423 		init_completion(&evt->comp);
2424 		rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
2425 	}
2426 
2427 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2428 
2429 	if (rsp_rc != 0) {
2430 		sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
2431 		return -EIO;
2432 	}
2433 
2434 	sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
2435 	timeout = wait_for_completion_timeout(&evt->comp, timeout);
2436 
2437 	if (!timeout) {
2438 		rc = ibmvfc_cancel_all(sdev, 0);
2439 		if (!rc) {
2440 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2441 			if (rc == SUCCESS)
2442 				rc = 0;
2443 		}
2444 
2445 		if (rc) {
2446 			sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
2447 			ibmvfc_reset_host(vhost);
2448 			rsp_rc = -EIO;
2449 			rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
2450 
2451 			if (rc == SUCCESS)
2452 				rsp_rc = 0;
2453 
2454 			rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
2455 			if (rc != SUCCESS) {
2456 				spin_lock_irqsave(vhost->host->host_lock, flags);
2457 				ibmvfc_hard_reset_host(vhost);
2458 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
2459 				rsp_rc = 0;
2460 			}
2461 
2462 			goto out;
2463 		}
2464 	}
2465 
2466 	if (rsp_iu.cmd.status)
2467 		rsp_code = ibmvfc_get_err_result(vhost, &rsp_iu.cmd);
2468 
2469 	if (rsp_code) {
2470 		if (fc_rsp->flags & FCP_RSP_LEN_VALID)
2471 			rsp_code = fc_rsp->data.info.rsp_code;
2472 
2473 		sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2474 			    "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2475 			    ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2476 			    be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2477 			    fc_rsp->scsi_status);
2478 		rsp_rc = -EIO;
2479 	} else
2480 		sdev_printk(KERN_INFO, sdev, "Abort successful\n");
2481 
2482 out:
2483 	spin_lock_irqsave(vhost->host->host_lock, flags);
2484 	ibmvfc_free_event(evt);
2485 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
2486 	return rsp_rc;
2487 }
2488 
2489 /**
2490  * ibmvfc_eh_abort_handler - Abort a command
2491  * @cmd:	scsi command to abort
2492  *
2493  * Returns:
2494  *	SUCCESS / FAST_IO_FAIL / FAILED
2495  **/
2496 static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
2497 {
2498 	struct scsi_device *sdev = cmd->device;
2499 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2500 	int cancel_rc, block_rc;
2501 	int rc = FAILED;
2502 
2503 	ENTER;
2504 	block_rc = fc_block_scsi_eh(cmd);
2505 	ibmvfc_wait_while_resetting(vhost);
2506 	if (block_rc != FAST_IO_FAIL) {
2507 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
2508 		ibmvfc_abort_task_set(sdev);
2509 	} else
2510 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2511 
2512 	if (!cancel_rc)
2513 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2514 
2515 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2516 		rc = FAST_IO_FAIL;
2517 
2518 	LEAVE;
2519 	return rc;
2520 }
2521 
2522 /**
2523  * ibmvfc_eh_device_reset_handler - Reset a single LUN
2524  * @cmd:	scsi command struct
2525  *
2526  * Returns:
2527  *	SUCCESS / FAST_IO_FAIL / FAILED
2528  **/
2529 static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
2530 {
2531 	struct scsi_device *sdev = cmd->device;
2532 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2533 	int cancel_rc, block_rc, reset_rc = 0;
2534 	int rc = FAILED;
2535 
2536 	ENTER;
2537 	block_rc = fc_block_scsi_eh(cmd);
2538 	ibmvfc_wait_while_resetting(vhost);
2539 	if (block_rc != FAST_IO_FAIL) {
2540 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
2541 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
2542 	} else
2543 		cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2544 
2545 	if (!cancel_rc && !reset_rc)
2546 		rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
2547 
2548 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2549 		rc = FAST_IO_FAIL;
2550 
2551 	LEAVE;
2552 	return rc;
2553 }
2554 
2555 /**
2556  * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
2557  * @sdev:	scsi device struct
2558  * @data:	return code
2559  *
2560  **/
2561 static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
2562 {
2563 	unsigned long *rc = data;
2564 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2565 }
2566 
2567 /**
2568  * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
2569  * @sdev:	scsi device struct
2570  * @data:	return code
2571  *
2572  **/
2573 static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
2574 {
2575 	unsigned long *rc = data;
2576 	*rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
2577 }
2578 
2579 /**
2580  * ibmvfc_eh_target_reset_handler - Reset the target
2581  * @cmd:	scsi command struct
2582  *
2583  * Returns:
2584  *	SUCCESS / FAST_IO_FAIL / FAILED
2585  **/
2586 static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
2587 {
2588 	struct scsi_device *sdev = cmd->device;
2589 	struct ibmvfc_host *vhost = shost_priv(sdev->host);
2590 	struct scsi_target *starget = scsi_target(sdev);
2591 	int block_rc;
2592 	int reset_rc = 0;
2593 	int rc = FAILED;
2594 	unsigned long cancel_rc = 0;
2595 
2596 	ENTER;
2597 	block_rc = fc_block_scsi_eh(cmd);
2598 	ibmvfc_wait_while_resetting(vhost);
2599 	if (block_rc != FAST_IO_FAIL) {
2600 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
2601 		reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
2602 	} else
2603 		starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
2604 
2605 	if (!cancel_rc && !reset_rc)
2606 		rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
2607 
2608 	if (block_rc == FAST_IO_FAIL && rc != FAILED)
2609 		rc = FAST_IO_FAIL;
2610 
2611 	LEAVE;
2612 	return rc;
2613 }
2614 
2615 /**
2616  * ibmvfc_eh_host_reset_handler - Reset the connection to the server
2617  * @cmd:	struct scsi_cmnd having problems
2618  *
2619  **/
2620 static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
2621 {
2622 	int rc;
2623 	struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
2624 
2625 	dev_err(vhost->dev, "Resetting connection due to error recovery\n");
2626 	rc = ibmvfc_issue_fc_host_lip(vhost->host);
2627 
2628 	return rc ? FAILED : SUCCESS;
2629 }
2630 
2631 /**
2632  * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
2633  * @rport:		rport struct
2634  *
2635  * Return value:
2636  * 	none
2637  **/
2638 static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
2639 {
2640 	struct Scsi_Host *shost = rport_to_shost(rport);
2641 	struct ibmvfc_host *vhost = shost_priv(shost);
2642 	struct fc_rport *dev_rport;
2643 	struct scsi_device *sdev;
2644 	struct ibmvfc_target *tgt;
2645 	unsigned long rc, flags;
2646 	unsigned int found;
2647 
2648 	ENTER;
2649 	shost_for_each_device(sdev, shost) {
2650 		dev_rport = starget_to_rport(scsi_target(sdev));
2651 		if (dev_rport != rport)
2652 			continue;
2653 		ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
2654 	}
2655 
2656 	rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
2657 
2658 	if (rc == FAILED)
2659 		ibmvfc_issue_fc_host_lip(shost);
2660 
2661 	spin_lock_irqsave(shost->host_lock, flags);
2662 	found = 0;
2663 	list_for_each_entry(tgt, &vhost->targets, queue) {
2664 		if (tgt->scsi_id == rport->port_id) {
2665 			found++;
2666 			break;
2667 		}
2668 	}
2669 
2670 	if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
2671 		/*
2672 		 * If we get here, that means we previously attempted to send
2673 		 * an implicit logout to the target but it failed, most likely
2674 		 * due to I/O being pending, so we need to send it again
2675 		 */
2676 		ibmvfc_del_tgt(tgt);
2677 		ibmvfc_reinit_host(vhost);
2678 	}
2679 
2680 	spin_unlock_irqrestore(shost->host_lock, flags);
2681 	LEAVE;
2682 }
2683 
/* Async event descriptors: event name, event code, and the log level
 * at which ibmvfc_handle_async reports the event.
 */
static const struct ibmvfc_async_desc ae_desc [] = {
	{ "PLOGI",	IBMVFC_AE_ELS_PLOGI,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "LOGO",	IBMVFC_AE_ELS_LOGO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "PRLO",	IBMVFC_AE_ELS_PRLO,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "N-Port SCN",	IBMVFC_AE_SCN_NPORT,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Group SCN",	IBMVFC_AE_SCN_GROUP,	IBMVFC_DEFAULT_LOG_LEVEL + 1 },
	{ "Domain SCN",	IBMVFC_AE_SCN_DOMAIN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Fabric SCN",	IBMVFC_AE_SCN_FABRIC,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Up",	IBMVFC_AE_LINK_UP,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Down",	IBMVFC_AE_LINK_DOWN,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Link Dead",	IBMVFC_AE_LINK_DEAD,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Halt",	IBMVFC_AE_HALT,		IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Resume",	IBMVFC_AE_RESUME,	IBMVFC_DEFAULT_LOG_LEVEL },
	{ "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
};
2699 
/* Fallback descriptor returned when an event code is not in ae_desc */
static const struct ibmvfc_async_desc unknown_ae = {
	"Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
};
2703 
2704 /**
2705  * ibmvfc_get_ae_desc - Get text description for async event
2706  * @ae:	async event
2707  *
2708  **/
2709 static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
2710 {
2711 	int i;
2712 
2713 	for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
2714 		if (ae_desc[i].ae == ae)
2715 			return &ae_desc[i];
2716 
2717 	return &unknown_ae;
2718 }
2719 
/* Link state descriptions appended to async event log messages */
static const struct {
	enum ibmvfc_ae_link_state state;
	const char *desc;
} link_desc [] = {
	{ IBMVFC_AE_LS_LINK_UP,		" link up" },
	{ IBMVFC_AE_LS_LINK_BOUNCED,	" link bounced" },
	{ IBMVFC_AE_LS_LINK_DOWN,	" link down" },
	{ IBMVFC_AE_LS_LINK_DEAD,	" link dead" },
};
2729 
2730 /**
2731  * ibmvfc_get_link_state - Get text description for link state
2732  * @state:	link state
2733  *
2734  **/
2735 static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
2736 {
2737 	int i;
2738 
2739 	for (i = 0; i < ARRAY_SIZE(link_desc); i++)
2740 		if (link_desc[i].state == state)
2741 			return link_desc[i].desc;
2742 
2743 	return "";
2744 }
2745 
2746 /**
2747  * ibmvfc_handle_async - Handle an async event from the adapter
2748  * @crq:	crq to process
2749  * @vhost:	ibmvfc host struct
2750  *
2751  **/
2752 static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
2753 				struct ibmvfc_host *vhost)
2754 {
2755 	const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
2756 	struct ibmvfc_target *tgt;
2757 
2758 	ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
2759 		   " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
2760 		   be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
2761 		   ibmvfc_get_link_state(crq->link_state));
2762 
2763 	switch (be64_to_cpu(crq->event)) {
2764 	case IBMVFC_AE_RESUME:
2765 		switch (crq->link_state) {
2766 		case IBMVFC_AE_LS_LINK_DOWN:
2767 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2768 			break;
2769 		case IBMVFC_AE_LS_LINK_DEAD:
2770 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2771 			break;
2772 		case IBMVFC_AE_LS_LINK_UP:
2773 		case IBMVFC_AE_LS_LINK_BOUNCED:
2774 		default:
2775 			vhost->events_to_log |= IBMVFC_AE_LINKUP;
2776 			vhost->delay_init = 1;
2777 			__ibmvfc_reset_host(vhost);
2778 			break;
2779 		}
2780 
2781 		break;
2782 	case IBMVFC_AE_LINK_UP:
2783 		vhost->events_to_log |= IBMVFC_AE_LINKUP;
2784 		vhost->delay_init = 1;
2785 		__ibmvfc_reset_host(vhost);
2786 		break;
2787 	case IBMVFC_AE_SCN_FABRIC:
2788 	case IBMVFC_AE_SCN_DOMAIN:
2789 		vhost->events_to_log |= IBMVFC_AE_RSCN;
2790 		if (vhost->state < IBMVFC_HALTED) {
2791 			vhost->delay_init = 1;
2792 			__ibmvfc_reset_host(vhost);
2793 		}
2794 		break;
2795 	case IBMVFC_AE_SCN_NPORT:
2796 	case IBMVFC_AE_SCN_GROUP:
2797 		vhost->events_to_log |= IBMVFC_AE_RSCN;
2798 		ibmvfc_reinit_host(vhost);
2799 		break;
2800 	case IBMVFC_AE_ELS_LOGO:
2801 	case IBMVFC_AE_ELS_PRLO:
2802 	case IBMVFC_AE_ELS_PLOGI:
2803 		list_for_each_entry(tgt, &vhost->targets, queue) {
2804 			if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
2805 				break;
2806 			if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
2807 				continue;
2808 			if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
2809 				continue;
2810 			if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
2811 				continue;
2812 			if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
2813 				tgt->logo_rcvd = 1;
2814 			if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
2815 				ibmvfc_del_tgt(tgt);
2816 				ibmvfc_reinit_host(vhost);
2817 			}
2818 		}
2819 		break;
2820 	case IBMVFC_AE_LINK_DOWN:
2821 	case IBMVFC_AE_ADAPTER_FAILED:
2822 		ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2823 		break;
2824 	case IBMVFC_AE_LINK_DEAD:
2825 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
2826 		break;
2827 	case IBMVFC_AE_HALT:
2828 		ibmvfc_link_down(vhost, IBMVFC_HALTED);
2829 		break;
2830 	default:
2831 		dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
2832 		break;
2833 	}
2834 }
2835 
2836 /**
2837  * ibmvfc_handle_crq - Handles and frees received events in the CRQ
2838  * @crq:	Command/Response queue
2839  * @vhost:	ibmvfc host struct
2840  *
2841  **/
2842 static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2843 {
2844 	long rc;
2845 	struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
2846 
2847 	switch (crq->valid) {
2848 	case IBMVFC_CRQ_INIT_RSP:
2849 		switch (crq->format) {
2850 		case IBMVFC_CRQ_INIT:
2851 			dev_info(vhost->dev, "Partner initialized\n");
2852 			/* Send back a response */
2853 			rc = ibmvfc_send_crq_init_complete(vhost);
2854 			if (rc == 0)
2855 				ibmvfc_init_host(vhost);
2856 			else
2857 				dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
2858 			break;
2859 		case IBMVFC_CRQ_INIT_COMPLETE:
2860 			dev_info(vhost->dev, "Partner initialization complete\n");
2861 			ibmvfc_init_host(vhost);
2862 			break;
2863 		default:
2864 			dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
2865 		}
2866 		return;
2867 	case IBMVFC_CRQ_XPORT_EVENT:
2868 		vhost->state = IBMVFC_NO_CRQ;
2869 		vhost->logged_in = 0;
2870 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2871 		if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2872 			/* We need to re-setup the interpartition connection */
2873 			dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
2874 			vhost->client_migrated = 1;
2875 			ibmvfc_purge_requests(vhost, DID_REQUEUE);
2876 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2877 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2878 		} else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
2879 			dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
2880 			ibmvfc_purge_requests(vhost, DID_ERROR);
2881 			ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2882 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2883 		} else {
2884 			dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
2885 		}
2886 		return;
2887 	case IBMVFC_CRQ_CMD_RSP:
2888 		break;
2889 	default:
2890 		dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
2891 		return;
2892 	}
2893 
2894 	if (crq->format == IBMVFC_ASYNC_EVENT)
2895 		return;
2896 
2897 	/* The only kind of payload CRQs we should get are responses to
2898 	 * things we send. Make sure this response is to something we
2899 	 * actually sent
2900 	 */
2901 	if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
2902 		dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
2903 			crq->ioba);
2904 		return;
2905 	}
2906 
2907 	if (unlikely(atomic_read(&evt->free))) {
2908 		dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
2909 			crq->ioba);
2910 		return;
2911 	}
2912 
2913 	del_timer(&evt->timer);
2914 	list_del(&evt->queue);
2915 	ibmvfc_trc_end(evt);
2916 	evt->done(evt);
2917 }
2918 
2919 /**
2920  * ibmvfc_scan_finished - Check if the device scan is done.
2921  * @shost:	scsi host struct
2922  * @time:	current elapsed time
2923  *
2924  * Returns:
2925  *	0 if scan is not done / 1 if scan is done
2926  **/
2927 static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
2928 {
2929 	unsigned long flags;
2930 	struct ibmvfc_host *vhost = shost_priv(shost);
2931 	int done = 0;
2932 
2933 	spin_lock_irqsave(shost->host_lock, flags);
2934 	if (time >= (init_timeout * HZ)) {
2935 		dev_info(vhost->dev, "Scan taking longer than %d seconds, "
2936 			 "continuing initialization\n", init_timeout);
2937 		done = 1;
2938 	}
2939 
2940 	if (vhost->scan_complete)
2941 		done = 1;
2942 	spin_unlock_irqrestore(shost->host_lock, flags);
2943 	return done;
2944 }
2945 
2946 /**
2947  * ibmvfc_slave_alloc - Setup the device's task set value
2948  * @sdev:	struct scsi_device device to configure
2949  *
2950  * Set the device's task set value so that error handling works as
2951  * expected.
2952  *
2953  * Returns:
2954  *	0 on success / -ENXIO if device does not exist
2955  **/
2956 static int ibmvfc_slave_alloc(struct scsi_device *sdev)
2957 {
2958 	struct Scsi_Host *shost = sdev->host;
2959 	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
2960 	struct ibmvfc_host *vhost = shost_priv(shost);
2961 	unsigned long flags = 0;
2962 
2963 	if (!rport || fc_remote_port_chkready(rport))
2964 		return -ENXIO;
2965 
2966 	spin_lock_irqsave(shost->host_lock, flags);
2967 	sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
2968 	spin_unlock_irqrestore(shost->host_lock, flags);
2969 	return 0;
2970 }
2971 
2972 /**
2973  * ibmvfc_target_alloc - Setup the target's task set value
2974  * @starget:	struct scsi_target
2975  *
2976  * Set the target's task set value so that error handling works as
2977  * expected.
2978  *
2979  * Returns:
2980  *	0 on success / -ENXIO if device does not exist
2981  **/
2982 static int ibmvfc_target_alloc(struct scsi_target *starget)
2983 {
2984 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
2985 	struct ibmvfc_host *vhost = shost_priv(shost);
2986 	unsigned long flags = 0;
2987 
2988 	spin_lock_irqsave(shost->host_lock, flags);
2989 	starget->hostdata = (void *)(unsigned long)vhost->task_set++;
2990 	spin_unlock_irqrestore(shost->host_lock, flags);
2991 	return 0;
2992 }
2993 
2994 /**
2995  * ibmvfc_slave_configure - Configure the device
2996  * @sdev:	struct scsi_device device to configure
2997  *
2998  * Enable allow_restart for a device if it is a disk. Adjust the
2999  * queue_depth here also.
3000  *
3001  * Returns:
3002  *	0
3003  **/
3004 static int ibmvfc_slave_configure(struct scsi_device *sdev)
3005 {
3006 	struct Scsi_Host *shost = sdev->host;
3007 	unsigned long flags = 0;
3008 
3009 	spin_lock_irqsave(shost->host_lock, flags);
3010 	if (sdev->type == TYPE_DISK) {
3011 		sdev->allow_restart = 1;
3012 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
3013 	}
3014 	spin_unlock_irqrestore(shost->host_lock, flags);
3015 	return 0;
3016 }
3017 
3018 /**
3019  * ibmvfc_change_queue_depth - Change the device's queue depth
3020  * @sdev:	scsi device struct
3021  * @qdepth:	depth to set
3022  * @reason:	calling context
3023  *
3024  * Return value:
3025  * 	actual depth set
3026  **/
3027 static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
3028 {
3029 	if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
3030 		qdepth = IBMVFC_MAX_CMDS_PER_LUN;
3031 
3032 	return scsi_change_queue_depth(sdev, qdepth);
3033 }
3034 
3035 static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
3036 						 struct device_attribute *attr, char *buf)
3037 {
3038 	struct Scsi_Host *shost = class_to_shost(dev);
3039 	struct ibmvfc_host *vhost = shost_priv(shost);
3040 
3041 	return snprintf(buf, PAGE_SIZE, "%s\n",
3042 			vhost->login_buf->resp.partition_name);
3043 }
3044 
3045 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
3046 					    struct device_attribute *attr, char *buf)
3047 {
3048 	struct Scsi_Host *shost = class_to_shost(dev);
3049 	struct ibmvfc_host *vhost = shost_priv(shost);
3050 
3051 	return snprintf(buf, PAGE_SIZE, "%s\n",
3052 			vhost->login_buf->resp.device_name);
3053 }
3054 
3055 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
3056 					 struct device_attribute *attr, char *buf)
3057 {
3058 	struct Scsi_Host *shost = class_to_shost(dev);
3059 	struct ibmvfc_host *vhost = shost_priv(shost);
3060 
3061 	return snprintf(buf, PAGE_SIZE, "%s\n",
3062 			vhost->login_buf->resp.port_loc_code);
3063 }
3064 
3065 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
3066 					 struct device_attribute *attr, char *buf)
3067 {
3068 	struct Scsi_Host *shost = class_to_shost(dev);
3069 	struct ibmvfc_host *vhost = shost_priv(shost);
3070 
3071 	return snprintf(buf, PAGE_SIZE, "%s\n",
3072 			vhost->login_buf->resp.drc_name);
3073 }
3074 
3075 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
3076 					     struct device_attribute *attr, char *buf)
3077 {
3078 	struct Scsi_Host *shost = class_to_shost(dev);
3079 	struct ibmvfc_host *vhost = shost_priv(shost);
3080 	return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
3081 }
3082 
3083 static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
3084 					     struct device_attribute *attr, char *buf)
3085 {
3086 	struct Scsi_Host *shost = class_to_shost(dev);
3087 	struct ibmvfc_host *vhost = shost_priv(shost);
3088 	return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
3089 }
3090 
3091 /**
3092  * ibmvfc_show_log_level - Show the adapter's error logging level
3093  * @dev:	class device struct
3094  * @buf:	buffer
3095  *
3096  * Return value:
3097  * 	number of bytes printed to buffer
3098  **/
3099 static ssize_t ibmvfc_show_log_level(struct device *dev,
3100 				     struct device_attribute *attr, char *buf)
3101 {
3102 	struct Scsi_Host *shost = class_to_shost(dev);
3103 	struct ibmvfc_host *vhost = shost_priv(shost);
3104 	unsigned long flags = 0;
3105 	int len;
3106 
3107 	spin_lock_irqsave(shost->host_lock, flags);
3108 	len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
3109 	spin_unlock_irqrestore(shost->host_lock, flags);
3110 	return len;
3111 }
3112 
3113 /**
3114  * ibmvfc_store_log_level - Change the adapter's error logging level
3115  * @dev:	class device struct
3116  * @buf:	buffer
3117  *
3118  * Return value:
3119  * 	number of bytes printed to buffer
3120  **/
3121 static ssize_t ibmvfc_store_log_level(struct device *dev,
3122 				      struct device_attribute *attr,
3123 				      const char *buf, size_t count)
3124 {
3125 	struct Scsi_Host *shost = class_to_shost(dev);
3126 	struct ibmvfc_host *vhost = shost_priv(shost);
3127 	unsigned long flags = 0;
3128 
3129 	spin_lock_irqsave(shost->host_lock, flags);
3130 	vhost->log_level = simple_strtoul(buf, NULL, 10);
3131 	spin_unlock_irqrestore(shost->host_lock, flags);
3132 	return strlen(buf);
3133 }
3134 
/* Read-only NPIV login attributes, plus a read/write error logging level */
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
		   ibmvfc_show_log_level, ibmvfc_store_log_level);
3143 
3144 #ifdef CONFIG_SCSI_IBMVFC_TRACE
3145 /**
3146  * ibmvfc_read_trace - Dump the adapter trace
3147  * @filp:		open sysfs file
3148  * @kobj:		kobject struct
3149  * @bin_attr:	bin_attribute struct
3150  * @buf:		buffer
3151  * @off:		offset
3152  * @count:		buffer size
3153  *
3154  * Return value:
3155  *	number of bytes printed to buffer
3156  **/
3157 static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
3158 				 struct bin_attribute *bin_attr,
3159 				 char *buf, loff_t off, size_t count)
3160 {
3161 	struct device *dev = container_of(kobj, struct device, kobj);
3162 	struct Scsi_Host *shost = class_to_shost(dev);
3163 	struct ibmvfc_host *vhost = shost_priv(shost);
3164 	unsigned long flags = 0;
3165 	int size = IBMVFC_TRACE_SIZE;
3166 	char *src = (char *)vhost->trace;
3167 
3168 	if (off > size)
3169 		return 0;
3170 	if (off + count > size) {
3171 		size -= off;
3172 		count = size;
3173 	}
3174 
3175 	spin_lock_irqsave(shost->host_lock, flags);
3176 	memcpy(buf, &src[off], count);
3177 	spin_unlock_irqrestore(shost->host_lock, flags);
3178 	return count;
3179 }
3180 
/* Binary sysfs attribute exposing the adapter trace buffer (read-only) */
static struct bin_attribute ibmvfc_trace_attr = {
	.attr =	{
		.name = "trace",
		.mode = S_IRUGO,
	},
	.size = 0,	/* 0 = size not known up front; reads bounded in ibmvfc_read_trace */
	.read = ibmvfc_read_trace,
};
3189 #endif
3190 
/* Host sysfs attributes, hooked up via scsi_host_template.shost_attrs */
static struct device_attribute *ibmvfc_attrs[] = {
	&dev_attr_partition_name,
	&dev_attr_device_name,
	&dev_attr_port_loc_code,
	&dev_attr_drc_name,
	&dev_attr_npiv_version,
	&dev_attr_capabilities,
	&dev_attr_log_level,
	NULL
};
3201 
/* SCSI midlayer host template: entry points and queue limits for ibmvfc */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "IBM POWER Virtual FC Adapter",
	.proc_name = IBMVFC_NAME,
	.queuecommand = ibmvfc_queuecommand,
	.eh_timed_out = fc_eh_timed_out,
	.eh_abort_handler = ibmvfc_eh_abort_handler,
	.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
	.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
	.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
	.slave_alloc = ibmvfc_slave_alloc,
	.slave_configure = ibmvfc_slave_configure,
	.target_alloc = ibmvfc_target_alloc,
	.scan_finished = ibmvfc_scan_finished,
	.change_queue_depth = ibmvfc_change_queue_depth,
	.cmd_per_lun = 16,
	.can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.max_sectors = IBMVFC_MAX_SECTORS,
	.shost_attrs = ibmvfc_attrs,
	.track_queue_depth = 1,
};
3225 
3226 /**
3227  * ibmvfc_next_async_crq - Returns the next entry in async queue
3228  * @vhost:	ibmvfc host struct
3229  *
3230  * Returns:
3231  *	Pointer to next entry in queue / NULL if empty
3232  **/
3233 static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
3234 {
3235 	struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
3236 	struct ibmvfc_async_crq *crq;
3237 
3238 	crq = &async_crq->msgs[async_crq->cur];
3239 	if (crq->valid & 0x80) {
3240 		if (++async_crq->cur == async_crq->size)
3241 			async_crq->cur = 0;
3242 		rmb();
3243 	} else
3244 		crq = NULL;
3245 
3246 	return crq;
3247 }
3248 
3249 /**
3250  * ibmvfc_next_crq - Returns the next entry in message queue
3251  * @vhost:	ibmvfc host struct
3252  *
3253  * Returns:
3254  *	Pointer to next entry in queue / NULL if empty
3255  **/
3256 static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
3257 {
3258 	struct ibmvfc_crq_queue *queue = &vhost->crq;
3259 	struct ibmvfc_crq *crq;
3260 
3261 	crq = &queue->msgs[queue->cur];
3262 	if (crq->valid & 0x80) {
3263 		if (++queue->cur == queue->size)
3264 			queue->cur = 0;
3265 		rmb();
3266 	} else
3267 		crq = NULL;
3268 
3269 	return crq;
3270 }
3271 
3272 /**
3273  * ibmvfc_interrupt - Interrupt handler
3274  * @irq:		number of irq to handle, not used
3275  * @dev_instance: ibmvfc_host that received interrupt
3276  *
3277  * Returns:
3278  *	IRQ_HANDLED
3279  **/
3280 static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
3281 {
3282 	struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
3283 	unsigned long flags;
3284 
3285 	spin_lock_irqsave(vhost->host->host_lock, flags);
3286 	vio_disable_interrupts(to_vio_dev(vhost->dev));
3287 	tasklet_schedule(&vhost->tasklet);
3288 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3289 	return IRQ_HANDLED;
3290 }
3291 
3292 /**
3293  * ibmvfc_tasklet - Interrupt handler tasklet
3294  * @data:		ibmvfc host struct
3295  *
3296  * Returns:
3297  *	Nothing
3298  **/
3299 static void ibmvfc_tasklet(void *data)
3300 {
3301 	struct ibmvfc_host *vhost = data;
3302 	struct vio_dev *vdev = to_vio_dev(vhost->dev);
3303 	struct ibmvfc_crq *crq;
3304 	struct ibmvfc_async_crq *async;
3305 	unsigned long flags;
3306 	int done = 0;
3307 
3308 	spin_lock_irqsave(vhost->host->host_lock, flags);
3309 	while (!done) {
3310 		/* Pull all the valid messages off the async CRQ */
3311 		while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3312 			ibmvfc_handle_async(async, vhost);
3313 			async->valid = 0;
3314 			wmb();
3315 		}
3316 
3317 		/* Pull all the valid messages off the CRQ */
3318 		while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3319 			ibmvfc_handle_crq(crq, vhost);
3320 			crq->valid = 0;
3321 			wmb();
3322 		}
3323 
3324 		vio_enable_interrupts(vdev);
3325 		if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
3326 			vio_disable_interrupts(vdev);
3327 			ibmvfc_handle_async(async, vhost);
3328 			async->valid = 0;
3329 			wmb();
3330 		} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
3331 			vio_disable_interrupts(vdev);
3332 			ibmvfc_handle_crq(crq, vhost);
3333 			crq->valid = 0;
3334 			wmb();
3335 		} else
3336 			done = 1;
3337 	}
3338 
3339 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
3340 }
3341 
3342 /**
3343  * ibmvfc_init_tgt - Set the next init job step for the target
3344  * @tgt:		ibmvfc target struct
3345  * @job_step:	job step to perform
3346  *
3347  **/
3348 static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
3349 			    void (*job_step) (struct ibmvfc_target *))
3350 {
3351 	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
3352 		tgt->job_step = job_step;
3353 	wake_up(&tgt->vhost->work_wait_q);
3354 }
3355 
3356 /**
3357  * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
3358  * @tgt:		ibmvfc target struct
3359  * @job_step:	initialization job step
3360  *
3361  * Returns: 1 if step will be retried / 0 if not
3362  *
3363  **/
3364 static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
3365 				  void (*job_step) (struct ibmvfc_target *))
3366 {
3367 	if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
3368 		ibmvfc_del_tgt(tgt);
3369 		wake_up(&tgt->vhost->work_wait_q);
3370 		return 0;
3371 	} else
3372 		ibmvfc_init_tgt(tgt, job_step);
3373 	return 1;
3374 }
3375 
/* Defined in FC-LS */
static const struct {
	int code;	/* PRLI accept response code (bits 11:8 of the flags) */
	int retry;	/* non-zero if the PRLI should be retried */
	int logged_in;	/* non-zero if the response indicates a completed login */
} prli_rsp [] = {
	{ 0, 1, 0 },
	{ 1, 0, 1 },
	{ 2, 1, 0 },
	{ 3, 1, 0 },
	{ 4, 0, 0 },
	{ 5, 0, 0 },
	{ 6, 0, 1 },
	{ 7, 0, 0 },
	{ 8, 1, 0 },
};
3392 
3393 /**
3394  * ibmvfc_get_prli_rsp - Find PRLI response index
3395  * @flags:	PRLI response flags
3396  *
3397  **/
3398 static int ibmvfc_get_prli_rsp(u16 flags)
3399 {
3400 	int i;
3401 	int code = (flags & 0x0f00) >> 8;
3402 
3403 	for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
3404 		if (prli_rsp[i].code == code)
3405 			return i;
3406 
3407 	return 0;
3408 }
3409 
3410 /**
3411  * ibmvfc_tgt_prli_done - Completion handler for Process Login
3412  * @evt:	ibmvfc event struct
3413  *
3414  **/
3415 static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3416 {
3417 	struct ibmvfc_target *tgt = evt->tgt;
3418 	struct ibmvfc_host *vhost = evt->vhost;
3419 	struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
3420 	struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
3421 	u32 status = be16_to_cpu(rsp->common.status);
3422 	int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
3423 
3424 	vhost->discovery_threads--;
3425 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3426 	switch (status) {
3427 	case IBMVFC_MAD_SUCCESS:
3428 		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
3429 			parms->type, parms->flags, parms->service_parms);
3430 
3431 		if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
3432 			index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
3433 			if (prli_rsp[index].logged_in) {
3434 				if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
3435 					tgt->need_login = 0;
3436 					tgt->ids.roles = 0;
3437 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
3438 						tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
3439 					if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
3440 						tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
3441 					tgt->add_rport = 1;
3442 				} else
3443 					ibmvfc_del_tgt(tgt);
3444 			} else if (prli_rsp[index].retry)
3445 				ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3446 			else
3447 				ibmvfc_del_tgt(tgt);
3448 		} else
3449 			ibmvfc_del_tgt(tgt);
3450 		break;
3451 	case IBMVFC_MAD_DRIVER_FAILED:
3452 		break;
3453 	case IBMVFC_MAD_CRQ_ERROR:
3454 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3455 		break;
3456 	case IBMVFC_MAD_FAILED:
3457 	default:
3458 		if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
3459 		     be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
3460 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3461 		else if (tgt->logo_rcvd)
3462 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3463 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3464 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
3465 		else
3466 			ibmvfc_del_tgt(tgt);
3467 
3468 		tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3469 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3470 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3471 		break;
3472 	}
3473 
3474 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3475 	ibmvfc_free_event(evt);
3476 	wake_up(&vhost->work_wait_q);
3477 }
3478 
3479 /**
3480  * ibmvfc_tgt_send_prli - Send a process login
3481  * @tgt:	ibmvfc target struct
3482  *
3483  **/
3484 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
3485 {
3486 	struct ibmvfc_process_login *prli;
3487 	struct ibmvfc_host *vhost = tgt->vhost;
3488 	struct ibmvfc_event *evt;
3489 
3490 	if (vhost->discovery_threads >= disc_threads)
3491 		return;
3492 
3493 	kref_get(&tgt->kref);
3494 	evt = ibmvfc_get_event(vhost);
3495 	vhost->discovery_threads++;
3496 	ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
3497 	evt->tgt = tgt;
3498 	prli = &evt->iu.prli;
3499 	memset(prli, 0, sizeof(*prli));
3500 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
3501 		prli->common.version = cpu_to_be32(2);
3502 		prli->target_wwpn = cpu_to_be64(tgt->wwpn);
3503 	} else {
3504 		prli->common.version = cpu_to_be32(1);
3505 	}
3506 	prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
3507 	prli->common.length = cpu_to_be16(sizeof(*prli));
3508 	prli->scsi_id = cpu_to_be64(tgt->scsi_id);
3509 
3510 	prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
3511 	prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
3512 	prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
3513 	prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
3514 
3515 	if (cls3_error)
3516 		prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
3517 
3518 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3519 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3520 		vhost->discovery_threads--;
3521 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3522 		kref_put(&tgt->kref, ibmvfc_release_tgt);
3523 	} else
3524 		tgt_dbg(tgt, "Sent process login\n");
3525 }
3526 
3527 /**
3528  * ibmvfc_tgt_plogi_done - Completion handler for Port Login
3529  * @evt:	ibmvfc event struct
3530  *
3531  **/
3532 static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3533 {
3534 	struct ibmvfc_target *tgt = evt->tgt;
3535 	struct ibmvfc_host *vhost = evt->vhost;
3536 	struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
3537 	u32 status = be16_to_cpu(rsp->common.status);
3538 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
3539 
3540 	vhost->discovery_threads--;
3541 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3542 	switch (status) {
3543 	case IBMVFC_MAD_SUCCESS:
3544 		tgt_dbg(tgt, "Port Login succeeded\n");
3545 		if (tgt->ids.port_name &&
3546 		    tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
3547 			vhost->reinit = 1;
3548 			tgt_dbg(tgt, "Port re-init required\n");
3549 			break;
3550 		}
3551 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
3552 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
3553 		tgt->ids.port_id = tgt->scsi_id;
3554 		memcpy(&tgt->service_parms, &rsp->service_parms,
3555 		       sizeof(tgt->service_parms));
3556 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
3557 		       sizeof(tgt->service_parms_change));
3558 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
3559 		break;
3560 	case IBMVFC_MAD_DRIVER_FAILED:
3561 		break;
3562 	case IBMVFC_MAD_CRQ_ERROR:
3563 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3564 		break;
3565 	case IBMVFC_MAD_FAILED:
3566 	default:
3567 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
3568 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
3569 		else
3570 			ibmvfc_del_tgt(tgt);
3571 
3572 		tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3573 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3574 					     be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3575 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3576 			ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
3577 		break;
3578 	}
3579 
3580 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3581 	ibmvfc_free_event(evt);
3582 	wake_up(&vhost->work_wait_q);
3583 }
3584 
3585 /**
3586  * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
3587  * @tgt:	ibmvfc target struct
3588  *
3589  **/
3590 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
3591 {
3592 	struct ibmvfc_port_login *plogi;
3593 	struct ibmvfc_host *vhost = tgt->vhost;
3594 	struct ibmvfc_event *evt;
3595 
3596 	if (vhost->discovery_threads >= disc_threads)
3597 		return;
3598 
3599 	kref_get(&tgt->kref);
3600 	tgt->logo_rcvd = 0;
3601 	evt = ibmvfc_get_event(vhost);
3602 	vhost->discovery_threads++;
3603 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3604 	ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
3605 	evt->tgt = tgt;
3606 	plogi = &evt->iu.plogi;
3607 	memset(plogi, 0, sizeof(*plogi));
3608 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
3609 		plogi->common.version = cpu_to_be32(2);
3610 		plogi->target_wwpn = cpu_to_be64(tgt->wwpn);
3611 	} else {
3612 		plogi->common.version = cpu_to_be32(1);
3613 	}
3614 	plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
3615 	plogi->common.length = cpu_to_be16(sizeof(*plogi));
3616 	plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
3617 
3618 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3619 		vhost->discovery_threads--;
3620 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3621 		kref_put(&tgt->kref, ibmvfc_release_tgt);
3622 	} else
3623 		tgt_dbg(tgt, "Sent port login\n");
3624 }
3625 
3626 /**
3627  * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
3628  * @evt:	ibmvfc event struct
3629  *
3630  **/
3631 static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
3632 {
3633 	struct ibmvfc_target *tgt = evt->tgt;
3634 	struct ibmvfc_host *vhost = evt->vhost;
3635 	struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
3636 	u32 status = be16_to_cpu(rsp->common.status);
3637 
3638 	vhost->discovery_threads--;
3639 	ibmvfc_free_event(evt);
3640 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3641 
3642 	switch (status) {
3643 	case IBMVFC_MAD_SUCCESS:
3644 		tgt_dbg(tgt, "Implicit Logout succeeded\n");
3645 		break;
3646 	case IBMVFC_MAD_DRIVER_FAILED:
3647 		kref_put(&tgt->kref, ibmvfc_release_tgt);
3648 		wake_up(&vhost->work_wait_q);
3649 		return;
3650 	case IBMVFC_MAD_FAILED:
3651 	default:
3652 		tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
3653 		break;
3654 	}
3655 
3656 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
3657 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3658 	wake_up(&vhost->work_wait_q);
3659 }
3660 
3661 /**
3662  * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
3663  * @tgt:		ibmvfc target struct
3664  *
3665  * Returns:
3666  *	Allocated and initialized ibmvfc_event struct
3667  **/
3668 static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
3669 								 void (*done) (struct ibmvfc_event *))
3670 {
3671 	struct ibmvfc_implicit_logout *mad;
3672 	struct ibmvfc_host *vhost = tgt->vhost;
3673 	struct ibmvfc_event *evt;
3674 
3675 	kref_get(&tgt->kref);
3676 	evt = ibmvfc_get_event(vhost);
3677 	ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
3678 	evt->tgt = tgt;
3679 	mad = &evt->iu.implicit_logout;
3680 	memset(mad, 0, sizeof(*mad));
3681 	mad->common.version = cpu_to_be32(1);
3682 	mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
3683 	mad->common.length = cpu_to_be16(sizeof(*mad));
3684 	mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
3685 	return evt;
3686 }
3687 
3688 /**
3689  * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
3690  * @tgt:		ibmvfc target struct
3691  *
3692  **/
3693 static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
3694 {
3695 	struct ibmvfc_host *vhost = tgt->vhost;
3696 	struct ibmvfc_event *evt;
3697 
3698 	if (vhost->discovery_threads >= disc_threads)
3699 		return;
3700 
3701 	vhost->discovery_threads++;
3702 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
3703 						   ibmvfc_tgt_implicit_logout_done);
3704 
3705 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
3706 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3707 		vhost->discovery_threads--;
3708 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3709 		kref_put(&tgt->kref, ibmvfc_release_tgt);
3710 	} else
3711 		tgt_dbg(tgt, "Sent Implicit Logout\n");
3712 }
3713 
3714 /**
3715  * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD
3716  * @evt:	ibmvfc event struct
3717  *
3718  **/
3719 static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
3720 {
3721 	struct ibmvfc_target *tgt = evt->tgt;
3722 	struct ibmvfc_host *vhost = evt->vhost;
3723 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3724 	u32 status = be16_to_cpu(mad->common.status);
3725 
3726 	vhost->discovery_threads--;
3727 	ibmvfc_free_event(evt);
3728 
3729 	/*
3730 	 * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
3731 	 * driver in which case we need to free up all the targets. If we are
3732 	 * not unloading, we will still go through a hard reset to get out of
3733 	 * offline state, so there is no need to track the old targets in that
3734 	 * case.
3735 	 */
3736 	if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
3737 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3738 	else
3739 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
3740 
3741 	tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
3742 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3743 	wake_up(&vhost->work_wait_q);
3744 }
3745 
3746 /**
3747  * ibmvfc_tgt_implicit_logout_and_del - Initiate an Implicit Logout for specified target
3748  * @tgt:		ibmvfc target struct
3749  *
3750  **/
3751 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
3752 {
3753 	struct ibmvfc_host *vhost = tgt->vhost;
3754 	struct ibmvfc_event *evt;
3755 
3756 	if (!vhost->logged_in) {
3757 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3758 		return;
3759 	}
3760 
3761 	if (vhost->discovery_threads >= disc_threads)
3762 		return;
3763 
3764 	vhost->discovery_threads++;
3765 	evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
3766 						   ibmvfc_tgt_implicit_logout_and_del_done);
3767 
3768 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
3769 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
3770 		vhost->discovery_threads--;
3771 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3772 		kref_put(&tgt->kref, ibmvfc_release_tgt);
3773 	} else
3774 		tgt_dbg(tgt, "Sent Implicit Logout\n");
3775 }
3776 
3777 /**
3778  * ibmvfc_tgt_move_login_done - Completion handler for Move Login
3779  * @evt:	ibmvfc event struct
3780  *
3781  **/
3782 static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
3783 {
3784 	struct ibmvfc_target *tgt = evt->tgt;
3785 	struct ibmvfc_host *vhost = evt->vhost;
3786 	struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
3787 	u32 status = be16_to_cpu(rsp->common.status);
3788 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
3789 
3790 	vhost->discovery_threads--;
3791 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3792 	switch (status) {
3793 	case IBMVFC_MAD_SUCCESS:
3794 		tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
3795 		tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
3796 		tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
3797 		tgt->ids.port_id = tgt->scsi_id;
3798 		memcpy(&tgt->service_parms, &rsp->service_parms,
3799 		       sizeof(tgt->service_parms));
3800 		memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
3801 		       sizeof(tgt->service_parms_change));
3802 		ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
3803 		break;
3804 	case IBMVFC_MAD_DRIVER_FAILED:
3805 		break;
3806 	case IBMVFC_MAD_CRQ_ERROR:
3807 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
3808 		break;
3809 	case IBMVFC_MAD_FAILED:
3810 	default:
3811 		level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
3812 
3813 		tgt_log(tgt, level,
3814 			"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
3815 			tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
3816 			status);
3817 		break;
3818 	}
3819 
3820 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3821 	ibmvfc_free_event(evt);
3822 	wake_up(&vhost->work_wait_q);
3823 }
3824 

/**
 * ibmvfc_tgt_move_login - Initiate a move login for specified target
 * @tgt:		ibmvfc target struct
 *
 * Sends a Move Login MAD to transfer the login from the target's old
 * scsi_id to its new one, completion handled by
 * ibmvfc_tgt_move_login_done(). Silently returns if the discovery
 * thread limit has been reached. Takes a target reference that the
 * completion path (or the send-failure path) drops; on send failure the
 * target is marked for rport deletion.
 **/
static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
{
	struct ibmvfc_host *vhost = tgt->vhost;
	struct ibmvfc_move_login *move;
	struct ibmvfc_event *evt;

	if (vhost->discovery_threads >= disc_threads)
		return;

	kref_get(&tgt->kref);
	evt = ibmvfc_get_event(vhost);
	vhost->discovery_threads++;
	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
	ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
	evt->tgt = tgt;
	move = &evt->iu.move_login;
	memset(move, 0, sizeof(*move));
	move->common.version = cpu_to_be32(1);
	move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
	move->common.length = cpu_to_be16(sizeof(*move));

	move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
	move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
	move->wwpn = cpu_to_be64(tgt->wwpn);
	move->node_name = cpu_to_be64(tgt->ids.node_name);

	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
		vhost->discovery_threads--;
		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
		kref_put(&tgt->kref, ibmvfc_release_tgt);
	} else
		tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
}
3864 
3865 /**
3866  * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
3867  * @mad:	ibmvfc passthru mad struct
3868  * @tgt:	ibmvfc target struct
3869  *
3870  * Returns:
3871  *	1 if PLOGI needed / 0 if PLOGI not needed
3872  **/
3873 static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
3874 				    struct ibmvfc_target *tgt)
3875 {
3876 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
3877 		return 1;
3878 	if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
3879 		return 1;
3880 	if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
3881 		return 1;
3882 	return 0;
3883 }
3884 
3885 /**
3886  * ibmvfc_tgt_adisc_done - Completion handler for ADISC
3887  * @evt:	ibmvfc event struct
3888  *
3889  **/
3890 static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3891 {
3892 	struct ibmvfc_target *tgt = evt->tgt;
3893 	struct ibmvfc_host *vhost = evt->vhost;
3894 	struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
3895 	u32 status = be16_to_cpu(mad->common.status);
3896 	u8 fc_reason, fc_explain;
3897 
3898 	vhost->discovery_threads--;
3899 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
3900 	del_timer(&tgt->timer);
3901 
3902 	switch (status) {
3903 	case IBMVFC_MAD_SUCCESS:
3904 		tgt_dbg(tgt, "ADISC succeeded\n");
3905 		if (ibmvfc_adisc_needs_plogi(mad, tgt))
3906 			ibmvfc_del_tgt(tgt);
3907 		break;
3908 	case IBMVFC_MAD_DRIVER_FAILED:
3909 		break;
3910 	case IBMVFC_MAD_FAILED:
3911 	default:
3912 		ibmvfc_del_tgt(tgt);
3913 		fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
3914 		fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
3915 		tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3916 			 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
3917 			 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
3918 			 ibmvfc_get_fc_type(fc_reason), fc_reason,
3919 			 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
3920 		break;
3921 	}
3922 
3923 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3924 	ibmvfc_free_event(evt);
3925 	wake_up(&vhost->work_wait_q);
3926 }
3927 
3928 /**
3929  * ibmvfc_init_passthru - Initialize an event struct for FC passthru
3930  * @evt:		ibmvfc event struct
3931  *
3932  **/
3933 static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
3934 {
3935 	struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
3936 
3937 	memset(mad, 0, sizeof(*mad));
3938 	mad->common.version = cpu_to_be32(1);
3939 	mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
3940 	mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
3941 	mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3942 		offsetof(struct ibmvfc_passthru_mad, iu));
3943 	mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
3944 	mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
3945 	mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
3946 	mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3947 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3948 		offsetof(struct ibmvfc_passthru_fc_iu, payload));
3949 	mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
3950 	mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
3951 		offsetof(struct ibmvfc_passthru_mad, fc_iu) +
3952 		offsetof(struct ibmvfc_passthru_fc_iu, response));
3953 	mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
3954 }
3955 
3956 /**
3957  * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
3958  * @evt:		ibmvfc event struct
3959  *
3960  * Just cleanup this event struct. Everything else is handled by
3961  * the ADISC completion handler. If the ADISC never actually comes
3962  * back, we still have the timer running on the ADISC event struct
3963  * which will fire and cause the CRQ to get reset.
3964  *
3965  **/
3966 static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
3967 {
3968 	struct ibmvfc_host *vhost = evt->vhost;
3969 	struct ibmvfc_target *tgt = evt->tgt;
3970 
3971 	tgt_dbg(tgt, "ADISC cancel complete\n");
3972 	vhost->abort_threads--;
3973 	ibmvfc_free_event(evt);
3974 	kref_put(&tgt->kref, ibmvfc_release_tgt);
3975 	wake_up(&vhost->work_wait_q);
3976 }
3977 
3978 /**
3979  * ibmvfc_adisc_timeout - Handle an ADISC timeout
3980  * @tgt:		ibmvfc target struct
3981  *
3982  * If an ADISC times out, send a cancel. If the cancel times
3983  * out, reset the CRQ. When the ADISC comes back as cancelled,
3984  * log back into the target.
3985  **/
3986 static void ibmvfc_adisc_timeout(struct timer_list *t)
3987 {
3988 	struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
3989 	struct ibmvfc_host *vhost = tgt->vhost;
3990 	struct ibmvfc_event *evt;
3991 	struct ibmvfc_tmf *tmf;
3992 	unsigned long flags;
3993 	int rc;
3994 
3995 	tgt_dbg(tgt, "ADISC timeout\n");
3996 	spin_lock_irqsave(vhost->host->host_lock, flags);
3997 	if (vhost->abort_threads >= disc_threads ||
3998 	    tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
3999 	    vhost->state != IBMVFC_INITIALIZING ||
4000 	    vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
4001 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4002 		return;
4003 	}
4004 
4005 	vhost->abort_threads++;
4006 	kref_get(&tgt->kref);
4007 	evt = ibmvfc_get_event(vhost);
4008 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
4009 
4010 	evt->tgt = tgt;
4011 	tmf = &evt->iu.tmf;
4012 	memset(tmf, 0, sizeof(*tmf));
4013 	if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
4014 		tmf->common.version = cpu_to_be32(2);
4015 		tmf->target_wwpn = cpu_to_be64(tgt->wwpn);
4016 	} else {
4017 		tmf->common.version = cpu_to_be32(1);
4018 	}
4019 	tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
4020 	tmf->common.length = cpu_to_be16(sizeof(*tmf));
4021 	tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
4022 	tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
4023 
4024 	rc = ibmvfc_send_event(evt, vhost, default_timeout);
4025 
4026 	if (rc) {
4027 		tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
4028 		vhost->abort_threads--;
4029 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4030 		__ibmvfc_reset_host(vhost);
4031 	} else
4032 		tgt_dbg(tgt, "Attempting to cancel ADISC\n");
4033 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4034 }
4035 
4036 /**
4037  * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
4038  * @tgt:		ibmvfc target struct
4039  *
4040  * When sending an ADISC we end up with two timers running. The
4041  * first timer is the timer in the ibmvfc target struct. If this
4042  * fires, we send a cancel to the target. The second timer is the
4043  * timer on the ibmvfc event for the ADISC, which is longer. If that
4044  * fires, it means the ADISC timed out and our attempt to cancel it
4045  * also failed, so we need to reset the CRQ.
4046  **/
4047 static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
4048 {
4049 	struct ibmvfc_passthru_mad *mad;
4050 	struct ibmvfc_host *vhost = tgt->vhost;
4051 	struct ibmvfc_event *evt;
4052 
4053 	if (vhost->discovery_threads >= disc_threads)
4054 		return;
4055 
4056 	kref_get(&tgt->kref);
4057 	evt = ibmvfc_get_event(vhost);
4058 	vhost->discovery_threads++;
4059 	ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
4060 	evt->tgt = tgt;
4061 
4062 	ibmvfc_init_passthru(evt);
4063 	mad = &evt->iu.passthru;
4064 	mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
4065 	mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
4066 	mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
4067 
4068 	mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
4069 	memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
4070 	       sizeof(vhost->login_buf->resp.port_name));
4071 	memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
4072 	       sizeof(vhost->login_buf->resp.node_name));
4073 	mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
4074 
4075 	if (timer_pending(&tgt->timer))
4076 		mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
4077 	else {
4078 		tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
4079 		add_timer(&tgt->timer);
4080 	}
4081 
4082 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4083 	if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
4084 		vhost->discovery_threads--;
4085 		del_timer(&tgt->timer);
4086 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4087 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4088 	} else
4089 		tgt_dbg(tgt, "Sent ADISC\n");
4090 }
4091 
4092 /**
4093  * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
4094  * @evt:	ibmvfc event struct
4095  *
4096  **/
4097 static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
4098 {
4099 	struct ibmvfc_target *tgt = evt->tgt;
4100 	struct ibmvfc_host *vhost = evt->vhost;
4101 	struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
4102 	u32 status = be16_to_cpu(rsp->common.status);
4103 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4104 
4105 	vhost->discovery_threads--;
4106 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4107 	switch (status) {
4108 	case IBMVFC_MAD_SUCCESS:
4109 		tgt_dbg(tgt, "Query Target succeeded\n");
4110 		if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
4111 			ibmvfc_del_tgt(tgt);
4112 		else
4113 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
4114 		break;
4115 	case IBMVFC_MAD_DRIVER_FAILED:
4116 		break;
4117 	case IBMVFC_MAD_CRQ_ERROR:
4118 		ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4119 		break;
4120 	case IBMVFC_MAD_FAILED:
4121 	default:
4122 		if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
4123 		    be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
4124 		    be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
4125 			ibmvfc_del_tgt(tgt);
4126 		else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4127 			level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
4128 		else
4129 			ibmvfc_del_tgt(tgt);
4130 
4131 		tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
4132 			ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4133 			be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
4134 			ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
4135 			ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
4136 			status);
4137 		break;
4138 	}
4139 
4140 	kref_put(&tgt->kref, ibmvfc_release_tgt);
4141 	ibmvfc_free_event(evt);
4142 	wake_up(&vhost->work_wait_q);
4143 }
4144 
4145 /**
4146  * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
4147  * @tgt:	ibmvfc target struct
4148  *
4149  **/
4150 static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
4151 {
4152 	struct ibmvfc_query_tgt *query_tgt;
4153 	struct ibmvfc_host *vhost = tgt->vhost;
4154 	struct ibmvfc_event *evt;
4155 
4156 	if (vhost->discovery_threads >= disc_threads)
4157 		return;
4158 
4159 	kref_get(&tgt->kref);
4160 	evt = ibmvfc_get_event(vhost);
4161 	vhost->discovery_threads++;
4162 	evt->tgt = tgt;
4163 	ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
4164 	query_tgt = &evt->iu.query_tgt;
4165 	memset(query_tgt, 0, sizeof(*query_tgt));
4166 	query_tgt->common.version = cpu_to_be32(1);
4167 	query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
4168 	query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
4169 	query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
4170 
4171 	ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
4172 	if (ibmvfc_send_event(evt, vhost, default_timeout)) {
4173 		vhost->discovery_threads--;
4174 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
4175 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4176 	} else
4177 		tgt_dbg(tgt, "Sent Query Target\n");
4178 }
4179 
4180 /**
4181  * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
4182  * @vhost:		ibmvfc host struct
4183  * @scsi_id:	SCSI ID to allocate target for
4184  *
4185  * Returns:
4186  *	0 on success / other on failure
4187  **/
4188 static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
4189 			       struct ibmvfc_discover_targets_entry *target)
4190 {
4191 	struct ibmvfc_target *stgt = NULL;
4192 	struct ibmvfc_target *wtgt = NULL;
4193 	struct ibmvfc_target *tgt;
4194 	unsigned long flags;
4195 	u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
4196 	u64 wwpn = be64_to_cpu(target->wwpn);
4197 
4198 	/* Look to see if we already have a target allocated for this SCSI ID or WWPN */
4199 	spin_lock_irqsave(vhost->host->host_lock, flags);
4200 	list_for_each_entry(tgt, &vhost->targets, queue) {
4201 		if (tgt->wwpn == wwpn) {
4202 			wtgt = tgt;
4203 			break;
4204 		}
4205 	}
4206 
4207 	list_for_each_entry(tgt, &vhost->targets, queue) {
4208 		if (tgt->scsi_id == scsi_id) {
4209 			stgt = tgt;
4210 			break;
4211 		}
4212 	}
4213 
4214 	if (wtgt && !stgt) {
4215 		/*
4216 		 * A WWPN target has moved and we still are tracking the old
4217 		 * SCSI ID.  The only way we should be able to get here is if
4218 		 * we attempted to send an implicit logout for the old SCSI ID
4219 		 * and it failed for some reason, such as there being I/O
4220 		 * pending to the target. In this case, we will have already
4221 		 * deleted the rport from the FC transport so we do a move
4222 		 * login, which works even with I/O pending, as it will cancel
4223 		 * any active commands.
4224 		 */
4225 		if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
4226 			/*
4227 			 * Do a move login here. The old target is no longer
4228 			 * known to the transport layer We don't use the
4229 			 * normal ibmvfc_set_tgt_action to set this, as we
4230 			 * don't normally want to allow this state change.
4231 			 */
4232 			wtgt->old_scsi_id = wtgt->scsi_id;
4233 			wtgt->scsi_id = scsi_id;
4234 			wtgt->action = IBMVFC_TGT_ACTION_INIT;
4235 			ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
4236 			goto unlock_out;
4237 		} else {
4238 			tgt_err(wtgt, "Unexpected target state: %d, %p\n",
4239 				wtgt->action, wtgt->rport);
4240 		}
4241 	} else if (stgt) {
4242 		if (tgt->need_login)
4243 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4244 		goto unlock_out;
4245 	}
4246 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4247 
4248 	tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
4249 	memset(tgt, 0, sizeof(*tgt));
4250 	tgt->scsi_id = scsi_id;
4251 	tgt->wwpn = wwpn;
4252 	tgt->vhost = vhost;
4253 	tgt->need_login = 1;
4254 	timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
4255 	kref_init(&tgt->kref);
4256 	ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
4257 	spin_lock_irqsave(vhost->host->host_lock, flags);
4258 	tgt->cancel_key = vhost->task_set++;
4259 	list_add_tail(&tgt->queue, &vhost->targets);
4260 
4261 unlock_out:
4262 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4263 	return 0;
4264 }
4265 
4266 /**
4267  * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
4268  * @vhost:		ibmvfc host struct
4269  *
4270  * Returns:
4271  *	0 on success / other on failure
4272  **/
4273 static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
4274 {
4275 	int i, rc;
4276 
4277 	for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
4278 		rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
4279 
4280 	return rc;
4281 }
4282 
4283 /**
4284  * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
4285  * @evt:	ibmvfc event struct
4286  *
4287  **/
4288 static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
4289 {
4290 	struct ibmvfc_host *vhost = evt->vhost;
4291 	struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
4292 	u32 mad_status = be16_to_cpu(rsp->common.status);
4293 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4294 
4295 	switch (mad_status) {
4296 	case IBMVFC_MAD_SUCCESS:
4297 		ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
4298 		vhost->num_targets = be32_to_cpu(rsp->num_written);
4299 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
4300 		break;
4301 	case IBMVFC_MAD_FAILED:
4302 		level += ibmvfc_retry_host_init(vhost);
4303 		ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
4304 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4305 			   be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4306 		break;
4307 	case IBMVFC_MAD_DRIVER_FAILED:
4308 		break;
4309 	default:
4310 		dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
4311 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4312 		break;
4313 	}
4314 
4315 	ibmvfc_free_event(evt);
4316 	wake_up(&vhost->work_wait_q);
4317 }
4318 
4319 /**
4320  * ibmvfc_discover_targets - Send Discover Targets MAD
4321  * @vhost:	ibmvfc host struct
4322  *
4323  **/
4324 static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
4325 {
4326 	struct ibmvfc_discover_targets *mad;
4327 	struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4328 
4329 	ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
4330 	mad = &evt->iu.discover_targets;
4331 	memset(mad, 0, sizeof(*mad));
4332 	mad->common.version = cpu_to_be32(1);
4333 	mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
4334 	mad->common.length = cpu_to_be16(sizeof(*mad));
4335 	mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
4336 	mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
4337 	mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
4338 	mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
4339 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4340 
4341 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4342 		ibmvfc_dbg(vhost, "Sent discover targets\n");
4343 	else
4344 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4345 }
4346 
4347 /**
4348  * ibmvfc_npiv_login_done - Completion handler for NPIV Login
4349  * @evt:	ibmvfc event struct
4350  *
4351  **/
4352 static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4353 {
4354 	struct ibmvfc_host *vhost = evt->vhost;
4355 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
4356 	struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
4357 	unsigned int npiv_max_sectors;
4358 	int level = IBMVFC_DEFAULT_LOG_LEVEL;
4359 
4360 	switch (mad_status) {
4361 	case IBMVFC_MAD_SUCCESS:
4362 		ibmvfc_free_event(evt);
4363 		break;
4364 	case IBMVFC_MAD_FAILED:
4365 		if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
4366 			level += ibmvfc_retry_host_init(vhost);
4367 		else
4368 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4369 		ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4370 			   ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4371 						be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4372 		ibmvfc_free_event(evt);
4373 		return;
4374 	case IBMVFC_MAD_CRQ_ERROR:
4375 		ibmvfc_retry_host_init(vhost);
4376 		fallthrough;
4377 	case IBMVFC_MAD_DRIVER_FAILED:
4378 		ibmvfc_free_event(evt);
4379 		return;
4380 	default:
4381 		dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
4382 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4383 		ibmvfc_free_event(evt);
4384 		return;
4385 	}
4386 
4387 	vhost->client_migrated = 0;
4388 
4389 	if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
4390 		dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
4391 			rsp->flags);
4392 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4393 		wake_up(&vhost->work_wait_q);
4394 		return;
4395 	}
4396 
4397 	if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
4398 		dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
4399 			rsp->max_cmds);
4400 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4401 		wake_up(&vhost->work_wait_q);
4402 		return;
4403 	}
4404 
4405 	vhost->logged_in = 1;
4406 	npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
4407 	dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
4408 		 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
4409 		 rsp->drc_name, npiv_max_sectors);
4410 
4411 	fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
4412 	fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
4413 	fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
4414 	fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
4415 	fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
4416 	fc_host_supported_classes(vhost->host) = 0;
4417 	if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
4418 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
4419 	if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
4420 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
4421 	if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
4422 		fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
4423 	fc_host_maxframe_size(vhost->host) =
4424 		be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
4425 
4426 	vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
4427 	vhost->host->max_sectors = npiv_max_sectors;
4428 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4429 	wake_up(&vhost->work_wait_q);
4430 }
4431 
4432 /**
4433  * ibmvfc_npiv_login - Sends NPIV login
4434  * @vhost:	ibmvfc host struct
4435  *
4436  **/
4437 static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
4438 {
4439 	struct ibmvfc_npiv_login_mad *mad;
4440 	struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
4441 
4442 	ibmvfc_gather_partition_info(vhost);
4443 	ibmvfc_set_login_info(vhost);
4444 	ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
4445 
4446 	memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
4447 	mad = &evt->iu.npiv_login;
4448 	memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
4449 	mad->common.version = cpu_to_be32(1);
4450 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
4451 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
4452 	mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
4453 	mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
4454 
4455 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
4456 
4457 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4458 		ibmvfc_dbg(vhost, "Sent NPIV login\n");
4459 	else
4460 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4461 }
4462 
4463 /**
4464  * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
4465  * @vhost:		ibmvfc host struct
4466  *
4467  **/
4468 static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
4469 {
4470 	struct ibmvfc_host *vhost = evt->vhost;
4471 	u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
4472 
4473 	ibmvfc_free_event(evt);
4474 
4475 	switch (mad_status) {
4476 	case IBMVFC_MAD_SUCCESS:
4477 		if (list_empty(&vhost->sent) &&
4478 		    vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
4479 			ibmvfc_init_host(vhost);
4480 			return;
4481 		}
4482 		break;
4483 	case IBMVFC_MAD_FAILED:
4484 	case IBMVFC_MAD_NOT_SUPPORTED:
4485 	case IBMVFC_MAD_CRQ_ERROR:
4486 	case IBMVFC_MAD_DRIVER_FAILED:
4487 	default:
4488 		ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
4489 		break;
4490 	}
4491 
4492 	ibmvfc_hard_reset_host(vhost);
4493 }
4494 
4495 /**
4496  * ibmvfc_npiv_logout - Issue an NPIV Logout
4497  * @vhost:		ibmvfc host struct
4498  *
4499  **/
4500 static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
4501 {
4502 	struct ibmvfc_npiv_logout_mad *mad;
4503 	struct ibmvfc_event *evt;
4504 
4505 	evt = ibmvfc_get_event(vhost);
4506 	ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
4507 
4508 	mad = &evt->iu.npiv_logout;
4509 	memset(mad, 0, sizeof(*mad));
4510 	mad->common.version = cpu_to_be32(1);
4511 	mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
4512 	mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
4513 
4514 	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
4515 
4516 	if (!ibmvfc_send_event(evt, vhost, default_timeout))
4517 		ibmvfc_dbg(vhost, "Sent NPIV logout\n");
4518 	else
4519 		ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4520 }
4521 
4522 /**
4523  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
4524  * @vhost:		ibmvfc host struct
4525  *
4526  * Returns:
4527  *	1 if work to do / 0 if not
4528  **/
4529 static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
4530 {
4531 	struct ibmvfc_target *tgt;
4532 
4533 	list_for_each_entry(tgt, &vhost->targets, queue) {
4534 		if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
4535 		    tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4536 			return 1;
4537 	}
4538 
4539 	return 0;
4540 }
4541 
4542 /**
4543  * ibmvfc_dev_logo_to_do - Is there target logout work to do?
4544  * @vhost:		ibmvfc host struct
4545  *
4546  * Returns:
4547  *	1 if work to do / 0 if not
4548  **/
4549 static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
4550 {
4551 	struct ibmvfc_target *tgt;
4552 
4553 	list_for_each_entry(tgt, &vhost->targets, queue) {
4554 		if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
4555 		    tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
4556 			return 1;
4557 	}
4558 	return 0;
4559 }
4560 
4561 /**
4562  * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
4563  * @vhost:		ibmvfc host struct
4564  *
4565  * Returns:
4566  *	1 if work to do / 0 if not
4567  **/
4568 static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4569 {
4570 	struct ibmvfc_target *tgt;
4571 
4572 	if (kthread_should_stop())
4573 		return 1;
4574 	switch (vhost->action) {
4575 	case IBMVFC_HOST_ACTION_NONE:
4576 	case IBMVFC_HOST_ACTION_INIT_WAIT:
4577 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
4578 		return 0;
4579 	case IBMVFC_HOST_ACTION_TGT_INIT:
4580 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
4581 		if (vhost->discovery_threads == disc_threads)
4582 			return 0;
4583 		list_for_each_entry(tgt, &vhost->targets, queue)
4584 			if (tgt->action == IBMVFC_TGT_ACTION_INIT)
4585 				return 1;
4586 		list_for_each_entry(tgt, &vhost->targets, queue)
4587 			if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
4588 				return 0;
4589 		return 1;
4590 	case IBMVFC_HOST_ACTION_TGT_DEL:
4591 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4592 		if (vhost->discovery_threads == disc_threads)
4593 			return 0;
4594 		list_for_each_entry(tgt, &vhost->targets, queue)
4595 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
4596 				return 1;
4597 		list_for_each_entry(tgt, &vhost->targets, queue)
4598 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
4599 				return 0;
4600 		return 1;
4601 	case IBMVFC_HOST_ACTION_LOGO:
4602 	case IBMVFC_HOST_ACTION_INIT:
4603 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4604 	case IBMVFC_HOST_ACTION_QUERY:
4605 	case IBMVFC_HOST_ACTION_RESET:
4606 	case IBMVFC_HOST_ACTION_REENABLE:
4607 	default:
4608 		break;
4609 	}
4610 
4611 	return 1;
4612 }
4613 
4614 /**
4615  * ibmvfc_work_to_do - Is there task level work to do?
4616  * @vhost:		ibmvfc host struct
4617  *
4618  * Returns:
4619  *	1 if work to do / 0 if not
4620  **/
4621 static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
4622 {
4623 	unsigned long flags;
4624 	int rc;
4625 
4626 	spin_lock_irqsave(vhost->host->host_lock, flags);
4627 	rc = __ibmvfc_work_to_do(vhost);
4628 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4629 	return rc;
4630 }
4631 
4632 /**
4633  * ibmvfc_log_ae - Log async events if necessary
4634  * @vhost:		ibmvfc host struct
4635  * @events:		events to log
4636  *
4637  **/
4638 static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
4639 {
4640 	if (events & IBMVFC_AE_RSCN)
4641 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
4642 	if ((events & IBMVFC_AE_LINKDOWN) &&
4643 	    vhost->state >= IBMVFC_HALTED)
4644 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
4645 	if ((events & IBMVFC_AE_LINKUP) &&
4646 	    vhost->state == IBMVFC_INITIALIZING)
4647 		fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
4648 }
4649 
4650 /**
4651  * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
4652  * @tgt:		ibmvfc target struct
4653  *
4654  **/
4655 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
4656 {
4657 	struct ibmvfc_host *vhost = tgt->vhost;
4658 	struct fc_rport *rport;
4659 	unsigned long flags;
4660 
4661 	tgt_dbg(tgt, "Adding rport\n");
4662 	rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
4663 	spin_lock_irqsave(vhost->host->host_lock, flags);
4664 
4665 	if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4666 		tgt_dbg(tgt, "Deleting rport\n");
4667 		list_del(&tgt->queue);
4668 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4669 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4670 		fc_remote_port_delete(rport);
4671 		del_timer_sync(&tgt->timer);
4672 		kref_put(&tgt->kref, ibmvfc_release_tgt);
4673 		return;
4674 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
4675 		tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
4676 		ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
4677 		tgt->rport = NULL;
4678 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4679 		fc_remote_port_delete(rport);
4680 		return;
4681 	} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
4682 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4683 		return;
4684 	}
4685 
4686 	if (rport) {
4687 		tgt_dbg(tgt, "rport add succeeded\n");
4688 		tgt->rport = rport;
4689 		rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
4690 		rport->supported_classes = 0;
4691 		tgt->target_id = rport->scsi_target_id;
4692 		if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
4693 			rport->supported_classes |= FC_COS_CLASS1;
4694 		if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
4695 			rport->supported_classes |= FC_COS_CLASS2;
4696 		if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
4697 			rport->supported_classes |= FC_COS_CLASS3;
4698 		if (rport->rqst_q)
4699 			blk_queue_max_segments(rport->rqst_q, 1);
4700 	} else
4701 		tgt_dbg(tgt, "rport add failed\n");
4702 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4703 }
4704 
4705 /**
4706  * ibmvfc_do_work - Do task level work
4707  * @vhost:		ibmvfc host struct
4708  *
4709  **/
4710 static void ibmvfc_do_work(struct ibmvfc_host *vhost)
4711 {
4712 	struct ibmvfc_target *tgt;
4713 	unsigned long flags;
4714 	struct fc_rport *rport;
4715 	int rc;
4716 
4717 	ibmvfc_log_ae(vhost, vhost->events_to_log);
4718 	spin_lock_irqsave(vhost->host->host_lock, flags);
4719 	vhost->events_to_log = 0;
4720 	switch (vhost->action) {
4721 	case IBMVFC_HOST_ACTION_NONE:
4722 	case IBMVFC_HOST_ACTION_LOGO_WAIT:
4723 	case IBMVFC_HOST_ACTION_INIT_WAIT:
4724 		break;
4725 	case IBMVFC_HOST_ACTION_RESET:
4726 		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4727 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4728 		rc = ibmvfc_reset_crq(vhost);
4729 		spin_lock_irqsave(vhost->host->host_lock, flags);
4730 		if (rc == H_CLOSED)
4731 			vio_enable_interrupts(to_vio_dev(vhost->dev));
4732 		if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
4733 		    (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
4734 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4735 			dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
4736 		}
4737 		break;
4738 	case IBMVFC_HOST_ACTION_REENABLE:
4739 		vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
4740 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4741 		rc = ibmvfc_reenable_crq_queue(vhost);
4742 		spin_lock_irqsave(vhost->host->host_lock, flags);
4743 		if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
4744 			ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4745 			dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
4746 		}
4747 		break;
4748 	case IBMVFC_HOST_ACTION_LOGO:
4749 		vhost->job_step(vhost);
4750 		break;
4751 	case IBMVFC_HOST_ACTION_INIT:
4752 		BUG_ON(vhost->state != IBMVFC_INITIALIZING);
4753 		if (vhost->delay_init) {
4754 			vhost->delay_init = 0;
4755 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
4756 			ssleep(15);
4757 			return;
4758 		} else
4759 			vhost->job_step(vhost);
4760 		break;
4761 	case IBMVFC_HOST_ACTION_QUERY:
4762 		list_for_each_entry(tgt, &vhost->targets, queue)
4763 			ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
4764 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
4765 		break;
4766 	case IBMVFC_HOST_ACTION_QUERY_TGTS:
4767 		list_for_each_entry(tgt, &vhost->targets, queue) {
4768 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4769 				tgt->job_step(tgt);
4770 				break;
4771 			}
4772 		}
4773 
4774 		if (!ibmvfc_dev_init_to_do(vhost))
4775 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
4776 		break;
4777 	case IBMVFC_HOST_ACTION_TGT_DEL:
4778 	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
4779 		list_for_each_entry(tgt, &vhost->targets, queue) {
4780 			if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
4781 				tgt->job_step(tgt);
4782 				break;
4783 			}
4784 		}
4785 
4786 		if (ibmvfc_dev_logo_to_do(vhost)) {
4787 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
4788 			return;
4789 		}
4790 
4791 		list_for_each_entry(tgt, &vhost->targets, queue) {
4792 			if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
4793 				tgt_dbg(tgt, "Deleting rport\n");
4794 				rport = tgt->rport;
4795 				tgt->rport = NULL;
4796 				list_del(&tgt->queue);
4797 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
4798 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
4799 				if (rport)
4800 					fc_remote_port_delete(rport);
4801 				del_timer_sync(&tgt->timer);
4802 				kref_put(&tgt->kref, ibmvfc_release_tgt);
4803 				return;
4804 			} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
4805 				tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
4806 				rport = tgt->rport;
4807 				tgt->rport = NULL;
4808 				ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
4809 				spin_unlock_irqrestore(vhost->host->host_lock, flags);
4810 				if (rport)
4811 					fc_remote_port_delete(rport);
4812 				return;
4813 			}
4814 		}
4815 
4816 		if (vhost->state == IBMVFC_INITIALIZING) {
4817 			if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
4818 				if (vhost->reinit) {
4819 					vhost->reinit = 0;
4820 					scsi_block_requests(vhost->host);
4821 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
4822 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
4823 				} else {
4824 					ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
4825 					ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4826 					wake_up(&vhost->init_wait_q);
4827 					schedule_work(&vhost->rport_add_work_q);
4828 					vhost->init_retries = 0;
4829 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
4830 					scsi_unblock_requests(vhost->host);
4831 				}
4832 
4833 				return;
4834 			} else {
4835 				ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
4836 				vhost->job_step = ibmvfc_discover_targets;
4837 			}
4838 		} else {
4839 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
4840 			spin_unlock_irqrestore(vhost->host->host_lock, flags);
4841 			scsi_unblock_requests(vhost->host);
4842 			wake_up(&vhost->init_wait_q);
4843 			return;
4844 		}
4845 		break;
4846 	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
4847 		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
4848 		spin_unlock_irqrestore(vhost->host->host_lock, flags);
4849 		ibmvfc_alloc_targets(vhost);
4850 		spin_lock_irqsave(vhost->host->host_lock, flags);
4851 		break;
4852 	case IBMVFC_HOST_ACTION_TGT_INIT:
4853 		list_for_each_entry(tgt, &vhost->targets, queue) {
4854 			if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
4855 				tgt->job_step(tgt);
4856 				break;
4857 			}
4858 		}
4859 
4860 		if (!ibmvfc_dev_init_to_do(vhost))
4861 			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
4862 		break;
4863 	default:
4864 		break;
4865 	}
4866 
4867 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
4868 }
4869 
4870 /**
4871  * ibmvfc_work - Do task level work
4872  * @data:		ibmvfc host struct
4873  *
4874  * Returns:
4875  *	zero
4876  **/
4877 static int ibmvfc_work(void *data)
4878 {
4879 	struct ibmvfc_host *vhost = data;
4880 	int rc;
4881 
4882 	set_user_nice(current, MIN_NICE);
4883 
4884 	while (1) {
4885 		rc = wait_event_interruptible(vhost->work_wait_q,
4886 					      ibmvfc_work_to_do(vhost));
4887 
4888 		BUG_ON(rc);
4889 
4890 		if (kthread_should_stop())
4891 			break;
4892 
4893 		ibmvfc_do_work(vhost);
4894 	}
4895 
4896 	ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
4897 	return 0;
4898 }
4899 
4900 /**
4901  * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
4902  * @vhost:	ibmvfc host struct
4903  *
4904  * Allocates a page for messages, maps it for dma, and registers
4905  * the crq with the hypervisor.
4906  *
4907  * Return value:
4908  *	zero on success / other on failure
4909  **/
4910 static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
4911 {
4912 	int rc, retrc = -ENOMEM;
4913 	struct device *dev = vhost->dev;
4914 	struct vio_dev *vdev = to_vio_dev(dev);
4915 	struct ibmvfc_crq_queue *crq = &vhost->crq;
4916 
4917 	ENTER;
4918 	crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
4919 
4920 	if (!crq->msgs)
4921 		return -ENOMEM;
4922 
4923 	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4924 	crq->msg_token = dma_map_single(dev, crq->msgs,
4925 					PAGE_SIZE, DMA_BIDIRECTIONAL);
4926 
4927 	if (dma_mapping_error(dev, crq->msg_token))
4928 		goto map_failed;
4929 
4930 	retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4931 					crq->msg_token, PAGE_SIZE);
4932 
4933 	if (rc == H_RESOURCE)
4934 		/* maybe kexecing and resource is busy. try a reset */
4935 		retrc = rc = ibmvfc_reset_crq(vhost);
4936 
4937 	if (rc == H_CLOSED)
4938 		dev_warn(dev, "Partner adapter not ready\n");
4939 	else if (rc) {
4940 		dev_warn(dev, "Error %d opening adapter\n", rc);
4941 		goto reg_crq_failed;
4942 	}
4943 
4944 	retrc = 0;
4945 
4946 	tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
4947 
4948 	if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
4949 		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
4950 		goto req_irq_failed;
4951 	}
4952 
4953 	if ((rc = vio_enable_interrupts(vdev))) {
4954 		dev_err(dev, "Error %d enabling interrupts\n", rc);
4955 		goto req_irq_failed;
4956 	}
4957 
4958 	crq->cur = 0;
4959 	LEAVE;
4960 	return retrc;
4961 
4962 req_irq_failed:
4963 	tasklet_kill(&vhost->tasklet);
4964 	do {
4965 		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4966 	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4967 reg_crq_failed:
4968 	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4969 map_failed:
4970 	free_page((unsigned long)crq->msgs);
4971 	return retrc;
4972 }
4973 
4974 /**
4975  * ibmvfc_free_mem - Free memory for vhost
4976  * @vhost:	ibmvfc host struct
4977  *
4978  * Return value:
4979  * 	none
4980  **/
4981 static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
4982 {
4983 	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
4984 
4985 	ENTER;
4986 	mempool_destroy(vhost->tgt_pool);
4987 	kfree(vhost->trace);
4988 	dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
4989 			  vhost->disc_buf_dma);
4990 	dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
4991 			  vhost->login_buf, vhost->login_buf_dma);
4992 	dma_pool_destroy(vhost->sg_pool);
4993 	dma_unmap_single(vhost->dev, async_q->msg_token,
4994 			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
4995 	free_page((unsigned long)async_q->msgs);
4996 	LEAVE;
4997 }
4998 
4999 /**
5000  * ibmvfc_alloc_mem - Allocate memory for vhost
5001  * @vhost:	ibmvfc host struct
5002  *
5003  * Return value:
5004  * 	0 on success / non-zero on failure
5005  **/
5006 static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
5007 {
5008 	struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
5009 	struct device *dev = vhost->dev;
5010 
5011 	ENTER;
5012 	async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
5013 	if (!async_q->msgs) {
5014 		dev_err(dev, "Couldn't allocate async queue.\n");
5015 		goto nomem;
5016 	}
5017 
5018 	async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
5019 	async_q->msg_token = dma_map_single(dev, async_q->msgs,
5020 					    async_q->size * sizeof(*async_q->msgs),
5021 					    DMA_BIDIRECTIONAL);
5022 
5023 	if (dma_mapping_error(dev, async_q->msg_token)) {
5024 		dev_err(dev, "Failed to map async queue\n");
5025 		goto free_async_crq;
5026 	}
5027 
5028 	vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
5029 					 SG_ALL * sizeof(struct srp_direct_buf),
5030 					 sizeof(struct srp_direct_buf), 0);
5031 
5032 	if (!vhost->sg_pool) {
5033 		dev_err(dev, "Failed to allocate sg pool\n");
5034 		goto unmap_async_crq;
5035 	}
5036 
5037 	vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
5038 					      &vhost->login_buf_dma, GFP_KERNEL);
5039 
5040 	if (!vhost->login_buf) {
5041 		dev_err(dev, "Couldn't allocate NPIV login buffer\n");
5042 		goto free_sg_pool;
5043 	}
5044 
5045 	vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
5046 	vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
5047 					     &vhost->disc_buf_dma, GFP_KERNEL);
5048 
5049 	if (!vhost->disc_buf) {
5050 		dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
5051 		goto free_login_buffer;
5052 	}
5053 
5054 	vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
5055 			       sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
5056 
5057 	if (!vhost->trace)
5058 		goto free_disc_buffer;
5059 
5060 	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
5061 						      sizeof(struct ibmvfc_target));
5062 
5063 	if (!vhost->tgt_pool) {
5064 		dev_err(dev, "Couldn't allocate target memory pool\n");
5065 		goto free_trace;
5066 	}
5067 
5068 	LEAVE;
5069 	return 0;
5070 
5071 free_trace:
5072 	kfree(vhost->trace);
5073 free_disc_buffer:
5074 	dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
5075 			  vhost->disc_buf_dma);
5076 free_login_buffer:
5077 	dma_free_coherent(dev, sizeof(*vhost->login_buf),
5078 			  vhost->login_buf, vhost->login_buf_dma);
5079 free_sg_pool:
5080 	dma_pool_destroy(vhost->sg_pool);
5081 unmap_async_crq:
5082 	dma_unmap_single(dev, async_q->msg_token,
5083 			 async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
5084 free_async_crq:
5085 	free_page((unsigned long)async_q->msgs);
5086 nomem:
5087 	LEAVE;
5088 	return -ENOMEM;
5089 }
5090 
5091 /**
5092  * ibmvfc_rport_add_thread - Worker thread for rport adds
5093  * @work:	work struct
5094  *
5095  **/
5096 static void ibmvfc_rport_add_thread(struct work_struct *work)
5097 {
5098 	struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
5099 						 rport_add_work_q);
5100 	struct ibmvfc_target *tgt;
5101 	struct fc_rport *rport;
5102 	unsigned long flags;
5103 	int did_work;
5104 
5105 	ENTER;
5106 	spin_lock_irqsave(vhost->host->host_lock, flags);
5107 	do {
5108 		did_work = 0;
5109 		if (vhost->state != IBMVFC_ACTIVE)
5110 			break;
5111 
5112 		list_for_each_entry(tgt, &vhost->targets, queue) {
5113 			if (tgt->add_rport) {
5114 				did_work = 1;
5115 				tgt->add_rport = 0;
5116 				kref_get(&tgt->kref);
5117 				rport = tgt->rport;
5118 				if (!rport) {
5119 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5120 					ibmvfc_tgt_add_rport(tgt);
5121 				} else if (get_device(&rport->dev)) {
5122 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5123 					tgt_dbg(tgt, "Setting rport roles\n");
5124 					fc_remote_port_rolechg(rport, tgt->ids.roles);
5125 					put_device(&rport->dev);
5126 				} else {
5127 					spin_unlock_irqrestore(vhost->host->host_lock, flags);
5128 				}
5129 
5130 				kref_put(&tgt->kref, ibmvfc_release_tgt);
5131 				spin_lock_irqsave(vhost->host->host_lock, flags);
5132 				break;
5133 			}
5134 		}
5135 	} while(did_work);
5136 
5137 	if (vhost->state == IBMVFC_ACTIVE)
5138 		vhost->scan_complete = 1;
5139 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5140 	LEAVE;
5141 }
5142 
5143 /**
5144  * ibmvfc_probe - Adapter hot plug add entry point
5145  * @vdev:	vio device struct
5146  * @id:	vio device id struct
5147  *
5148  * Return value:
5149  * 	0 on success / non-zero on failure
5150  **/
5151 static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
5152 {
5153 	struct ibmvfc_host *vhost;
5154 	struct Scsi_Host *shost;
5155 	struct device *dev = &vdev->dev;
5156 	int rc = -ENOMEM;
5157 
5158 	ENTER;
5159 	shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
5160 	if (!shost) {
5161 		dev_err(dev, "Couldn't allocate host data\n");
5162 		goto out;
5163 	}
5164 
5165 	shost->transportt = ibmvfc_transport_template;
5166 	shost->can_queue = max_requests;
5167 	shost->max_lun = max_lun;
5168 	shost->max_id = max_targets;
5169 	shost->max_sectors = IBMVFC_MAX_SECTORS;
5170 	shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
5171 	shost->unique_id = shost->host_no;
5172 
5173 	vhost = shost_priv(shost);
5174 	INIT_LIST_HEAD(&vhost->sent);
5175 	INIT_LIST_HEAD(&vhost->free);
5176 	INIT_LIST_HEAD(&vhost->targets);
5177 	sprintf(vhost->name, IBMVFC_NAME);
5178 	vhost->host = shost;
5179 	vhost->dev = dev;
5180 	vhost->partition_number = -1;
5181 	vhost->log_level = log_level;
5182 	vhost->task_set = 1;
5183 	strcpy(vhost->partition_name, "UNKNOWN");
5184 	init_waitqueue_head(&vhost->work_wait_q);
5185 	init_waitqueue_head(&vhost->init_wait_q);
5186 	INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
5187 	mutex_init(&vhost->passthru_mutex);
5188 
5189 	if ((rc = ibmvfc_alloc_mem(vhost)))
5190 		goto free_scsi_host;
5191 
5192 	vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
5193 					 shost->host_no);
5194 
5195 	if (IS_ERR(vhost->work_thread)) {
5196 		dev_err(dev, "Couldn't create kernel thread: %ld\n",
5197 			PTR_ERR(vhost->work_thread));
5198 		rc = PTR_ERR(vhost->work_thread);
5199 		goto free_host_mem;
5200 	}
5201 
5202 	if ((rc = ibmvfc_init_crq(vhost))) {
5203 		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
5204 		goto kill_kthread;
5205 	}
5206 
5207 	if ((rc = ibmvfc_init_event_pool(vhost))) {
5208 		dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
5209 		goto release_crq;
5210 	}
5211 
5212 	if ((rc = scsi_add_host(shost, dev)))
5213 		goto release_event_pool;
5214 
5215 	fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
5216 
5217 	if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
5218 					   &ibmvfc_trace_attr))) {
5219 		dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
5220 		goto remove_shost;
5221 	}
5222 
5223 	if (shost_to_fc_host(shost)->rqst_q)
5224 		blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
5225 	dev_set_drvdata(dev, vhost);
5226 	spin_lock(&ibmvfc_driver_lock);
5227 	list_add_tail(&vhost->queue, &ibmvfc_head);
5228 	spin_unlock(&ibmvfc_driver_lock);
5229 
5230 	ibmvfc_send_crq_init(vhost);
5231 	scsi_scan_host(shost);
5232 	return 0;
5233 
5234 remove_shost:
5235 	scsi_remove_host(shost);
5236 release_event_pool:
5237 	ibmvfc_free_event_pool(vhost);
5238 release_crq:
5239 	ibmvfc_release_crq_queue(vhost);
5240 kill_kthread:
5241 	kthread_stop(vhost->work_thread);
5242 free_host_mem:
5243 	ibmvfc_free_mem(vhost);
5244 free_scsi_host:
5245 	scsi_host_put(shost);
5246 out:
5247 	LEAVE;
5248 	return rc;
5249 }
5250 
5251 /**
5252  * ibmvfc_remove - Adapter hot plug remove entry point
5253  * @vdev:	vio device struct
5254  *
5255  * Return value:
5256  * 	0
5257  **/
5258 static int ibmvfc_remove(struct vio_dev *vdev)
5259 {
5260 	struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
5261 	unsigned long flags;
5262 
5263 	ENTER;
5264 	ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
5265 
5266 	spin_lock_irqsave(vhost->host->host_lock, flags);
5267 	ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
5268 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5269 
5270 	ibmvfc_wait_while_resetting(vhost);
5271 	ibmvfc_release_crq_queue(vhost);
5272 	kthread_stop(vhost->work_thread);
5273 	fc_remove_host(vhost->host);
5274 	scsi_remove_host(vhost->host);
5275 
5276 	spin_lock_irqsave(vhost->host->host_lock, flags);
5277 	ibmvfc_purge_requests(vhost, DID_ERROR);
5278 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5279 	ibmvfc_free_event_pool(vhost);
5280 
5281 	ibmvfc_free_mem(vhost);
5282 	spin_lock(&ibmvfc_driver_lock);
5283 	list_del(&vhost->queue);
5284 	spin_unlock(&ibmvfc_driver_lock);
5285 	scsi_host_put(vhost->host);
5286 	LEAVE;
5287 	return 0;
5288 }
5289 
5290 /**
5291  * ibmvfc_resume - Resume from suspend
5292  * @dev:	device struct
5293  *
5294  * We may have lost an interrupt across suspend/resume, so kick the
5295  * interrupt handler
5296  *
5297  */
5298 static int ibmvfc_resume(struct device *dev)
5299 {
5300 	unsigned long flags;
5301 	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
5302 	struct vio_dev *vdev = to_vio_dev(dev);
5303 
5304 	spin_lock_irqsave(vhost->host->host_lock, flags);
5305 	vio_disable_interrupts(vdev);
5306 	tasklet_schedule(&vhost->tasklet);
5307 	spin_unlock_irqrestore(vhost->host->host_lock, flags);
5308 	return 0;
5309 }
5310 
5311 /**
5312  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
5313  * @vdev:	vio device struct
5314  *
5315  * Return value:
5316  *	Number of bytes the driver will need to DMA map at the same time in
5317  *	order to perform well.
5318  */
5319 static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
5320 {
5321 	unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
5322 	return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
5323 }
5324 
/* VIO device IDs this driver binds to; the empty entry terminates the list */
static const struct vio_device_id ibmvfc_device_table[] = {
	{"fcp", "IBM,vfc-client"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
5330 
/* Only a resume hook is needed; see ibmvfc_resume for why */
static const struct dev_pm_ops ibmvfc_pm_ops = {
	.resume = ibmvfc_resume
};
5334 
/* VIO bus driver registration for the virtual FC client adapter */
static struct vio_driver ibmvfc_driver = {
	.id_table = ibmvfc_device_table,
	.probe = ibmvfc_probe,
	.remove = ibmvfc_remove,
	.get_desired_dma = ibmvfc_get_desired_dma,
	.name = IBMVFC_NAME,
	.pm = &ibmvfc_pm_ops,
};
5343 
/*
 * FC transport class template: getters/callbacks this driver implements
 * plus the sysfs attributes (show_*) it wants exposed.
 */
static struct fc_function_template ibmvfc_transport_functions = {
	.show_host_fabric_name = 1,
	.show_host_node_name = 1,
	.show_host_port_name = 1,
	.show_host_supported_classes = 1,
	.show_host_port_type = 1,
	.show_host_port_id = 1,
	.show_host_maxframe_size = 1,

	.get_host_port_state = ibmvfc_get_host_port_state,
	.show_host_port_state = 1,

	.get_host_speed = ibmvfc_get_host_speed,
	.show_host_speed = 1,

	.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
	.terminate_rport_io = ibmvfc_terminate_rport_io,

	.show_rport_maxframe_size = 1,
	.show_rport_supported_classes = 1,

	.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
	.show_rport_dev_loss_tmo = 1,

	.get_starget_node_name = ibmvfc_get_starget_node_name,
	.show_starget_node_name = 1,

	.get_starget_port_name = ibmvfc_get_starget_port_name,
	.show_starget_port_name = 1,

	.get_starget_port_id = ibmvfc_get_starget_port_id,
	.show_starget_port_id = 1,

	.bsg_request = ibmvfc_bsg_request,
	.bsg_timeout = ibmvfc_bsg_timeout,
};
5380 
5381 /**
5382  * ibmvfc_module_init - Initialize the ibmvfc module
5383  *
5384  * Return value:
5385  * 	0 on success / other on failure
5386  **/
5387 static int __init ibmvfc_module_init(void)
5388 {
5389 	int rc;
5390 
5391 	if (!firmware_has_feature(FW_FEATURE_VIO))
5392 		return -ENODEV;
5393 
5394 	printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
5395 	       IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
5396 
5397 	ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
5398 	if (!ibmvfc_transport_template)
5399 		return -ENOMEM;
5400 
5401 	rc = vio_register_driver(&ibmvfc_driver);
5402 	if (rc)
5403 		fc_release_transport(ibmvfc_transport_template);
5404 	return rc;
5405 }
5406 
5407 /**
5408  * ibmvfc_module_exit - Teardown the ibmvfc module
5409  *
5410  * Return value:
5411  * 	nothing
5412  **/
5413 static void __exit ibmvfc_module_exit(void)
5414 {
5415 	vio_unregister_driver(&ibmvfc_driver);
5416 	fc_release_transport(ibmvfc_transport_template);
5417 }
5418 
5419 module_init(ibmvfc_module_init);
5420 module_exit(ibmvfc_module_exit);
5421